aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2008-12-28 15:49:40 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2008-12-28 15:49:40 -0500
commit0191b625ca5a46206d2fb862bb08f36f2fcb3b31 (patch)
tree454d1842b1833d976da62abcbd5c47521ebe9bd7 /drivers/net
parent54a696bd07c14d3b1192d03ce7269bc59b45209a (diff)
parenteb56092fc168bf5af199d47af50c0d84a96db898 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1429 commits) net: Allow dependancies of FDDI & Tokenring to be modular. igb: Fix build warning when DCA is disabled. net: Fix warning fallout from recent NAPI interface changes. gro: Fix potential use after free sfc: If AN is enabled, always read speed/duplex from the AN advertising bits sfc: When disabling the NIC, close the device rather than unregistering it sfc: SFT9001: Add cable diagnostics sfc: Add support for multiple PHY self-tests sfc: Merge top-level functions for self-tests sfc: Clean up PHY mode management in loopback self-test sfc: Fix unreliable link detection in some loopback modes sfc: Generate unique names for per-NIC workqueues 802.3ad: use standard ethhdr instead of ad_header 802.3ad: generalize out mac address initializer 802.3ad: initialize ports LACPDU from const initializer 802.3ad: remove typedef around ad_system 802.3ad: turn ports is_individual into a bool 802.3ad: turn ports is_enabled into a bool 802.3ad: make ntt bool ixgbe: Fix set_ringparam in ixgbe to use the same memory pools. ... Fixed trivial IPv4/6 address printing conflicts in fs/cifs/connect.c due to the conversion to %pI (in this networking merge) and the addition of doing IPv6 addresses (from the earlier merge of CIFS).
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c501.c3
-rw-r--r--drivers/net/3c501.h2
-rw-r--r--drivers/net/3c503.c21
-rw-r--r--drivers/net/3c505.c51
-rw-r--r--drivers/net/3c507.c15
-rw-r--r--drivers/net/3c509.c6
-rw-r--r--drivers/net/3c515.c13
-rw-r--r--drivers/net/3c523.c46
-rw-r--r--drivers/net/3c527.c4
-rw-r--r--drivers/net/3c59x.c13
-rw-r--r--drivers/net/7990.c1
-rw-r--r--drivers/net/8139cp.c47
-rw-r--r--drivers/net/8139too.c57
-rw-r--r--drivers/net/82596.c48
-rw-r--r--drivers/net/8390.c50
-rw-r--r--drivers/net/8390.h15
-rw-r--r--drivers/net/8390p.c50
-rw-r--r--drivers/net/Kconfig64
-rw-r--r--drivers/net/Makefile11
-rw-r--r--drivers/net/a2065.c5
-rw-r--r--drivers/net/ac3200.c5
-rw-r--r--drivers/net/acenic.c37
-rw-r--r--drivers/net/amd8111e.c12
-rw-r--r--drivers/net/apne.c29
-rw-r--r--drivers/net/appletalk/cops.c9
-rw-r--r--drivers/net/appletalk/ipddp.c6
-rw-r--r--drivers/net/appletalk/ltpc.c13
-rw-r--r--drivers/net/arcnet/arc-rawmode.c5
-rw-r--r--drivers/net/arcnet/arc-rimi.c16
-rw-r--r--drivers/net/arcnet/arcnet.c32
-rw-r--r--drivers/net/arcnet/capmode.c9
-rw-r--r--drivers/net/arcnet/com20020-isa.c4
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/arcnet/com20020.c10
-rw-r--r--drivers/net/arcnet/com90io.c4
-rw-r--r--drivers/net/arcnet/com90xx.c12
-rw-r--r--drivers/net/arcnet/rfc1051.c9
-rw-r--r--drivers/net/arcnet/rfc1201.c14
-rw-r--r--drivers/net/ariadne.c21
-rw-r--r--drivers/net/arm/Kconfig10
-rw-r--r--drivers/net/arm/Makefile3
-rw-r--r--drivers/net/arm/am79c961a.c7
-rw-r--r--drivers/net/arm/at91_ether.c11
-rw-r--r--drivers/net/arm/ep93xx_eth.c8
-rw-r--r--drivers/net/arm/ether1.c5
-rw-r--r--drivers/net/arm/ether3.c5
-rw-r--r--drivers/net/arm/etherh.c24
-rw-r--r--drivers/net/arm/ixp4xx_eth.c344
-rw-r--r--drivers/net/arm/ks8695net.c1676
-rw-r--r--drivers/net/arm/ks8695net.h107
-rw-r--r--drivers/net/at1700.c16
-rw-r--r--drivers/net/atarilance.c52
-rw-r--r--drivers/net/atl1e/atl1e_main.c47
-rw-r--r--drivers/net/atlx/atl1.c102
-rw-r--r--drivers/net/atlx/atl1.h4
-rw-r--r--drivers/net/atlx/atl2.c101
-rw-r--r--drivers/net/atlx/atl2.h1
-rw-r--r--drivers/net/atlx/atlx.c13
-rw-r--r--drivers/net/atp.c29
-rw-r--r--drivers/net/au1000_eth.c50
-rw-r--r--drivers/net/ax88796.c9
-rw-r--r--drivers/net/b44.c13
-rw-r--r--drivers/net/bfin_mac.c1
-rw-r--r--drivers/net/bmac.c7
-rw-r--r--drivers/net/bnx2.c94
-rw-r--r--drivers/net/bnx2.h42
-rw-r--r--drivers/net/bnx2x_link.c72
-rw-r--r--drivers/net/bnx2x_link.h10
-rw-r--r--drivers/net/bnx2x_main.c95
-rw-r--r--drivers/net/bonding/Makefile3
-rw-r--r--drivers/net/bonding/bond_3ad.c667
-rw-r--r--drivers/net/bonding/bond_3ad.h59
-rw-r--r--drivers/net/bonding/bond_alb.c37
-rw-r--r--drivers/net/bonding/bond_ipv6.c216
-rw-r--r--drivers/net/bonding/bond_main.c477
-rw-r--r--drivers/net/bonding/bond_sysfs.c160
-rw-r--r--drivers/net/bonding/bonding.h68
-rw-r--r--drivers/net/can/vcan.c24
-rw-r--r--drivers/net/cassini.c16
-rw-r--r--drivers/net/chelsio/cxgb2.c94
-rw-r--r--drivers/net/chelsio/sge.c10
-rw-r--r--drivers/net/cpmac.c15
-rw-r--r--drivers/net/cris/eth_v10.c4
-rw-r--r--drivers/net/cs89x0.c16
-rw-r--r--drivers/net/cxgb3/adapter.h5
-rw-r--r--drivers/net/cxgb3/common.h4
-rw-r--r--drivers/net/cxgb3/cxgb3_ctl_defs.h17
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c130
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c76
-rw-r--r--drivers/net/cxgb3/sge.c105
-rw-r--r--drivers/net/cxgb3/t3_hw.c26
-rw-r--r--drivers/net/cxgb3/version.h2
-rw-r--r--drivers/net/cxgb3/vsc8211.c2
-rw-r--r--drivers/net/de600.c4
-rw-r--r--drivers/net/de620.c26
-rw-r--r--drivers/net/declance.c7
-rw-r--r--drivers/net/defxx.c27
-rw-r--r--drivers/net/depca.c54
-rw-r--r--drivers/net/dl2k.c28
-rw-r--r--drivers/net/dm9000.c40
-rw-r--r--drivers/net/dummy.c15
-rw-r--r--drivers/net/e100.c68
-rw-r--r--drivers/net/e1000/e1000.h1
-rw-r--r--drivers/net/e1000/e1000_main.c107
-rw-r--r--drivers/net/e1000e/82571.c19
-rw-r--r--drivers/net/e1000e/defines.h8
-rw-r--r--drivers/net/e1000e/e1000.h2
-rw-r--r--drivers/net/e1000e/es2lan.c194
-rw-r--r--drivers/net/e1000e/ethtool.c82
-rw-r--r--drivers/net/e1000e/hw.h8
-rw-r--r--drivers/net/e1000e/ich8lan.c151
-rw-r--r--drivers/net/e1000e/lib.c80
-rw-r--r--drivers/net/e1000e/netdev.c131
-rw-r--r--drivers/net/e1000e/phy.c12
-rw-r--r--drivers/net/e2100.c24
-rw-r--r--drivers/net/eepro.c10
-rw-r--r--drivers/net/eepro100.c2401
-rw-r--r--drivers/net/eexpress.c3
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c7
-rw-r--r--drivers/net/ehea/ehea_qmr.c18
-rw-r--r--drivers/net/enc28j60.c49
-rw-r--r--drivers/net/enic/cq_desc.h5
-rw-r--r--drivers/net/enic/enic.h2
-rw-r--r--drivers/net/enic/enic_main.c86
-rw-r--r--drivers/net/enic/enic_res.c7
-rw-r--r--drivers/net/enic/enic_res.h4
-rw-r--r--drivers/net/enic/vnic_dev.c70
-rw-r--r--drivers/net/enic/vnic_devcmd.h19
-rw-r--r--drivers/net/enic/vnic_intr.h2
-rw-r--r--drivers/net/enic/vnic_resource.h2
-rw-r--r--drivers/net/enic/vnic_rq.h9
-rw-r--r--drivers/net/enic/vnic_rss.h13
-rw-r--r--drivers/net/enic/vnic_wq.h9
-rw-r--r--drivers/net/epic100.c56
-rw-r--r--drivers/net/eql.c12
-rw-r--r--drivers/net/es3210.c34
-rw-r--r--drivers/net/eth16i.c15
-rw-r--r--drivers/net/ewrk3.c18
-rw-r--r--drivers/net/fealnx.c6
-rw-r--r--drivers/net/fec.c6
-rw-r--r--drivers/net/fec_mpc52xx.c3
-rw-r--r--drivers/net/forcedeth.c90
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c9
-rw-r--r--drivers/net/gianfar.c861
-rw-r--r--drivers/net/gianfar.h78
-rw-r--r--drivers/net/gianfar_ethtool.c89
-rw-r--r--drivers/net/gianfar_mii.c212
-rw-r--r--drivers/net/gianfar_mii.h2
-rw-r--r--drivers/net/hamachi.c27
-rw-r--r--drivers/net/hamradio/6pack.c1
-rw-r--r--drivers/net/hamradio/baycom_epp.c1
-rw-r--r--drivers/net/hamradio/bpqether.c8
-rw-r--r--drivers/net/hamradio/dmascc.c13
-rw-r--r--drivers/net/hamradio/hdlcdrv.c1
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/hamradio/scc.c13
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/hp-plus.c27
-rw-r--r--drivers/net/hp.c25
-rw-r--r--drivers/net/hp100.c24
-rw-r--r--drivers/net/hydra.c28
-rw-r--r--drivers/net/ibm_newemac/core.c11
-rw-r--r--drivers/net/ibmlana.c6
-rw-r--r--drivers/net/ibmveth.c46
-rw-r--r--drivers/net/ifb.c13
-rw-r--r--drivers/net/igb/e1000_defines.h7
-rw-r--r--drivers/net/igb/e1000_mac.c25
-rw-r--r--drivers/net/igb/e1000_regs.h4
-rw-r--r--drivers/net/igb/igb.h44
-rw-r--r--drivers/net/igb/igb_ethtool.c131
-rw-r--r--drivers/net/igb/igb_main.c403
-rw-r--r--drivers/net/ioc3-eth.c13
-rw-r--r--drivers/net/ipg.c9
-rw-r--r--drivers/net/irda/ali-ircc.c17
-rw-r--r--drivers/net/irda/au1k_ir.c1
-rw-r--r--drivers/net/irda/donauboe.c8
-rw-r--r--drivers/net/irda/irda-usb.c15
-rw-r--r--drivers/net/irda/irtty-sir.c7
-rw-r--r--drivers/net/irda/kingsun-sir.c1
-rw-r--r--drivers/net/irda/ks959-sir.c1
-rw-r--r--drivers/net/irda/ksdazzle-sir.c1
-rw-r--r--drivers/net/irda/ma600-sir.c2
-rw-r--r--drivers/net/irda/mcs7780.c1
-rw-r--r--drivers/net/irda/nsc-ircc.c17
-rw-r--r--drivers/net/irda/pxaficp_ir.c4
-rw-r--r--drivers/net/irda/sa1100_ir.c28
-rw-r--r--drivers/net/irda/sir_dev.c12
-rw-r--r--drivers/net/irda/smsc-ircc2.c6
-rw-r--r--drivers/net/irda/stir4200.c1
-rw-r--r--drivers/net/irda/via-ircc.c16
-rw-r--r--drivers/net/irda/vlsi_ir.c37
-rw-r--r--drivers/net/irda/w83977af_ir.c29
-rw-r--r--drivers/net/isa-skeleton.c16
-rw-r--r--drivers/net/iseries_veth.c14
-rw-r--r--drivers/net/ixgb/ixgb_main.c58
-rw-r--r--drivers/net/ixgbe/Makefile2
-rw-r--r--drivers/net/ixgbe/ixgbe.h32
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c192
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c332
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h184
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c398
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.h94
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c641
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c134
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c485
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c326
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h25
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h28
-rw-r--r--drivers/net/ixp2000/ixpdev.c6
-rw-r--r--drivers/net/jazzsonic.c4
-rw-r--r--drivers/net/jme.c71
-rw-r--r--drivers/net/jme.h34
-rw-r--r--drivers/net/korina.c5
-rw-r--r--drivers/net/lance.c32
-rw-r--r--drivers/net/lib82596.c14
-rw-r--r--drivers/net/lib8390.c20
-rw-r--r--drivers/net/lne390.c29
-rw-r--r--drivers/net/loopback.c29
-rw-r--r--drivers/net/lp486e.c29
-rw-r--r--drivers/net/mac8390.c22
-rw-r--r--drivers/net/mac89x0.c16
-rw-r--r--drivers/net/macb.c18
-rw-r--r--drivers/net/mace.c34
-rw-r--r--drivers/net/macmace.c6
-rw-r--r--drivers/net/macsonic.c9
-rw-r--r--drivers/net/macvlan.c49
-rw-r--r--drivers/net/meth.c4
-rw-r--r--drivers/net/mlx4/en_cq.c4
-rw-r--r--drivers/net/mlx4/en_netdev.c48
-rw-r--r--drivers/net/mlx4/en_params.c20
-rw-r--r--drivers/net/mlx4/en_rx.c9
-rw-r--r--drivers/net/mlx4/en_tx.c29
-rw-r--r--drivers/net/mlx4/mcg.c25
-rw-r--r--drivers/net/mlx4/mlx4_en.h12
-rw-r--r--drivers/net/mv643xx_eth.c525
-rw-r--r--drivers/net/mvme147.c9
-rw-r--r--drivers/net/myri10ge/myri10ge.c71
-rw-r--r--drivers/net/myri10ge/myri10ge_mcp.h175
-rw-r--r--drivers/net/myri10ge/myri10ge_mcp_gen_header.h2
-rw-r--r--drivers/net/myri_sbus.c27
-rw-r--r--drivers/net/natsemi.c12
-rw-r--r--drivers/net/ne-h8300.c30
-rw-r--r--drivers/net/ne.c29
-rw-r--r--drivers/net/ne2.c29
-rw-r--r--drivers/net/ne2k-pci.c28
-rw-r--r--drivers/net/ne3210.c31
-rw-r--r--drivers/net/netconsole.c9
-rw-r--r--drivers/net/netx-eth.c1
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c10
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c6
-rw-r--r--drivers/net/netxen/netxen_nic_init.c2
-rw-r--r--drivers/net/netxen/netxen_nic_main.c57
-rw-r--r--drivers/net/netxen/netxen_nic_niu.c7
-rw-r--r--drivers/net/ni5010.c15
-rw-r--r--drivers/net/ni52.c43
-rw-r--r--drivers/net/ni65.c39
-rw-r--r--drivers/net/niu.c145
-rw-r--r--drivers/net/niu.h2
-rw-r--r--drivers/net/ns83820.c28
-rw-r--r--drivers/net/pasemi_mac.c14
-rw-r--r--drivers/net/pasemi_mac_ethtool.c4
-rw-r--r--drivers/net/pci-skeleton.c8
-rw-r--r--drivers/net/pcmcia/3c574_cs.c6
-rw-r--r--drivers/net/pcmcia/3c589_cs.c6
-rw-r--r--drivers/net/pcmcia/axnet_cs.c22
-rw-r--r--drivers/net/pcmcia/com20020_cs.c6
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c79
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c6
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c3
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c5
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c6
-rw-r--r--drivers/net/pcnet32.c10
-rw-r--r--drivers/net/phy/Kconfig23
-rw-r--r--drivers/net/phy/Makefile5
-rw-r--r--drivers/net/phy/broadcom.c216
-rw-r--r--drivers/net/phy/et1011c.c113
-rw-r--r--drivers/net/phy/mdio-gpio.c296
-rw-r--r--drivers/net/phy/mdio-ofgpio.c204
-rw-r--r--drivers/net/phy/mdio_bus.c40
-rw-r--r--drivers/net/phy/national.c155
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/phy/phy_device.c54
-rw-r--r--drivers/net/phy/smsc.c28
-rw-r--r--drivers/net/phy/ste10Xp.c137
-rw-r--r--drivers/net/plip.c35
-rw-r--r--drivers/net/ppp_async.c6
-rw-r--r--drivers/net/ppp_generic.c206
-rw-r--r--drivers/net/ppp_synctty.c6
-rw-r--r--drivers/net/pppoe.c5
-rw-r--r--drivers/net/pppol2tp.c94
-rw-r--r--drivers/net/ps3_gelic_net.c6
-rw-r--r--drivers/net/ps3_gelic_wireless.c37
-rw-r--r--drivers/net/ps3_gelic_wireless.h4
-rw-r--r--drivers/net/qla3xxx.c42
-rw-r--r--drivers/net/qlge/qlge_ethtool.c2
-rw-r--r--drivers/net/qlge/qlge_main.c79
-rw-r--r--drivers/net/qlge/qlge_mpi.c2
-rw-r--r--drivers/net/r6040.c51
-rw-r--r--drivers/net/r8169.c57
-rw-r--r--drivers/net/rionet.c31
-rw-r--r--drivers/net/rrunner.c25
-rw-r--r--drivers/net/s2io.c146
-rw-r--r--drivers/net/sb1000.c1
-rw-r--r--drivers/net/sb1250-mac.c11
-rw-r--r--drivers/net/sc92031.c30
-rw-r--r--drivers/net/seeq8005.c13
-rw-r--r--drivers/net/sfc/Kconfig8
-rw-r--r--drivers/net/sfc/Makefile5
-rw-r--r--drivers/net/sfc/boards.c206
-rw-r--r--drivers/net/sfc/boards.h14
-rw-r--r--drivers/net/sfc/efx.c449
-rw-r--r--drivers/net/sfc/efx.h12
-rw-r--r--drivers/net/sfc/enum.h36
-rw-r--r--drivers/net/sfc/ethtool.c175
-rw-r--r--drivers/net/sfc/falcon.c480
-rw-r--r--drivers/net/sfc/falcon.h3
-rw-r--r--drivers/net/sfc/falcon_gmac.c229
-rw-r--r--drivers/net/sfc/falcon_hwdefs.h161
-rw-r--r--drivers/net/sfc/falcon_xmac.c261
-rw-r--r--drivers/net/sfc/gmii.h137
-rw-r--r--drivers/net/sfc/mac.h16
-rw-r--r--drivers/net/sfc/mdio_10g.c483
-rw-r--r--drivers/net/sfc/mdio_10g.h63
-rw-r--r--drivers/net/sfc/mtd.c268
-rw-r--r--drivers/net/sfc/net_driver.h131
-rw-r--r--drivers/net/sfc/phy.h7
-rw-r--r--drivers/net/sfc/rx.c2
-rw-r--r--drivers/net/sfc/selftest.c145
-rw-r--r--drivers/net/sfc/selftest.h14
-rw-r--r--drivers/net/sfc/sfe4001.c225
-rw-r--r--drivers/net/sfc/spi.h34
-rw-r--r--drivers/net/sfc/tenxpress.c796
-rw-r--r--drivers/net/sfc/workarounds.h12
-rw-r--r--drivers/net/sfc/xfp_phy.c29
-rw-r--r--drivers/net/sgiseeq.c7
-rw-r--r--drivers/net/sh_eth.c3
-rw-r--r--drivers/net/sis190.c7
-rw-r--r--drivers/net/sis900.c99
-rw-r--r--drivers/net/skfp/skfddi.c27
-rw-r--r--drivers/net/skge.c103
-rw-r--r--drivers/net/sky2.c56
-rw-r--r--drivers/net/slip.c12
-rw-r--r--drivers/net/smc-mca.c27
-rw-r--r--drivers/net/smc-ultra.c27
-rw-r--r--drivers/net/smc-ultra32.c5
-rw-r--r--drivers/net/smc911x.c16
-rw-r--r--drivers/net/smc9194.c13
-rw-r--r--drivers/net/smc91x.c42
-rw-r--r--drivers/net/smc91x.h58
-rw-r--r--drivers/net/smsc911x.c2071
-rw-r--r--drivers/net/smsc911x.h390
-rw-r--r--drivers/net/smsc9420.c1744
-rw-r--r--drivers/net/smsc9420.h275
-rw-r--r--drivers/net/sonic.c1
-rw-r--r--drivers/net/sonic.h20
-rw-r--r--drivers/net/spider_net.c17
-rw-r--r--drivers/net/spider_net_ethtool.c8
-rw-r--r--drivers/net/starfire.c24
-rw-r--r--drivers/net/stnic.c25
-rw-r--r--drivers/net/sun3_82586.c40
-rw-r--r--drivers/net/sun3lance.c10
-rw-r--r--drivers/net/sunbmac.c22
-rw-r--r--drivers/net/sundance.c6
-rw-r--r--drivers/net/sungem.c65
-rw-r--r--drivers/net/sunhme.c31
-rw-r--r--drivers/net/sunlance.c7
-rw-r--r--drivers/net/sunqe.c15
-rw-r--r--drivers/net/sunvnet.c5
-rw-r--r--drivers/net/tc35815.c49
-rw-r--r--drivers/net/tehuti.c73
-rw-r--r--drivers/net/tg3.c1068
-rw-r--r--drivers/net/tg3.h188
-rw-r--r--drivers/net/tlan.c213
-rw-r--r--drivers/net/tokenring/3c359.c13
-rw-r--r--drivers/net/tokenring/Kconfig2
-rw-r--r--drivers/net/tokenring/abyss.c4
-rw-r--r--drivers/net/tokenring/ibmtr.c15
-rw-r--r--drivers/net/tokenring/lanstreamer.c36
-rw-r--r--drivers/net/tokenring/madgemc.c12
-rw-r--r--drivers/net/tokenring/olympic.c52
-rw-r--r--drivers/net/tokenring/proteon.c7
-rw-r--r--drivers/net/tokenring/skisa.c7
-rw-r--r--drivers/net/tokenring/smctr.c2
-rw-r--r--drivers/net/tokenring/tms380tr.c15
-rw-r--r--drivers/net/tokenring/tmspci.c5
-rw-r--r--drivers/net/tsi108_eth.c12
-rw-r--r--drivers/net/tulip/de2104x.c54
-rw-r--r--drivers/net/tulip/de4x5.c17
-rw-r--r--drivers/net/tulip/dmfe.c7
-rw-r--r--drivers/net/tulip/eeprom.c2
-rw-r--r--drivers/net/tulip/interrupt.c10
-rw-r--r--drivers/net/tulip/tulip_core.c15
-rw-r--r--drivers/net/tulip/uli526x.c8
-rw-r--r--drivers/net/tulip/winbond-840.c16
-rw-r--r--drivers/net/tulip/xircom_cb.c4
-rw-r--r--drivers/net/tun.c33
-rw-r--r--drivers/net/typhoon.c15
-rw-r--r--drivers/net/ucc_geth.c408
-rw-r--r--drivers/net/ucc_geth.h1
-rw-r--r--drivers/net/usb/asix.c5
-rw-r--r--drivers/net/usb/catc.c43
-rw-r--r--drivers/net/usb/dm9601.c5
-rw-r--r--drivers/net/usb/hso.c34
-rw-r--r--drivers/net/usb/kaweth.c17
-rw-r--r--drivers/net/usb/mcs7830.c9
-rw-r--r--drivers/net/usb/pegasus.c74
-rw-r--r--drivers/net/usb/rtl8150.c38
-rw-r--r--drivers/net/usb/smsc95xx.c110
-rw-r--r--drivers/net/usb/usbnet.c5
-rw-r--r--drivers/net/veth.c30
-rw-r--r--drivers/net/via-rhine.c47
-rw-r--r--drivers/net/via-velocity.c28
-rw-r--r--drivers/net/virtio_net.c217
-rw-r--r--drivers/net/wan/Kconfig9
-rw-r--r--drivers/net/wan/Makefile3
-rw-r--r--drivers/net/wan/c101.c6
-rw-r--r--drivers/net/wan/cosa.c1
-rw-r--r--drivers/net/wan/cycx_x25.c91
-rw-r--r--drivers/net/wan/dlci.c37
-rw-r--r--drivers/net/wan/dscc4.c4
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/hd64570.c (renamed from drivers/net/wan/hd6457x.c)255
-rw-r--r--drivers/net/wan/hd64572.c640
-rw-r--r--drivers/net/wan/hdlc_fr.c10
-rw-r--r--drivers/net/wan/hdlc_ppp.c649
-rw-r--r--drivers/net/wan/hostess_sv11.c1
-rw-r--r--drivers/net/wan/ixp4xx_hss.c1325
-rw-r--r--drivers/net/wan/lapbether.c3
-rw-r--r--drivers/net/wan/lmc/lmc_main.c1
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c1
-rw-r--r--drivers/net/wan/n2.c9
-rw-r--r--drivers/net/wan/pc300_drv.c22
-rw-r--r--drivers/net/wan/pc300too.c121
-rw-r--r--drivers/net/wan/pci200syn.c79
-rw-r--r--drivers/net/wan/sbni.c101
-rw-r--r--drivers/net/wan/sdla.c48
-rw-r--r--drivers/net/wan/sealevel.c1
-rw-r--r--drivers/net/wan/syncppp.c1480
-rw-r--r--drivers/net/wan/wanxl.c9
-rw-r--r--drivers/net/wan/x25_asy.c52
-rw-r--r--drivers/net/wan/z85230.c12
-rw-r--r--drivers/net/wd.c29
-rw-r--r--drivers/net/wireless/Kconfig163
-rw-r--r--drivers/net/wireless/Makefile22
-rw-r--r--drivers/net/wireless/adm8211.c52
-rw-r--r--drivers/net/wireless/adm8211.h2
-rw-r--r--drivers/net/wireless/airo.c243
-rw-r--r--drivers/net/wireless/arlan-main.c27
-rw-r--r--drivers/net/wireless/ath5k/ath5k.h102
-rw-r--r--drivers/net/wireless/ath5k/attach.c14
-rw-r--r--drivers/net/wireless/ath5k/base.c191
-rw-r--r--drivers/net/wireless/ath5k/desc.c4
-rw-r--r--drivers/net/wireless/ath5k/dma.c190
-rw-r--r--drivers/net/wireless/ath5k/eeprom.c1194
-rw-r--r--drivers/net/wireless/ath5k/eeprom.h253
-rw-r--r--drivers/net/wireless/ath5k/initvals.c8
-rw-r--r--drivers/net/wireless/ath5k/pcu.c233
-rw-r--r--drivers/net/wireless/ath5k/phy.c9
-rw-r--r--drivers/net/wireless/ath5k/qcu.c37
-rw-r--r--drivers/net/wireless/ath5k/reg.h16
-rw-r--r--drivers/net/wireless/ath5k/reset.c9
-rw-r--r--drivers/net/wireless/ath9k/Kconfig11
-rw-r--r--drivers/net/wireless/ath9k/Makefile9
-rw-r--r--drivers/net/wireless/ath9k/ani.c852
-rw-r--r--drivers/net/wireless/ath9k/ath9k.h432
-rw-r--r--drivers/net/wireless/ath9k/beacon.c319
-rw-r--r--drivers/net/wireless/ath9k/calib.c1021
-rw-r--r--drivers/net/wireless/ath9k/core.c1886
-rw-r--r--drivers/net/wireless/ath9k/core.h926
-rw-r--r--drivers/net/wireless/ath9k/debug.c262
-rw-r--r--drivers/net/wireless/ath9k/eeprom.c2824
-rw-r--r--drivers/net/wireless/ath9k/hw.c8490
-rw-r--r--drivers/net/wireless/ath9k/hw.h165
-rw-r--r--drivers/net/wireless/ath9k/initvals.h1938
-rw-r--r--drivers/net/wireless/ath9k/mac.c946
-rw-r--r--drivers/net/wireless/ath9k/main.c2353
-rw-r--r--drivers/net/wireless/ath9k/phy.c24
-rw-r--r--drivers/net/wireless/ath9k/phy.h5
-rw-r--r--drivers/net/wireless/ath9k/rc.c1832
-rw-r--r--drivers/net/wireless/ath9k/rc.h220
-rw-r--r--drivers/net/wireless/ath9k/recv.c1242
-rw-r--r--drivers/net/wireless/ath9k/reg.h109
-rw-r--r--drivers/net/wireless/ath9k/regd.c85
-rw-r--r--drivers/net/wireless/ath9k/regd.h2
-rw-r--r--drivers/net/wireless/ath9k/xmit.c1532
-rw-r--r--drivers/net/wireless/atmel.c79
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/debugfs.c1
-rw-r--r--drivers/net/wireless/b43/debugfs.h1
-rw-r--r--drivers/net/wireless/b43/dma.c4
-rw-r--r--drivers/net/wireless/b43/main.c280
-rw-r--r--drivers/net/wireless/b43/phy_a.c4
-rw-r--r--drivers/net/wireless/b43/phy_common.c18
-rw-r--r--drivers/net/wireless/b43/phy_g.c28
-rw-r--r--drivers/net/wireless/b43/pio.c3
-rw-r--r--drivers/net/wireless/b43/xmit.c64
-rw-r--r--drivers/net/wireless/b43/xmit.h5
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h5
-rw-r--r--drivers/net/wireless/b43legacy/debugfs.c2
-rw-r--r--drivers/net/wireless/b43legacy/dma.c48
-rw-r--r--drivers/net/wireless/b43legacy/main.c211
-rw-r--r--drivers/net/wireless/b43legacy/phy.c6
-rw-r--r--drivers/net/wireless/b43legacy/pio.c31
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c26
-rw-r--r--drivers/net/wireless/b43legacy/xmit.h2
-rw-r--r--drivers/net/wireless/hostap/Kconfig13
-rw-r--r--drivers/net/wireless/hostap/hostap.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_80211.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c72
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c23
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c253
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.h8
-rw-r--r--drivers/net/wireless/hostap/hostap_common.h13
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c71
-rw-r--r--drivers/net/wireless/hostap/hostap_info.c16
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c157
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c45
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c35
-rw-r--r--drivers/net/wireless/hostap/hostap_wlan.h8
-rw-r--r--drivers/net/wireless/ipw2x00/Kconfig191
-rw-r--r--drivers/net/wireless/ipw2x00/Makefile14
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c (renamed from drivers/net/wireless/ipw2100.c)71
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h (renamed from drivers/net/wireless/ipw2100.h)0
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c (renamed from drivers/net/wireless/ipw2200.c)482
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.h (renamed from drivers/net/wireless/ipw2200.h)2
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_geo.c195
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c293
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c1799
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_tx.c546
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_wx.c760
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig2
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-commands.h148
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-core.h28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-io.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c223
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c125
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h203
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c251
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h64
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c440
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd-check.c108
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c179
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1241
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h536
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c335
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h78
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c83
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h183
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c50
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h173
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h111
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c38
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rfkill.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rfkill.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c230
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c131
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c259
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c438
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c751
-rw-r--r--drivers/net/wireless/libertas/assoc.c69
-rw-r--r--drivers/net/wireless/libertas/cmd.c32
-rw-r--r--drivers/net/wireless/libertas/cmd.h3
-rw-r--r--drivers/net/wireless/libertas/debugfs.c11
-rw-r--r--drivers/net/wireless/libertas/decl.h4
-rw-r--r--drivers/net/wireless/libertas/defs.h14
-rw-r--r--drivers/net/wireless/libertas/dev.h7
-rw-r--r--drivers/net/wireless/libertas/ethtool.c14
-rw-r--r--drivers/net/wireless/libertas/host.h9
-rw-r--r--drivers/net/wireless/libertas/hostcmd.h26
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c3
-rw-r--r--drivers/net/wireless/libertas/if_usb.c7
-rw-r--r--drivers/net/wireless/libertas/main.c135
-rw-r--r--drivers/net/wireless/libertas/persistcfg.c18
-rw-r--r--drivers/net/wireless/libertas/radiotap.h3
-rw-r--r--drivers/net/wireless/libertas/scan.c105
-rw-r--r--drivers/net/wireless/libertas/scan.h4
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/types.h5
-rw-r--r--drivers/net/wireless/libertas/wext.c80
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c5
-rw-r--r--drivers/net/wireless/libertas_tf/main.c8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c294
-rw-r--r--drivers/net/wireless/netwave_cs.c6
-rw-r--r--drivers/net/wireless/orinoco/Makefile12
-rw-r--r--drivers/net/wireless/orinoco/airport.c (renamed from drivers/net/wireless/airport.c)2
-rw-r--r--drivers/net/wireless/orinoco/hermes.c (renamed from drivers/net/wireless/hermes.c)0
-rw-r--r--drivers/net/wireless/orinoco/hermes.h (renamed from drivers/net/wireless/hermes.h)0
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c (renamed from drivers/net/wireless/hermes_dld.c)0
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.h (renamed from drivers/net/wireless/hermes_dld.h)0
-rw-r--r--drivers/net/wireless/orinoco/hermes_rid.h (renamed from drivers/net/wireless/hermes_rid.h)0
-rw-r--r--drivers/net/wireless/orinoco/orinoco.c (renamed from drivers/net/wireless/orinoco.c)231
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h (renamed from drivers/net/wireless/orinoco.h)9
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c (renamed from drivers/net/wireless/orinoco_cs.c)10
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c (renamed from drivers/net/wireless/orinoco_nortel.c)0
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c (renamed from drivers/net/wireless/orinoco_pci.c)0
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.h (renamed from drivers/net/wireless/orinoco_pci.h)0
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c (renamed from drivers/net/wireless/orinoco_plx.c)0
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c (renamed from drivers/net/wireless/orinoco_tmd.c)0
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c (renamed from drivers/net/wireless/spectrum_cs.c)31
-rw-r--r--drivers/net/wireless/p54/p54.h74
-rw-r--r--drivers/net/wireless/p54/p54common.c1620
-rw-r--r--drivers/net/wireless/p54/p54common.h349
-rw-r--r--drivers/net/wireless/p54/p54pci.c70
-rw-r--r--drivers/net/wireless/p54/p54pci.h2
-rw-r--r--drivers/net/wireless/p54/p54usb.c261
-rw-r--r--drivers/net/wireless/p54/p54usb.h1
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c24
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c8
-rw-r--r--drivers/net/wireless/ray_cs.c12
-rw-r--r--drivers/net/wireless/rndis_wlan.c95
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig1
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c373
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c407
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c443
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h113
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c202
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c79
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c47
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.h13
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c86
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.c88
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h33
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c56
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c41
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h37
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c94
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h47
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c145
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h141
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c469
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c689
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h2
-rw-r--r--drivers/net/wireless/rtl818x/Makefile7
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h (renamed from drivers/net/wireless/rtl8180.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c (renamed from drivers/net/wireless/rtl8180_dev.c)48
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_grf5101.c (renamed from drivers/net/wireless/rtl8180_grf5101.c)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_grf5101.h (renamed from drivers/net/wireless/rtl8180_grf5101.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_max2820.c (renamed from drivers/net/wireless/rtl8180_max2820.c)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_max2820.h (renamed from drivers/net/wireless/rtl8180_max2820.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_rtl8225.c (renamed from drivers/net/wireless/rtl8180_rtl8225.c)14
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_rtl8225.h (renamed from drivers/net/wireless/rtl8180_rtl8225.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_sa2400.c (renamed from drivers/net/wireless/rtl8180_sa2400.c)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_sa2400.h (renamed from drivers/net/wireless/rtl8180_sa2400.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h (renamed from drivers/net/wireless/rtl8187.h)8
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c (renamed from drivers/net/wireless/rtl8187_dev.c)459
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_rtl8225.c (renamed from drivers/net/wireless/rtl8187_rtl8225.c)397
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_rtl8225.h (renamed from drivers/net/wireless/rtl8187_rtl8225.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h (renamed from drivers/net/wireless/rtl818x.h)1
-rw-r--r--drivers/net/wireless/strip.c22
-rw-r--r--drivers/net/wireless/wavelan.c122
-rw-r--r--drivers/net/wireless/wavelan_cs.c39
-rw-r--r--drivers/net/wireless/wl3501.h4
-rw-r--r--drivers/net/wireless/wl3501_cs.c11
-rw-r--r--drivers/net/wireless/zd1201.c131
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c44
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netfront.c9
-rw-r--r--drivers/net/xtsonic.c6
-rw-r--r--drivers/net/yellowfin.c42
-rw-r--r--drivers/net/znet.c28
-rw-r--r--drivers/net/zorro8390.c29
690 files changed, 53535 insertions, 35468 deletions
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 7d15e7c6bcad..3d1318a3e688 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -297,8 +297,8 @@ static int __init el1_probe1(struct net_device *dev, int ioaddr)
297 if (el_debug) 297 if (el_debug)
298 printk(KERN_DEBUG "%s", version); 298 printk(KERN_DEBUG "%s", version);
299 299
300 memset(dev->priv, 0, sizeof(struct net_local));
301 lp = netdev_priv(dev); 300 lp = netdev_priv(dev);
301 memset(lp, 0, sizeof(struct net_local));
302 spin_lock_init(&lp->lock); 302 spin_lock_init(&lp->lock);
303 303
304 /* 304 /*
@@ -725,7 +725,6 @@ static void el_receive(struct net_device *dev)
725 insb(DATAPORT, skb_put(skb, pkt_len), pkt_len); 725 insb(DATAPORT, skb_put(skb, pkt_len), pkt_len);
726 skb->protocol = eth_type_trans(skb, dev); 726 skb->protocol = eth_type_trans(skb, dev);
727 netif_rx(skb); 727 netif_rx(skb);
728 dev->last_rx = jiffies;
729 dev->stats.rx_packets++; 728 dev->stats.rx_packets++;
730 dev->stats.rx_bytes += pkt_len; 729 dev->stats.rx_bytes += pkt_len;
731 } 730 }
diff --git a/drivers/net/3c501.h b/drivers/net/3c501.h
index cfec64efff78..f40b0493337a 100644
--- a/drivers/net/3c501.h
+++ b/drivers/net/3c501.h
@@ -23,7 +23,7 @@ static const struct ethtool_ops netdev_ethtool_ops;
23static int el_debug = EL_DEBUG; 23static int el_debug = EL_DEBUG;
24 24
25/* 25/*
26 * Board-specific info in dev->priv. 26 * Board-specific info in netdev_priv(dev).
27 */ 27 */
28 28
29struct net_local 29struct net_local
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 900b0ffdcc68..c092c3929224 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -168,6 +168,21 @@ out:
168} 168}
169#endif 169#endif
170 170
171static const struct net_device_ops el2_netdev_ops = {
172 .ndo_open = el2_open,
173 .ndo_stop = el2_close,
174
175 .ndo_start_xmit = eip_start_xmit,
176 .ndo_tx_timeout = eip_tx_timeout,
177 .ndo_get_stats = eip_get_stats,
178 .ndo_set_multicast_list = eip_set_multicast_list,
179 .ndo_validate_addr = eth_validate_addr,
180 .ndo_change_mtu = eth_change_mtu,
181#ifdef CONFIG_NET_POLL_CONTROLLER
182 .ndo_poll_controller = eip_poll,
183#endif
184};
185
171/* Probe for the Etherlink II card at I/O port base IOADDR, 186/* Probe for the Etherlink II card at I/O port base IOADDR,
172 returning non-zero on success. If found, set the station 187 returning non-zero on success. If found, set the station
173 address and memory parameters in DEVICE. */ 188 address and memory parameters in DEVICE. */
@@ -177,7 +192,6 @@ el2_probe1(struct net_device *dev, int ioaddr)
177 int i, iobase_reg, membase_reg, saved_406, wordlength, retval; 192 int i, iobase_reg, membase_reg, saved_406, wordlength, retval;
178 static unsigned version_printed; 193 static unsigned version_printed;
179 unsigned long vendor_id; 194 unsigned long vendor_id;
180 DECLARE_MAC_BUF(mac);
181 195
182 if (!request_region(ioaddr, EL2_IO_EXTENT, DRV_NAME)) 196 if (!request_region(ioaddr, EL2_IO_EXTENT, DRV_NAME))
183 return -EBUSY; 197 return -EBUSY;
@@ -228,7 +242,7 @@ el2_probe1(struct net_device *dev, int ioaddr)
228 /* Retrieve and print the ethernet address. */ 242 /* Retrieve and print the ethernet address. */
229 for (i = 0; i < 6; i++) 243 for (i = 0; i < 6; i++)
230 dev->dev_addr[i] = inb(ioaddr + i); 244 dev->dev_addr[i] = inb(ioaddr + i);
231 printk("%s", print_mac(mac, dev->dev_addr)); 245 printk("%pM", dev->dev_addr);
232 246
233 /* Map the 8390 back into the window. */ 247 /* Map the 8390 back into the window. */
234 outb(ECNTRL_THIN, ioaddr + 0x406); 248 outb(ECNTRL_THIN, ioaddr + 0x406);
@@ -336,8 +350,7 @@ el2_probe1(struct net_device *dev, int ioaddr)
336 350
337 ei_status.saved_irq = dev->irq; 351 ei_status.saved_irq = dev->irq;
338 352
339 dev->open = &el2_open; 353 dev->netdev_ops = &el2_netdev_ops;
340 dev->stop = &el2_close;
341 dev->ethtool_ops = &netdev_ethtool_ops; 354 dev->ethtool_ops = &netdev_ethtool_ops;
342#ifdef CONFIG_NET_POLL_CONTROLLER 355#ifdef CONFIG_NET_POLL_CONTROLLER
343 dev->poll_controller = eip_poll; 356 dev->poll_controller = eip_poll;
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index a424869707a5..6124605bef05 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -203,10 +203,10 @@ static inline int inb_command(unsigned int base_addr)
203static inline void outb_control(unsigned char val, struct net_device *dev) 203static inline void outb_control(unsigned char val, struct net_device *dev)
204{ 204{
205 outb(val, dev->base_addr + PORT_CONTROL); 205 outb(val, dev->base_addr + PORT_CONTROL);
206 ((elp_device *)(dev->priv))->hcr_val = val; 206 ((elp_device *)(netdev_priv(dev)))->hcr_val = val;
207} 207}
208 208
209#define HCR_VAL(x) (((elp_device *)((x)->priv))->hcr_val) 209#define HCR_VAL(x) (((elp_device *)(netdev_priv(x)))->hcr_val)
210 210
211static inline void outb_command(unsigned char val, unsigned int base_addr) 211static inline void outb_command(unsigned char val, unsigned int base_addr)
212{ 212{
@@ -247,7 +247,7 @@ static inline int get_status(unsigned int base_addr)
247 247
248static inline void set_hsf(struct net_device *dev, int hsf) 248static inline void set_hsf(struct net_device *dev, int hsf)
249{ 249{
250 elp_device *adapter = dev->priv; 250 elp_device *adapter = netdev_priv(dev);
251 unsigned long flags; 251 unsigned long flags;
252 252
253 spin_lock_irqsave(&adapter->lock, flags); 253 spin_lock_irqsave(&adapter->lock, flags);
@@ -260,7 +260,7 @@ static bool start_receive(struct net_device *, pcb_struct *);
260static inline void adapter_reset(struct net_device *dev) 260static inline void adapter_reset(struct net_device *dev)
261{ 261{
262 unsigned long timeout; 262 unsigned long timeout;
263 elp_device *adapter = dev->priv; 263 elp_device *adapter = netdev_priv(dev);
264 unsigned char orig_hcr = adapter->hcr_val; 264 unsigned char orig_hcr = adapter->hcr_val;
265 265
266 outb_control(0, dev); 266 outb_control(0, dev);
@@ -293,7 +293,7 @@ static inline void adapter_reset(struct net_device *dev)
293 */ 293 */
294static inline void check_3c505_dma(struct net_device *dev) 294static inline void check_3c505_dma(struct net_device *dev)
295{ 295{
296 elp_device *adapter = dev->priv; 296 elp_device *adapter = netdev_priv(dev);
297 if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) { 297 if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) {
298 unsigned long flags, f; 298 unsigned long flags, f;
299 printk(KERN_ERR "%s: DMA %s timed out, %d bytes left\n", dev->name, adapter->current_dma.direction ? "download" : "upload", get_dma_residue(dev->dma)); 299 printk(KERN_ERR "%s: DMA %s timed out, %d bytes left\n", dev->name, adapter->current_dma.direction ? "download" : "upload", get_dma_residue(dev->dma));
@@ -340,7 +340,7 @@ static inline bool send_pcb_fast(unsigned int base_addr, unsigned char byte)
340/* Check to see if the receiver needs restarting, and kick it if so */ 340/* Check to see if the receiver needs restarting, and kick it if so */
341static inline void prime_rx(struct net_device *dev) 341static inline void prime_rx(struct net_device *dev)
342{ 342{
343 elp_device *adapter = dev->priv; 343 elp_device *adapter = netdev_priv(dev);
344 while (adapter->rx_active < ELP_RX_PCBS && netif_running(dev)) { 344 while (adapter->rx_active < ELP_RX_PCBS && netif_running(dev)) {
345 if (!start_receive(dev, &adapter->itx_pcb)) 345 if (!start_receive(dev, &adapter->itx_pcb))
346 break; 346 break;
@@ -375,7 +375,7 @@ static bool send_pcb(struct net_device *dev, pcb_struct * pcb)
375{ 375{
376 int i; 376 int i;
377 unsigned long timeout; 377 unsigned long timeout;
378 elp_device *adapter = dev->priv; 378 elp_device *adapter = netdev_priv(dev);
379 unsigned long flags; 379 unsigned long flags;
380 380
381 check_3c505_dma(dev); 381 check_3c505_dma(dev);
@@ -463,7 +463,7 @@ static bool receive_pcb(struct net_device *dev, pcb_struct * pcb)
463 unsigned long timeout; 463 unsigned long timeout;
464 unsigned long flags; 464 unsigned long flags;
465 465
466 elp_device *adapter = dev->priv; 466 elp_device *adapter = netdev_priv(dev);
467 467
468 set_hsf(dev, 0); 468 set_hsf(dev, 0);
469 469
@@ -543,7 +543,7 @@ static bool receive_pcb(struct net_device *dev, pcb_struct * pcb)
543static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb) 543static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb)
544{ 544{
545 bool status; 545 bool status;
546 elp_device *adapter = dev->priv; 546 elp_device *adapter = netdev_priv(dev);
547 547
548 if (elp_debug >= 3) 548 if (elp_debug >= 3)
549 printk(KERN_DEBUG "%s: restarting receiver\n", dev->name); 549 printk(KERN_DEBUG "%s: restarting receiver\n", dev->name);
@@ -571,7 +571,7 @@ static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb)
571static void receive_packet(struct net_device *dev, int len) 571static void receive_packet(struct net_device *dev, int len)
572{ 572{
573 int rlen; 573 int rlen;
574 elp_device *adapter = dev->priv; 574 elp_device *adapter = netdev_priv(dev);
575 void *target; 575 void *target;
576 struct sk_buff *skb; 576 struct sk_buff *skb;
577 unsigned long flags; 577 unsigned long flags;
@@ -638,13 +638,10 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
638 int len; 638 int len;
639 int dlen; 639 int dlen;
640 int icount = 0; 640 int icount = 0;
641 struct net_device *dev; 641 struct net_device *dev = dev_id;
642 elp_device *adapter; 642 elp_device *adapter = netdev_priv(dev);
643 unsigned long timeout; 643 unsigned long timeout;
644 644
645 dev = dev_id;
646 adapter = (elp_device *) dev->priv;
647
648 spin_lock(&adapter->lock); 645 spin_lock(&adapter->lock);
649 646
650 do { 647 do {
@@ -672,7 +669,6 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
672 skb->protocol = eth_type_trans(skb,dev); 669 skb->protocol = eth_type_trans(skb,dev);
673 dev->stats.rx_bytes += skb->len; 670 dev->stats.rx_bytes += skb->len;
674 netif_rx(skb); 671 netif_rx(skb);
675 dev->last_rx = jiffies;
676 } 672 }
677 } 673 }
678 adapter->dmaing = 0; 674 adapter->dmaing = 0;
@@ -838,11 +834,9 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
838 834
839static int elp_open(struct net_device *dev) 835static int elp_open(struct net_device *dev)
840{ 836{
841 elp_device *adapter; 837 elp_device *adapter = netdev_priv(dev);
842 int retval; 838 int retval;
843 839
844 adapter = dev->priv;
845
846 if (elp_debug >= 3) 840 if (elp_debug >= 3)
847 printk(KERN_DEBUG "%s: request to open device\n", dev->name); 841 printk(KERN_DEBUG "%s: request to open device\n", dev->name);
848 842
@@ -971,7 +965,7 @@ static int elp_open(struct net_device *dev)
971 965
972static bool send_packet(struct net_device *dev, struct sk_buff *skb) 966static bool send_packet(struct net_device *dev, struct sk_buff *skb)
973{ 967{
974 elp_device *adapter = dev->priv; 968 elp_device *adapter = netdev_priv(dev);
975 unsigned long target; 969 unsigned long target;
976 unsigned long flags; 970 unsigned long flags;
977 971
@@ -1062,7 +1056,7 @@ static void elp_timeout(struct net_device *dev)
1062static int elp_start_xmit(struct sk_buff *skb, struct net_device *dev) 1056static int elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
1063{ 1057{
1064 unsigned long flags; 1058 unsigned long flags;
1065 elp_device *adapter = dev->priv; 1059 elp_device *adapter = netdev_priv(dev);
1066 1060
1067 spin_lock_irqsave(&adapter->lock, flags); 1061 spin_lock_irqsave(&adapter->lock, flags);
1068 check_3c505_dma(dev); 1062 check_3c505_dma(dev);
@@ -1104,7 +1098,7 @@ static int elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
1104 1098
1105static struct net_device_stats *elp_get_stats(struct net_device *dev) 1099static struct net_device_stats *elp_get_stats(struct net_device *dev)
1106{ 1100{
1107 elp_device *adapter = (elp_device *) dev->priv; 1101 elp_device *adapter = netdev_priv(dev);
1108 1102
1109 if (elp_debug >= 3) 1103 if (elp_debug >= 3)
1110 printk(KERN_DEBUG "%s: request for stats\n", dev->name); 1104 printk(KERN_DEBUG "%s: request for stats\n", dev->name);
@@ -1166,9 +1160,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
1166 1160
1167static int elp_close(struct net_device *dev) 1161static int elp_close(struct net_device *dev)
1168{ 1162{
1169 elp_device *adapter; 1163 elp_device *adapter = netdev_priv(dev);
1170
1171 adapter = dev->priv;
1172 1164
1173 if (elp_debug >= 3) 1165 if (elp_debug >= 3)
1174 printk(KERN_DEBUG "%s: request to close device\n", dev->name); 1166 printk(KERN_DEBUG "%s: request to close device\n", dev->name);
@@ -1209,7 +1201,7 @@ static int elp_close(struct net_device *dev)
1209 1201
1210static void elp_set_mc_list(struct net_device *dev) 1202static void elp_set_mc_list(struct net_device *dev)
1211{ 1203{
1212 elp_device *adapter = (elp_device *) dev->priv; 1204 elp_device *adapter = netdev_priv(dev);
1213 struct dev_mc_list *dmi = dev->mc_list; 1205 struct dev_mc_list *dmi = dev->mc_list;
1214 int i; 1206 int i;
1215 unsigned long flags; 1207 unsigned long flags;
@@ -1380,12 +1372,11 @@ static int __init elp_autodetect(struct net_device *dev)
1380 1372
1381static int __init elplus_setup(struct net_device *dev) 1373static int __init elplus_setup(struct net_device *dev)
1382{ 1374{
1383 elp_device *adapter = dev->priv; 1375 elp_device *adapter = netdev_priv(dev);
1384 int i, tries, tries1, okay; 1376 int i, tries, tries1, okay;
1385 unsigned long timeout; 1377 unsigned long timeout;
1386 unsigned long cookie = 0; 1378 unsigned long cookie = 0;
1387 int err = -ENODEV; 1379 int err = -ENODEV;
1388 DECLARE_MAC_BUF(mac);
1389 1380
1390 /* 1381 /*
1391 * setup adapter structure 1382 * setup adapter structure
@@ -1522,9 +1513,9 @@ static int __init elplus_setup(struct net_device *dev)
1522 * print remainder of startup message 1513 * print remainder of startup message
1523 */ 1514 */
1524 printk(KERN_INFO "%s: 3c505 at %#lx, irq %d, dma %d, " 1515 printk(KERN_INFO "%s: 3c505 at %#lx, irq %d, dma %d, "
1525 "addr %s, ", 1516 "addr %pM, ",
1526 dev->name, dev->base_addr, dev->irq, dev->dma, 1517 dev->name, dev->base_addr, dev->irq, dev->dma,
1527 print_mac(mac, dev->dev_addr)); 1518 dev->dev_addr);
1528 1519
1529 /* 1520 /*
1530 * read more information from the adapter 1521 * read more information from the adapter
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index 030c147211ba..423e65d0ba73 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -357,7 +357,6 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
357 static unsigned char init_ID_done, version_printed; 357 static unsigned char init_ID_done, version_printed;
358 int i, irq, irqval, retval; 358 int i, irq, irqval, retval;
359 struct net_local *lp; 359 struct net_local *lp;
360 DECLARE_MAC_BUF(mac);
361 360
362 if (init_ID_done == 0) { 361 if (init_ID_done == 0) {
363 ushort lrs_state = 0xff; 362 ushort lrs_state = 0xff;
@@ -405,7 +404,7 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
405 outb(0x01, ioaddr + MISC_CTRL); 404 outb(0x01, ioaddr + MISC_CTRL);
406 for (i = 0; i < 6; i++) 405 for (i = 0; i < 6; i++)
407 dev->dev_addr[i] = inb(ioaddr + i); 406 dev->dev_addr[i] = inb(ioaddr + i);
408 printk(" %s", print_mac(mac, dev->dev_addr)); 407 printk(" %pM", dev->dev_addr);
409 408
410 if (mem_start) 409 if (mem_start)
411 net_debug = mem_start & 7; 410 net_debug = mem_start & 7;
@@ -866,7 +865,6 @@ static void el16_rx(struct net_device *dev)
866 865
867 skb->protocol=eth_type_trans(skb,dev); 866 skb->protocol=eth_type_trans(skb,dev);
868 netif_rx(skb); 867 netif_rx(skb);
869 dev->last_rx = jiffies;
870 dev->stats.rx_packets++; 868 dev->stats.rx_packets++;
871 dev->stats.rx_bytes += pkt_len; 869 dev->stats.rx_bytes += pkt_len;
872 } 870 }
@@ -938,14 +936,3 @@ cleanup_module(void)
938} 936}
939#endif /* MODULE */ 937#endif /* MODULE */
940MODULE_LICENSE("GPL"); 938MODULE_LICENSE("GPL");
941
942
943/*
944 * Local variables:
945 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -I/usr/src/linux/drivers/net -Wall -Wstrict-prototypes -O6 -m486 -c 3c507.c"
946 * version-control: t
947 * kept-new-versions: 5
948 * tab-width: 4
949 * c-indent-level: 4
950 * End:
951 */
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index c7a4f3bcc2bc..535c234286ea 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -541,7 +541,6 @@ static int __devinit el3_common_init(struct net_device *dev)
541{ 541{
542 struct el3_private *lp = netdev_priv(dev); 542 struct el3_private *lp = netdev_priv(dev);
543 int err; 543 int err;
544 DECLARE_MAC_BUF(mac);
545 const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"}; 544 const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"};
546 545
547 spin_lock_init(&lp->lock); 546 spin_lock_init(&lp->lock);
@@ -575,9 +574,9 @@ static int __devinit el3_common_init(struct net_device *dev)
575 } 574 }
576 575
577 printk(KERN_INFO "%s: 3c5x9 found at %#3.3lx, %s port, " 576 printk(KERN_INFO "%s: 3c5x9 found at %#3.3lx, %s port, "
578 "address %s, IRQ %d.\n", 577 "address %pM, IRQ %d.\n",
579 dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)], 578 dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)],
580 print_mac(mac, dev->dev_addr), dev->irq); 579 dev->dev_addr, dev->irq);
581 580
582 if (el3_debug > 0) 581 if (el3_debug > 0)
583 printk(KERN_INFO "%s", version); 582 printk(KERN_INFO "%s", version);
@@ -1075,7 +1074,6 @@ el3_rx(struct net_device *dev)
1075 outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ 1074 outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
1076 skb->protocol = eth_type_trans(skb,dev); 1075 skb->protocol = eth_type_trans(skb,dev);
1077 netif_rx(skb); 1076 netif_rx(skb);
1078 dev->last_rx = jiffies;
1079 dev->stats.rx_bytes += pkt_len; 1077 dev->stats.rx_bytes += pkt_len;
1080 dev->stats.rx_packets++; 1078 dev->stats.rx_packets++;
1081 continue; 1079 continue;
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index a0f8b6e2d0af..39ac12233aa7 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -570,7 +570,6 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
570 unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ 570 unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
571 int i; 571 int i;
572 int irq; 572 int irq;
573 DECLARE_MAC_BUF(mac);
574 573
575#ifdef __ISAPNP__ 574#ifdef __ISAPNP__
576 if (idev) { 575 if (idev) {
@@ -636,7 +635,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
636 checksum = (checksum ^ (checksum >> 8)) & 0xff; 635 checksum = (checksum ^ (checksum >> 8)) & 0xff;
637 if (checksum != 0x00) 636 if (checksum != 0x00)
638 printk(" ***INVALID CHECKSUM %4.4x*** ", checksum); 637 printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
639 printk(" %s", print_mac(mac, dev->dev_addr)); 638 printk(" %pM", dev->dev_addr);
640 if (eeprom[16] == 0x11c7) { /* Corkscrew */ 639 if (eeprom[16] == 0x11c7) { /* Corkscrew */
641 if (request_dma(dev->dma, "3c515")) { 640 if (request_dma(dev->dma, "3c515")) {
642 printk(", DMA %d allocation failed", dev->dma); 641 printk(", DMA %d allocation failed", dev->dma);
@@ -1302,7 +1301,6 @@ static int corkscrew_rx(struct net_device *dev)
1302 outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ 1301 outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
1303 skb->protocol = eth_type_trans(skb, dev); 1302 skb->protocol = eth_type_trans(skb, dev);
1304 netif_rx(skb); 1303 netif_rx(skb);
1305 dev->last_rx = jiffies;
1306 dev->stats.rx_packets++; 1304 dev->stats.rx_packets++;
1307 dev->stats.rx_bytes += pkt_len; 1305 dev->stats.rx_bytes += pkt_len;
1308 /* Wait a limited time to go to next packet. */ 1306 /* Wait a limited time to go to next packet. */
@@ -1389,7 +1387,6 @@ static int boomerang_rx(struct net_device *dev)
1389 } 1387 }
1390 skb->protocol = eth_type_trans(skb, dev); 1388 skb->protocol = eth_type_trans(skb, dev);
1391 netif_rx(skb); 1389 netif_rx(skb);
1392 dev->last_rx = jiffies;
1393 dev->stats.rx_packets++; 1390 dev->stats.rx_packets++;
1394 } 1391 }
1395 entry = (++vp->cur_rx) % RX_RING_SIZE; 1392 entry = (++vp->cur_rx) % RX_RING_SIZE;
@@ -1580,11 +1577,3 @@ void cleanup_module(void)
1580 } 1577 }
1581} 1578}
1582#endif /* MODULE */ 1579#endif /* MODULE */
1583
1584/*
1585 * Local variables:
1586 * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c 3c515.c"
1587 * c-indent-level: 4
1588 * tab-width: 4
1589 * End:
1590 */
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index e2ce41d3828e..ff41e1ff5603 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -308,7 +308,7 @@ static int elmc_open(struct net_device *dev)
308 308
309static int __init check586(struct net_device *dev, unsigned long where, unsigned size) 309static int __init check586(struct net_device *dev, unsigned long where, unsigned size)
310{ 310{
311 struct priv *p = (struct priv *) dev->priv; 311 struct priv *p = netdev_priv(dev);
312 char *iscp_addrs[2]; 312 char *iscp_addrs[2];
313 int i = 0; 313 int i = 0;
314 314
@@ -347,9 +347,9 @@ static int __init check586(struct net_device *dev, unsigned long where, unsigned
347 * set iscp at the right place, called by elmc_probe and open586. 347 * set iscp at the right place, called by elmc_probe and open586.
348 */ 348 */
349 349
350void alloc586(struct net_device *dev) 350static void alloc586(struct net_device *dev)
351{ 351{
352 struct priv *p = (struct priv *) dev->priv; 352 struct priv *p = netdev_priv(dev);
353 353
354 elmc_id_reset586(); 354 elmc_id_reset586();
355 DELAY(2); 355 DELAY(2);
@@ -383,7 +383,6 @@ static int elmc_getinfo(char *buf, int slot, void *d)
383{ 383{
384 int len = 0; 384 int len = 0;
385 struct net_device *dev = d; 385 struct net_device *dev = d;
386 DECLARE_MAC_BUF(mac);
387 386
388 if (dev == NULL) 387 if (dev == NULL)
389 return len; 388 return len;
@@ -398,8 +397,8 @@ static int elmc_getinfo(char *buf, int slot, void *d)
398 len += sprintf(buf + len, "Transceiver: %s\n", dev->if_port ? 397 len += sprintf(buf + len, "Transceiver: %s\n", dev->if_port ?
399 "External" : "Internal"); 398 "External" : "Internal");
400 len += sprintf(buf + len, "Device: %s\n", dev->name); 399 len += sprintf(buf + len, "Device: %s\n", dev->name);
401 len += sprintf(buf + len, "Hardware Address: %s\n", 400 len += sprintf(buf + len, "Hardware Address: %pM\n",
402 print_mac(mac, dev->dev_addr)); 401 dev->dev_addr);
403 402
404 return len; 403 return len;
405} /* elmc_getinfo() */ 404} /* elmc_getinfo() */
@@ -416,8 +415,7 @@ static int __init do_elmc_probe(struct net_device *dev)
416 int i = 0; 415 int i = 0;
417 unsigned int size = 0; 416 unsigned int size = 0;
418 int retval; 417 int retval;
419 struct priv *pr = dev->priv; 418 struct priv *pr = netdev_priv(dev);
420 DECLARE_MAC_BUF(mac);
421 419
422 if (MCA_bus == 0) { 420 if (MCA_bus == 0) {
423 return -ENODEV; 421 return -ENODEV;
@@ -543,8 +541,8 @@ static int __init do_elmc_probe(struct net_device *dev)
543 for (i = 0; i < 6; i++) 541 for (i = 0; i < 6; i++)
544 dev->dev_addr[i] = inb(dev->base_addr + i); 542 dev->dev_addr[i] = inb(dev->base_addr + i);
545 543
546 printk(KERN_INFO "%s: hardware address %s\n", 544 printk(KERN_INFO "%s: hardware address %pM\n",
547 dev->name, print_mac(mac, dev->dev_addr)); 545 dev->name, dev->dev_addr);
548 546
549 dev->open = &elmc_open; 547 dev->open = &elmc_open;
550 dev->stop = &elmc_close; 548 dev->stop = &elmc_close;
@@ -578,13 +576,14 @@ err_out:
578 return retval; 576 return retval;
579} 577}
580 578
579#ifdef MODULE
581static void cleanup_card(struct net_device *dev) 580static void cleanup_card(struct net_device *dev)
582{ 581{
583 mca_set_adapter_procfn(((struct priv *) (dev->priv))->slot, NULL, NULL); 582 mca_set_adapter_procfn(((struct priv *)netdev_priv(dev))->slot,
583 NULL, NULL);
584 release_region(dev->base_addr, ELMC_IO_EXTENT); 584 release_region(dev->base_addr, ELMC_IO_EXTENT);
585} 585}
586 586#else
587#ifndef MODULE
588struct net_device * __init elmc_probe(int unit) 587struct net_device * __init elmc_probe(int unit)
589{ 588{
590 struct net_device *dev = alloc_etherdev(sizeof(struct priv)); 589 struct net_device *dev = alloc_etherdev(sizeof(struct priv));
@@ -616,7 +615,7 @@ static int init586(struct net_device *dev)
616 void *ptr; 615 void *ptr;
617 unsigned long s; 616 unsigned long s;
618 int i, result = 0; 617 int i, result = 0;
619 struct priv *p = (struct priv *) dev->priv; 618 struct priv *p = netdev_priv(dev);
620 volatile struct configure_cmd_struct *cfg_cmd; 619 volatile struct configure_cmd_struct *cfg_cmd;
621 volatile struct iasetup_cmd_struct *ias_cmd; 620 volatile struct iasetup_cmd_struct *ias_cmd;
622 volatile struct tdr_cmd_struct *tdr_cmd; 621 volatile struct tdr_cmd_struct *tdr_cmd;
@@ -852,7 +851,7 @@ static void *alloc_rfa(struct net_device *dev, void *ptr)
852 volatile struct rfd_struct *rfd = (struct rfd_struct *) ptr; 851 volatile struct rfd_struct *rfd = (struct rfd_struct *) ptr;
853 volatile struct rbd_struct *rbd; 852 volatile struct rbd_struct *rbd;
854 int i; 853 int i;
855 struct priv *p = (struct priv *) dev->priv; 854 struct priv *p = netdev_priv(dev);
856 855
857 memset((char *) rfd, 0, sizeof(struct rfd_struct) * p->num_recv_buffs); 856 memset((char *) rfd, 0, sizeof(struct rfd_struct) * p->num_recv_buffs);
858 p->rfd_first = rfd; 857 p->rfd_first = rfd;
@@ -913,7 +912,7 @@ elmc_interrupt(int irq, void *dev_id)
913 } 912 }
914 /* reading ELMC_CTRL also clears the INT bit. */ 913 /* reading ELMC_CTRL also clears the INT bit. */
915 914
916 p = (struct priv *) dev->priv; 915 p = netdev_priv(dev);
917 916
918 while ((stat = p->scb->status & STAT_MASK)) 917 while ((stat = p->scb->status & STAT_MASK))
919 { 918 {
@@ -969,7 +968,7 @@ static void elmc_rcv_int(struct net_device *dev)
969 unsigned short totlen; 968 unsigned short totlen;
970 struct sk_buff *skb; 969 struct sk_buff *skb;
971 struct rbd_struct *rbd; 970 struct rbd_struct *rbd;
972 struct priv *p = (struct priv *) dev->priv; 971 struct priv *p = netdev_priv(dev);
973 972
974 for (; (status = p->rfd_top->status) & STAT_COMPL;) { 973 for (; (status = p->rfd_top->status) & STAT_COMPL;) {
975 rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); 974 rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset);
@@ -985,7 +984,6 @@ static void elmc_rcv_int(struct net_device *dev)
985 skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen); 984 skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen);
986 skb->protocol = eth_type_trans(skb, dev); 985 skb->protocol = eth_type_trans(skb, dev);
987 netif_rx(skb); 986 netif_rx(skb);
988 dev->last_rx = jiffies;
989 dev->stats.rx_packets++; 987 dev->stats.rx_packets++;
990 dev->stats.rx_bytes += totlen; 988 dev->stats.rx_bytes += totlen;
991 } else { 989 } else {
@@ -1013,7 +1011,7 @@ static void elmc_rcv_int(struct net_device *dev)
1013 1011
1014static void elmc_rnr_int(struct net_device *dev) 1012static void elmc_rnr_int(struct net_device *dev)
1015{ 1013{
1016 struct priv *p = (struct priv *) dev->priv; 1014 struct priv *p = netdev_priv(dev);
1017 1015
1018 dev->stats.rx_errors++; 1016 dev->stats.rx_errors++;
1019 1017
@@ -1036,7 +1034,7 @@ static void elmc_rnr_int(struct net_device *dev)
1036static void elmc_xmt_int(struct net_device *dev) 1034static void elmc_xmt_int(struct net_device *dev)
1037{ 1035{
1038 int status; 1036 int status;
1039 struct priv *p = (struct priv *) dev->priv; 1037 struct priv *p = netdev_priv(dev);
1040 1038
1041 status = p->xmit_cmds[p->xmit_last]->cmd_status; 1039 status = p->xmit_cmds[p->xmit_last]->cmd_status;
1042 if (!(status & STAT_COMPL)) { 1040 if (!(status & STAT_COMPL)) {
@@ -1079,7 +1077,7 @@ static void elmc_xmt_int(struct net_device *dev)
1079 1077
1080static void startrecv586(struct net_device *dev) 1078static void startrecv586(struct net_device *dev)
1081{ 1079{
1082 struct priv *p = (struct priv *) dev->priv; 1080 struct priv *p = netdev_priv(dev);
1083 1081
1084 p->scb->rfa_offset = make16(p->rfd_first); 1082 p->scb->rfa_offset = make16(p->rfd_first);
1085 p->scb->cmd = RUC_START; 1083 p->scb->cmd = RUC_START;
@@ -1093,7 +1091,7 @@ static void startrecv586(struct net_device *dev)
1093 1091
1094static void elmc_timeout(struct net_device *dev) 1092static void elmc_timeout(struct net_device *dev)
1095{ 1093{
1096 struct priv *p = (struct priv *) dev->priv; 1094 struct priv *p = netdev_priv(dev);
1097 /* COMMAND-UNIT active? */ 1095 /* COMMAND-UNIT active? */
1098 if (p->scb->status & CU_ACTIVE) { 1096 if (p->scb->status & CU_ACTIVE) {
1099#ifdef DEBUG 1097#ifdef DEBUG
@@ -1129,7 +1127,7 @@ static int elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
1129#ifndef NO_NOPCOMMANDS 1127#ifndef NO_NOPCOMMANDS
1130 int next_nop; 1128 int next_nop;
1131#endif 1129#endif
1132 struct priv *p = (struct priv *) dev->priv; 1130 struct priv *p = netdev_priv(dev);
1133 1131
1134 netif_stop_queue(dev); 1132 netif_stop_queue(dev);
1135 1133
@@ -1200,7 +1198,7 @@ static int elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
1200 1198
1201static struct net_device_stats *elmc_get_stats(struct net_device *dev) 1199static struct net_device_stats *elmc_get_stats(struct net_device *dev)
1202{ 1200{
1203 struct priv *p = (struct priv *) dev->priv; 1201 struct priv *p = netdev_priv(dev);
1204 unsigned short crc, aln, rsc, ovrn; 1202 unsigned short crc, aln, rsc, ovrn;
1205 1203
1206 crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */ 1204 crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index abc84f765973..2df3af3b9b20 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -335,7 +335,6 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
335 "82586 initialisation failure", 335 "82586 initialisation failure",
336 "Adapter list configuration error" 336 "Adapter list configuration error"
337 }; 337 };
338 DECLARE_MAC_BUF(mac);
339 338
340 /* Time to play MCA games */ 339 /* Time to play MCA games */
341 340
@@ -405,7 +404,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
405 dev->dev_addr[i] = mca_read_pos(slot,3); 404 dev->dev_addr[i] = mca_read_pos(slot,3);
406 } 405 }
407 406
408 printk("%s: Address %s", dev->name, print_mac(mac, dev->dev_addr)); 407 printk("%s: Address %pM", dev->name, dev->dev_addr);
409 408
410 mca_write_pos(slot, 6, 0); 409 mca_write_pos(slot, 6, 0);
411 mca_write_pos(slot, 7, 0); 410 mca_write_pos(slot, 7, 0);
@@ -1187,7 +1186,6 @@ static void mc32_rx_ring(struct net_device *dev)
1187 } 1186 }
1188 1187
1189 skb->protocol=eth_type_trans(skb,dev); 1188 skb->protocol=eth_type_trans(skb,dev);
1190 dev->last_rx = jiffies;
1191 dev->stats.rx_packets++; 1189 dev->stats.rx_packets++;
1192 dev->stats.rx_bytes += length; 1190 dev->stats.rx_bytes += length;
1193 netif_rx(skb); 1191 netif_rx(skb);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 9ba295d9dd97..665e7fdf27a1 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -803,7 +803,7 @@ static int vortex_suspend(struct pci_dev *pdev, pm_message_t state)
803{ 803{
804 struct net_device *dev = pci_get_drvdata(pdev); 804 struct net_device *dev = pci_get_drvdata(pdev);
805 805
806 if (dev && dev->priv) { 806 if (dev && netdev_priv(dev)) {
807 if (netif_running(dev)) { 807 if (netif_running(dev)) {
808 netif_device_detach(dev); 808 netif_device_detach(dev);
809 vortex_down(dev, 1); 809 vortex_down(dev, 1);
@@ -1013,7 +1013,6 @@ static int __devinit vortex_probe1(struct device *gendev,
1013 const char *print_name = "3c59x"; 1013 const char *print_name = "3c59x";
1014 struct pci_dev *pdev = NULL; 1014 struct pci_dev *pdev = NULL;
1015 struct eisa_device *edev = NULL; 1015 struct eisa_device *edev = NULL;
1016 DECLARE_MAC_BUF(mac);
1017 1016
1018 if (!printed_version) { 1017 if (!printed_version) {
1019 printk (version); 1018 printk (version);
@@ -1026,7 +1025,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1026 } 1025 }
1027 1026
1028 if ((edev = DEVICE_EISA(gendev))) { 1027 if ((edev = DEVICE_EISA(gendev))) {
1029 print_name = edev->dev.bus_id; 1028 print_name = dev_name(&edev->dev);
1030 } 1029 }
1031 } 1030 }
1032 1031
@@ -1206,7 +1205,7 @@ static int __devinit vortex_probe1(struct device *gendev,
1206 ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]); 1205 ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
1207 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 1206 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1208 if (print_info) 1207 if (print_info)
1209 printk(" %s", print_mac(mac, dev->dev_addr)); 1208 printk(" %pM", dev->dev_addr);
1210 /* Unfortunately an all zero eeprom passes the checksum and this 1209 /* Unfortunately an all zero eeprom passes the checksum and this
1211 gets found in the wild in failure cases. Crypto is hard 8) */ 1210 gets found in the wild in failure cases. Crypto is hard 8) */
1212 if (!is_valid_ether_addr(dev->dev_addr)) { 1211 if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -2447,7 +2446,6 @@ static int vortex_rx(struct net_device *dev)
2447 iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */ 2446 iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
2448 skb->protocol = eth_type_trans(skb, dev); 2447 skb->protocol = eth_type_trans(skb, dev);
2449 netif_rx(skb); 2448 netif_rx(skb);
2450 dev->last_rx = jiffies;
2451 dev->stats.rx_packets++; 2449 dev->stats.rx_packets++;
2452 /* Wait a limited time to go to next packet. */ 2450 /* Wait a limited time to go to next packet. */
2453 for (i = 200; i >= 0; i--) 2451 for (i = 200; i >= 0; i--)
@@ -2530,7 +2528,6 @@ boomerang_rx(struct net_device *dev)
2530 } 2528 }
2531 } 2529 }
2532 netif_rx(skb); 2530 netif_rx(skb);
2533 dev->last_rx = jiffies;
2534 dev->stats.rx_packets++; 2531 dev->stats.rx_packets++;
2535 } 2532 }
2536 entry = (++vp->cur_rx) % RX_RING_SIZE; 2533 entry = (++vp->cur_rx) % RX_RING_SIZE;
@@ -2886,7 +2883,7 @@ static void vortex_get_drvinfo(struct net_device *dev,
2886 strcpy(info->bus_info, pci_name(VORTEX_PCI(vp))); 2883 strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
2887 } else { 2884 } else {
2888 if (VORTEX_EISA(vp)) 2885 if (VORTEX_EISA(vp))
2889 sprintf(info->bus_info, vp->gendev->bus_id); 2886 sprintf(info->bus_info, dev_name(vp->gendev));
2890 else 2887 else
2891 sprintf(info->bus_info, "EISA 0x%lx %d", 2888 sprintf(info->bus_info, "EISA 0x%lx %d",
2892 dev->base_addr, dev->irq); 2889 dev->base_addr, dev->irq);
@@ -3217,7 +3214,7 @@ static void __exit vortex_eisa_cleanup(void)
3217#endif 3214#endif
3218 3215
3219 if (compaq_net_device) { 3216 if (compaq_net_device) {
3220 vp = compaq_net_device->priv; 3217 vp = netdev_priv(compaq_net_device);
3221 ioaddr = ioport_map(compaq_net_device->base_addr, 3218 ioaddr = ioport_map(compaq_net_device->base_addr,
3222 VORTEX_TOTAL_SIZE); 3219 VORTEX_TOTAL_SIZE);
3223 3220
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index ad6b8a5b6574..7a331acc34ad 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -336,7 +336,6 @@ static int lance_rx (struct net_device *dev)
336 len); 336 len);
337 skb->protocol = eth_type_trans (skb, dev); 337 skb->protocol = eth_type_trans (skb, dev);
338 netif_rx (skb); 338 netif_rx (skb);
339 dev->last_rx = jiffies;
340 dev->stats.rx_packets++; 339 dev->stats.rx_packets++;
341 dev->stats.rx_bytes += len; 340 dev->stats.rx_bytes += len;
342 } 341 }
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 9ba1f0b46429..dd7ac8290aec 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -457,7 +457,6 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
457 457
458 cp->dev->stats.rx_packets++; 458 cp->dev->stats.rx_packets++;
459 cp->dev->stats.rx_bytes += skb->len; 459 cp->dev->stats.rx_bytes += skb->len;
460 cp->dev->last_rx = jiffies;
461 460
462#if CP_VLAN_TAG_USED 461#if CP_VLAN_TAG_USED
463 if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) { 462 if (cp->vlgrp && (desc->opts2 & cpu_to_le32(RxVlanTagged))) {
@@ -605,7 +604,7 @@ rx_next:
605 604
606 spin_lock_irqsave(&cp->lock, flags); 605 spin_lock_irqsave(&cp->lock, flags);
607 cpw16_f(IntrMask, cp_intr_mask); 606 cpw16_f(IntrMask, cp_intr_mask);
608 __netif_rx_complete(dev, napi); 607 __netif_rx_complete(napi);
609 spin_unlock_irqrestore(&cp->lock, flags); 608 spin_unlock_irqrestore(&cp->lock, flags);
610 } 609 }
611 610
@@ -642,9 +641,9 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
642 } 641 }
643 642
644 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr)) 643 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
645 if (netif_rx_schedule_prep(dev, &cp->napi)) { 644 if (netif_rx_schedule_prep(&cp->napi)) {
646 cpw16_f(IntrMask, cp_norx_intr_mask); 645 cpw16_f(IntrMask, cp_norx_intr_mask);
647 __netif_rx_schedule(dev, &cp->napi); 646 __netif_rx_schedule(&cp->napi);
648 } 647 }
649 648
650 if (status & (TxOK | TxErr | TxEmpty | SWInt)) 649 if (status & (TxOK | TxErr | TxEmpty | SWInt))
@@ -1818,6 +1817,26 @@ static void cp_set_d3_state (struct cp_private *cp)
1818 pci_set_power_state (cp->pdev, PCI_D3hot); 1817 pci_set_power_state (cp->pdev, PCI_D3hot);
1819} 1818}
1820 1819
1820static const struct net_device_ops cp_netdev_ops = {
1821 .ndo_open = cp_open,
1822 .ndo_stop = cp_close,
1823 .ndo_validate_addr = eth_validate_addr,
1824 .ndo_set_multicast_list = cp_set_rx_mode,
1825 .ndo_get_stats = cp_get_stats,
1826 .ndo_do_ioctl = cp_ioctl,
1827 .ndo_start_xmit = cp_start_xmit,
1828 .ndo_tx_timeout = cp_tx_timeout,
1829#if CP_VLAN_TAG_USED
1830 .ndo_vlan_rx_register = cp_vlan_rx_register,
1831#endif
1832#ifdef BROKEN
1833 .ndo_change_mtu = cp_change_mtu,
1834#endif
1835#ifdef CONFIG_NET_POLL_CONTROLLER
1836 .ndo_poll_controller = cp_poll_controller,
1837#endif
1838};
1839
1821static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 1840static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1822{ 1841{
1823 struct net_device *dev; 1842 struct net_device *dev;
@@ -1826,7 +1845,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1826 void __iomem *regs; 1845 void __iomem *regs;
1827 resource_size_t pciaddr; 1846 resource_size_t pciaddr;
1828 unsigned int addr_len, i, pci_using_dac; 1847 unsigned int addr_len, i, pci_using_dac;
1829 DECLARE_MAC_BUF(mac);
1830 1848
1831#ifndef MODULE 1849#ifndef MODULE
1832 static int version_printed; 1850 static int version_printed;
@@ -1931,26 +1949,13 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1931 cpu_to_le16(read_eeprom (regs, i + 7, addr_len)); 1949 cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1932 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 1950 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1933 1951
1934 dev->open = cp_open; 1952 dev->netdev_ops = &cp_netdev_ops;
1935 dev->stop = cp_close;
1936 dev->set_multicast_list = cp_set_rx_mode;
1937 dev->hard_start_xmit = cp_start_xmit;
1938 dev->get_stats = cp_get_stats;
1939 dev->do_ioctl = cp_ioctl;
1940#ifdef CONFIG_NET_POLL_CONTROLLER
1941 dev->poll_controller = cp_poll_controller;
1942#endif
1943 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); 1953 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1944#ifdef BROKEN
1945 dev->change_mtu = cp_change_mtu;
1946#endif
1947 dev->ethtool_ops = &cp_ethtool_ops; 1954 dev->ethtool_ops = &cp_ethtool_ops;
1948 dev->tx_timeout = cp_tx_timeout;
1949 dev->watchdog_timeo = TX_TIMEOUT; 1955 dev->watchdog_timeo = TX_TIMEOUT;
1950 1956
1951#if CP_VLAN_TAG_USED 1957#if CP_VLAN_TAG_USED
1952 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1958 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1953 dev->vlan_rx_register = cp_vlan_rx_register;
1954#endif 1959#endif
1955 1960
1956 if (pci_using_dac) 1961 if (pci_using_dac)
@@ -1967,10 +1972,10 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1967 goto err_out_iomap; 1972 goto err_out_iomap;
1968 1973
1969 printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, " 1974 printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
1970 "%s, IRQ %d\n", 1975 "%pM, IRQ %d\n",
1971 dev->name, 1976 dev->name,
1972 dev->base_addr, 1977 dev->base_addr,
1973 print_mac(mac, dev->dev_addr), 1978 dev->dev_addr,
1974 dev->irq); 1979 dev->irq);
1975 1980
1976 pci_set_drvdata(pdev, dev); 1981 pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 63f906b04899..fe370f805793 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -741,8 +741,7 @@ static void rtl8139_chip_reset (void __iomem *ioaddr)
741} 741}
742 742
743 743
744static int __devinit rtl8139_init_board (struct pci_dev *pdev, 744static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
745 struct net_device **dev_out)
746{ 745{
747 void __iomem *ioaddr; 746 void __iomem *ioaddr;
748 struct net_device *dev; 747 struct net_device *dev;
@@ -756,13 +755,11 @@ static int __devinit rtl8139_init_board (struct pci_dev *pdev,
756 755
757 assert (pdev != NULL); 756 assert (pdev != NULL);
758 757
759 *dev_out = NULL;
760
761 /* dev and priv zeroed in alloc_etherdev */ 758 /* dev and priv zeroed in alloc_etherdev */
762 dev = alloc_etherdev (sizeof (*tp)); 759 dev = alloc_etherdev (sizeof (*tp));
763 if (dev == NULL) { 760 if (dev == NULL) {
764 dev_err(&pdev->dev, "Unable to alloc new net device\n"); 761 dev_err(&pdev->dev, "Unable to alloc new net device\n");
765 return -ENOMEM; 762 return ERR_PTR(-ENOMEM);
766 } 763 }
767 SET_NETDEV_DEV(dev, &pdev->dev); 764 SET_NETDEV_DEV(dev, &pdev->dev);
768 765
@@ -906,16 +903,29 @@ match:
906 903
907 rtl8139_chip_reset (ioaddr); 904 rtl8139_chip_reset (ioaddr);
908 905
909 *dev_out = dev; 906 return dev;
910 return 0;
911 907
912err_out: 908err_out:
913 __rtl8139_cleanup_dev (dev); 909 __rtl8139_cleanup_dev (dev);
914 if (disable_dev_on_err) 910 if (disable_dev_on_err)
915 pci_disable_device (pdev); 911 pci_disable_device (pdev);
916 return rc; 912 return ERR_PTR(rc);
917} 913}
918 914
915static const struct net_device_ops rtl8139_netdev_ops = {
916 .ndo_open = rtl8139_open,
917 .ndo_stop = rtl8139_close,
918 .ndo_get_stats = rtl8139_get_stats,
919 .ndo_validate_addr = eth_validate_addr,
920 .ndo_start_xmit = rtl8139_start_xmit,
921 .ndo_set_multicast_list = rtl8139_set_rx_mode,
922 .ndo_do_ioctl = netdev_ioctl,
923 .ndo_tx_timeout = rtl8139_tx_timeout,
924#ifdef CONFIG_NET_POLL_CONTROLLER
925 .ndo_poll_controller = rtl8139_poll_controller,
926#endif
927
928};
919 929
920static int __devinit rtl8139_init_one (struct pci_dev *pdev, 930static int __devinit rtl8139_init_one (struct pci_dev *pdev,
921 const struct pci_device_id *ent) 931 const struct pci_device_id *ent)
@@ -925,7 +935,6 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
925 int i, addr_len, option; 935 int i, addr_len, option;
926 void __iomem *ioaddr; 936 void __iomem *ioaddr;
927 static int board_idx = -1; 937 static int board_idx = -1;
928 DECLARE_MAC_BUF(mac);
929 938
930 assert (pdev != NULL); 939 assert (pdev != NULL);
931 assert (ent != NULL); 940 assert (ent != NULL);
@@ -959,9 +968,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
959 use_io = 1; 968 use_io = 1;
960 } 969 }
961 970
962 i = rtl8139_init_board (pdev, &dev); 971 dev = rtl8139_init_board (pdev);
963 if (i < 0) 972 if (IS_ERR(dev))
964 return i; 973 return PTR_ERR(dev);
965 974
966 assert (dev != NULL); 975 assert (dev != NULL);
967 tp = netdev_priv(dev); 976 tp = netdev_priv(dev);
@@ -977,19 +986,10 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
977 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 986 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
978 987
979 /* The Rtl8139-specific entries in the device structure. */ 988 /* The Rtl8139-specific entries in the device structure. */
980 dev->open = rtl8139_open; 989 dev->netdev_ops = &rtl8139_netdev_ops;
981 dev->hard_start_xmit = rtl8139_start_xmit;
982 netif_napi_add(dev, &tp->napi, rtl8139_poll, 64);
983 dev->stop = rtl8139_close;
984 dev->get_stats = rtl8139_get_stats;
985 dev->set_multicast_list = rtl8139_set_rx_mode;
986 dev->do_ioctl = netdev_ioctl;
987 dev->ethtool_ops = &rtl8139_ethtool_ops; 990 dev->ethtool_ops = &rtl8139_ethtool_ops;
988 dev->tx_timeout = rtl8139_tx_timeout;
989 dev->watchdog_timeo = TX_TIMEOUT; 991 dev->watchdog_timeo = TX_TIMEOUT;
990#ifdef CONFIG_NET_POLL_CONTROLLER 992 netif_napi_add(dev, &tp->napi, rtl8139_poll, 64);
991 dev->poll_controller = rtl8139_poll_controller;
992#endif
993 993
994 /* note: the hardware is not capable of sg/csum/highdma, however 994 /* note: the hardware is not capable of sg/csum/highdma, however
995 * through the use of skb_copy_and_csum_dev we enable these 995 * through the use of skb_copy_and_csum_dev we enable these
@@ -1024,11 +1024,11 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
1024 pci_set_drvdata (pdev, dev); 1024 pci_set_drvdata (pdev, dev);
1025 1025
1026 printk (KERN_INFO "%s: %s at 0x%lx, " 1026 printk (KERN_INFO "%s: %s at 0x%lx, "
1027 "%s, IRQ %d\n", 1027 "%pM, IRQ %d\n",
1028 dev->name, 1028 dev->name,
1029 board_info[ent->driver_data].name, 1029 board_info[ent->driver_data].name,
1030 dev->base_addr, 1030 dev->base_addr,
1031 print_mac(mac, dev->dev_addr), 1031 dev->dev_addr,
1032 dev->irq); 1032 dev->irq);
1033 1033
1034 printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n", 1034 printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n",
@@ -2026,7 +2026,6 @@ no_early_rx:
2026 2026
2027 skb->protocol = eth_type_trans (skb, dev); 2027 skb->protocol = eth_type_trans (skb, dev);
2028 2028
2029 dev->last_rx = jiffies;
2030 dev->stats.rx_bytes += pkt_size; 2029 dev->stats.rx_bytes += pkt_size;
2031 dev->stats.rx_packets++; 2030 dev->stats.rx_packets++;
2032 2031
@@ -2129,7 +2128,7 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
2129 */ 2128 */
2130 spin_lock_irqsave(&tp->lock, flags); 2129 spin_lock_irqsave(&tp->lock, flags);
2131 RTL_W16_F(IntrMask, rtl8139_intr_mask); 2130 RTL_W16_F(IntrMask, rtl8139_intr_mask);
2132 __netif_rx_complete(dev, napi); 2131 __netif_rx_complete(napi);
2133 spin_unlock_irqrestore(&tp->lock, flags); 2132 spin_unlock_irqrestore(&tp->lock, flags);
2134 } 2133 }
2135 spin_unlock(&tp->rx_lock); 2134 spin_unlock(&tp->rx_lock);
@@ -2179,9 +2178,9 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
2179 /* Receive packets are processed by poll routine. 2178 /* Receive packets are processed by poll routine.
2180 If not running start it now. */ 2179 If not running start it now. */
2181 if (status & RxAckBits){ 2180 if (status & RxAckBits){
2182 if (netif_rx_schedule_prep(dev, &tp->napi)) { 2181 if (netif_rx_schedule_prep(&tp->napi)) {
2183 RTL_W16_F (IntrMask, rtl8139_norx_intr_mask); 2182 RTL_W16_F (IntrMask, rtl8139_norx_intr_mask);
2184 __netif_rx_schedule(dev, &tp->napi); 2183 __netif_rx_schedule(&tp->napi);
2185 } 2184 }
2186 } 2185 }
2187 2186
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index da292e647eb1..b273596368e3 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -457,7 +457,7 @@ static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int del
457 457
458static void i596_display_data(struct net_device *dev) 458static void i596_display_data(struct net_device *dev)
459{ 459{
460 struct i596_private *lp = dev->priv; 460 struct i596_private *lp = dev->ml_priv;
461 struct i596_cmd *cmd; 461 struct i596_cmd *cmd;
462 struct i596_rfd *rfd; 462 struct i596_rfd *rfd;
463 struct i596_rbd *rbd; 463 struct i596_rbd *rbd;
@@ -527,7 +527,7 @@ static irqreturn_t i596_error(int irq, void *dev_id)
527 527
528static inline void init_rx_bufs(struct net_device *dev) 528static inline void init_rx_bufs(struct net_device *dev)
529{ 529{
530 struct i596_private *lp = dev->priv; 530 struct i596_private *lp = dev->ml_priv;
531 int i; 531 int i;
532 struct i596_rfd *rfd; 532 struct i596_rfd *rfd;
533 struct i596_rbd *rbd; 533 struct i596_rbd *rbd;
@@ -578,7 +578,7 @@ static inline void init_rx_bufs(struct net_device *dev)
578 578
579static inline void remove_rx_bufs(struct net_device *dev) 579static inline void remove_rx_bufs(struct net_device *dev)
580{ 580{
581 struct i596_private *lp = dev->priv; 581 struct i596_private *lp = dev->ml_priv;
582 struct i596_rbd *rbd; 582 struct i596_rbd *rbd;
583 int i; 583 int i;
584 584
@@ -592,7 +592,7 @@ static inline void remove_rx_bufs(struct net_device *dev)
592 592
593static void rebuild_rx_bufs(struct net_device *dev) 593static void rebuild_rx_bufs(struct net_device *dev)
594{ 594{
595 struct i596_private *lp = dev->priv; 595 struct i596_private *lp = dev->ml_priv;
596 int i; 596 int i;
597 597
598 /* Ensure rx frame/buffer descriptors are tidy */ 598 /* Ensure rx frame/buffer descriptors are tidy */
@@ -611,7 +611,7 @@ static void rebuild_rx_bufs(struct net_device *dev)
611 611
612static int init_i596_mem(struct net_device *dev) 612static int init_i596_mem(struct net_device *dev)
613{ 613{
614 struct i596_private *lp = dev->priv; 614 struct i596_private *lp = dev->ml_priv;
615#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT) 615#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT)
616 short ioaddr = dev->base_addr; 616 short ioaddr = dev->base_addr;
617#endif 617#endif
@@ -764,7 +764,7 @@ failed:
764 764
765static inline int i596_rx(struct net_device *dev) 765static inline int i596_rx(struct net_device *dev)
766{ 766{
767 struct i596_private *lp = dev->priv; 767 struct i596_private *lp = dev->ml_priv;
768 struct i596_rfd *rfd; 768 struct i596_rfd *rfd;
769 struct i596_rbd *rbd; 769 struct i596_rbd *rbd;
770 int frames = 0; 770 int frames = 0;
@@ -841,7 +841,6 @@ memory_squeeze:
841 pkt_len); 841 pkt_len);
842#endif 842#endif
843 netif_rx(skb); 843 netif_rx(skb);
844 dev->last_rx = jiffies;
845 dev->stats.rx_packets++; 844 dev->stats.rx_packets++;
846 dev->stats.rx_bytes+=pkt_len; 845 dev->stats.rx_bytes+=pkt_len;
847 } 846 }
@@ -959,7 +958,7 @@ static void i596_reset(struct net_device *dev, struct i596_private *lp,
959 958
960static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) 959static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
961{ 960{
962 struct i596_private *lp = dev->priv; 961 struct i596_private *lp = dev->ml_priv;
963 int ioaddr = dev->base_addr; 962 int ioaddr = dev->base_addr;
964 unsigned long flags; 963 unsigned long flags;
965 964
@@ -1029,7 +1028,7 @@ static int i596_open(struct net_device *dev)
1029 1028
1030static void i596_tx_timeout (struct net_device *dev) 1029static void i596_tx_timeout (struct net_device *dev)
1031{ 1030{
1032 struct i596_private *lp = dev->priv; 1031 struct i596_private *lp = dev->ml_priv;
1033 int ioaddr = dev->base_addr; 1032 int ioaddr = dev->base_addr;
1034 1033
1035 /* Transmitter timeout, serious problems. */ 1034 /* Transmitter timeout, serious problems. */
@@ -1058,7 +1057,7 @@ static void i596_tx_timeout (struct net_device *dev)
1058 1057
1059static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) 1058static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1060{ 1059{
1061 struct i596_private *lp = dev->priv; 1060 struct i596_private *lp = dev->ml_priv;
1062 struct tx_cmd *tx_cmd; 1061 struct tx_cmd *tx_cmd;
1063 struct i596_tbd *tbd; 1062 struct i596_tbd *tbd;
1064 short length = skb->len; 1063 short length = skb->len;
@@ -1116,12 +1115,8 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1116 1115
1117static void print_eth(unsigned char *add, char *str) 1116static void print_eth(unsigned char *add, char *str)
1118{ 1117{
1119 DECLARE_MAC_BUF(mac); 1118 printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
1120 DECLARE_MAC_BUF(mac2); 1119 add, add + 6, add, add[12], add[13], str);
1121
1122 printk(KERN_DEBUG "i596 0x%p, %s --> %s %02X%02X, %s\n",
1123 add, print_mac(mac, add + 6), print_mac(mac2, add),
1124 add[12], add[13], str);
1125} 1120}
1126 1121
1127static int io = 0x300; 1122static int io = 0x300;
@@ -1244,9 +1239,9 @@ found:
1244 dev->tx_timeout = i596_tx_timeout; 1239 dev->tx_timeout = i596_tx_timeout;
1245 dev->watchdog_timeo = TX_TIMEOUT; 1240 dev->watchdog_timeo = TX_TIMEOUT;
1246 1241
1247 dev->priv = (void *)(dev->mem_start); 1242 dev->ml_priv = (void *)(dev->mem_start);
1248 1243
1249 lp = dev->priv; 1244 lp = dev->ml_priv;
1250 DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), " 1245 DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), "
1251 "lp->scb at 0x%08lx\n", 1246 "lp->scb at 0x%08lx\n",
1252 dev->name, (unsigned long)lp, 1247 dev->name, (unsigned long)lp,
@@ -1307,7 +1302,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
1307 } 1302 }
1308 1303
1309 ioaddr = dev->base_addr; 1304 ioaddr = dev->base_addr;
1310 lp = dev->priv; 1305 lp = dev->ml_priv;
1311 1306
1312 spin_lock (&lp->lock); 1307 spin_lock (&lp->lock);
1313 1308
@@ -1450,7 +1445,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
1450 1445
1451static int i596_close(struct net_device *dev) 1446static int i596_close(struct net_device *dev)
1452{ 1447{
1453 struct i596_private *lp = dev->priv; 1448 struct i596_private *lp = dev->ml_priv;
1454 unsigned long flags; 1449 unsigned long flags;
1455 1450
1456 netif_stop_queue(dev); 1451 netif_stop_queue(dev);
@@ -1500,7 +1495,7 @@ static int i596_close(struct net_device *dev)
1500 1495
1501static void set_multicast_list(struct net_device *dev) 1496static void set_multicast_list(struct net_device *dev)
1502{ 1497{
1503 struct i596_private *lp = dev->priv; 1498 struct i596_private *lp = dev->ml_priv;
1504 int config = 0, cnt; 1499 int config = 0, cnt;
1505 1500
1506 DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n", 1501 DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
@@ -1544,7 +1539,6 @@ static void set_multicast_list(struct net_device *dev)
1544 struct dev_mc_list *dmi; 1539 struct dev_mc_list *dmi;
1545 unsigned char *cp; 1540 unsigned char *cp;
1546 struct mc_cmd *cmd; 1541 struct mc_cmd *cmd;
1547 DECLARE_MAC_BUF(mac);
1548 1542
1549 if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out")) 1543 if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
1550 return; 1544 return;
@@ -1555,8 +1549,8 @@ static void set_multicast_list(struct net_device *dev)
1555 for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) { 1549 for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
1556 memcpy(cp, dmi->dmi_addr, 6); 1550 memcpy(cp, dmi->dmi_addr, 6);
1557 if (i596_debug > 1) 1551 if (i596_debug > 1)
1558 DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %s\n", 1552 DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
1559 dev->name, print_mac(mac, cp))); 1553 dev->name, cp));
1560 } 1554 }
1561 i596_add_cmd(dev, &cmd->cmd); 1555 i596_add_cmd(dev, &cmd->cmd);
1562 } 1556 }
@@ -1604,9 +1598,3 @@ void __exit cleanup_module(void)
1604} 1598}
1605 1599
1606#endif /* MODULE */ 1600#endif /* MODULE */
1607
1608/*
1609 * Local variables:
1610 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c 82596.c"
1611 * End:
1612 */
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index f72a2e87d569..fbe609a51e02 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -17,6 +17,30 @@ int ei_close(struct net_device *dev)
17} 17}
18EXPORT_SYMBOL(ei_close); 18EXPORT_SYMBOL(ei_close);
19 19
20int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
21{
22 return __ei_start_xmit(skb, dev);
23}
24EXPORT_SYMBOL(ei_start_xmit);
25
26struct net_device_stats *ei_get_stats(struct net_device *dev)
27{
28 return __ei_get_stats(dev);
29}
30EXPORT_SYMBOL(ei_get_stats);
31
32void ei_set_multicast_list(struct net_device *dev)
33{
34 __ei_set_multicast_list(dev);
35}
36EXPORT_SYMBOL(ei_set_multicast_list);
37
38void ei_tx_timeout(struct net_device *dev)
39{
40 __ei_tx_timeout(dev);
41}
42EXPORT_SYMBOL(ei_tx_timeout);
43
20irqreturn_t ei_interrupt(int irq, void *dev_id) 44irqreturn_t ei_interrupt(int irq, void *dev_id)
21{ 45{
22 return __ei_interrupt(irq, dev_id); 46 return __ei_interrupt(irq, dev_id);
@@ -31,9 +55,33 @@ void ei_poll(struct net_device *dev)
31EXPORT_SYMBOL(ei_poll); 55EXPORT_SYMBOL(ei_poll);
32#endif 56#endif
33 57
58const struct net_device_ops ei_netdev_ops = {
59 .ndo_open = ei_open,
60 .ndo_stop = ei_close,
61 .ndo_start_xmit = ei_start_xmit,
62 .ndo_tx_timeout = ei_tx_timeout,
63 .ndo_get_stats = ei_get_stats,
64 .ndo_set_multicast_list = ei_set_multicast_list,
65 .ndo_validate_addr = eth_validate_addr,
66 .ndo_change_mtu = eth_change_mtu,
67#ifdef CONFIG_NET_POLL_CONTROLLER
68 .ndo_poll_controller = ei_poll,
69#endif
70};
71EXPORT_SYMBOL(ei_netdev_ops);
72
34struct net_device *__alloc_ei_netdev(int size) 73struct net_device *__alloc_ei_netdev(int size)
35{ 74{
36 return ____alloc_ei_netdev(size); 75 struct net_device *dev = ____alloc_ei_netdev(size);
76#ifdef CONFIG_COMPAT_NET_DEV_OPS
77 if (dev) {
78 dev->hard_start_xmit = ei_start_xmit;
79 dev->get_stats = ei_get_stats;
80 dev->set_multicast_list = ei_set_multicast_list;
81 dev->tx_timeout = ei_tx_timeout;
82 }
83#endif
84 return dev;
37} 85}
38EXPORT_SYMBOL(__alloc_ei_netdev); 86EXPORT_SYMBOL(__alloc_ei_netdev);
39 87
diff --git a/drivers/net/8390.h b/drivers/net/8390.h
index 8e209f5e7c11..3c61d6d2748a 100644
--- a/drivers/net/8390.h
+++ b/drivers/net/8390.h
@@ -33,11 +33,19 @@ extern void ei_poll(struct net_device *dev);
33extern void eip_poll(struct net_device *dev); 33extern void eip_poll(struct net_device *dev);
34#endif 34#endif
35 35
36
36/* Without I/O delay - non ISA or later chips */ 37/* Without I/O delay - non ISA or later chips */
37extern void NS8390_init(struct net_device *dev, int startp); 38extern void NS8390_init(struct net_device *dev, int startp);
38extern int ei_open(struct net_device *dev); 39extern int ei_open(struct net_device *dev);
39extern int ei_close(struct net_device *dev); 40extern int ei_close(struct net_device *dev);
40extern irqreturn_t ei_interrupt(int irq, void *dev_id); 41extern irqreturn_t ei_interrupt(int irq, void *dev_id);
42extern void ei_tx_timeout(struct net_device *dev);
43extern int ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
44extern void ei_set_multicast_list(struct net_device *dev);
45extern struct net_device_stats *ei_get_stats(struct net_device *dev);
46
47extern const struct net_device_ops ei_netdev_ops;
48
41extern struct net_device *__alloc_ei_netdev(int size); 49extern struct net_device *__alloc_ei_netdev(int size);
42static inline struct net_device *alloc_ei_netdev(void) 50static inline struct net_device *alloc_ei_netdev(void)
43{ 51{
@@ -49,6 +57,13 @@ extern void NS8390p_init(struct net_device *dev, int startp);
49extern int eip_open(struct net_device *dev); 57extern int eip_open(struct net_device *dev);
50extern int eip_close(struct net_device *dev); 58extern int eip_close(struct net_device *dev);
51extern irqreturn_t eip_interrupt(int irq, void *dev_id); 59extern irqreturn_t eip_interrupt(int irq, void *dev_id);
60extern void eip_tx_timeout(struct net_device *dev);
61extern int eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
62extern void eip_set_multicast_list(struct net_device *dev);
63extern struct net_device_stats *eip_get_stats(struct net_device *dev);
64
65extern const struct net_device_ops eip_netdev_ops;
66
52extern struct net_device *__alloc_eip_netdev(int size); 67extern struct net_device *__alloc_eip_netdev(int size);
53static inline struct net_device *alloc_eip_netdev(void) 68static inline struct net_device *alloc_eip_netdev(void)
54{ 69{
diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c
index 4c6eea4611a2..ee70b358a816 100644
--- a/drivers/net/8390p.c
+++ b/drivers/net/8390p.c
@@ -22,6 +22,30 @@ int eip_close(struct net_device *dev)
22} 22}
23EXPORT_SYMBOL(eip_close); 23EXPORT_SYMBOL(eip_close);
24 24
25int eip_start_xmit(struct sk_buff *skb, struct net_device *dev)
26{
27 return __ei_start_xmit(skb, dev);
28}
29EXPORT_SYMBOL(eip_start_xmit);
30
31struct net_device_stats *eip_get_stats(struct net_device *dev)
32{
33 return __ei_get_stats(dev);
34}
35EXPORT_SYMBOL(eip_get_stats);
36
37void eip_set_multicast_list(struct net_device *dev)
38{
39 __ei_set_multicast_list(dev);
40}
41EXPORT_SYMBOL(eip_set_multicast_list);
42
43void eip_tx_timeout(struct net_device *dev)
44{
45 __ei_tx_timeout(dev);
46}
47EXPORT_SYMBOL(eip_tx_timeout);
48
25irqreturn_t eip_interrupt(int irq, void *dev_id) 49irqreturn_t eip_interrupt(int irq, void *dev_id)
26{ 50{
27 return __ei_interrupt(irq, dev_id); 51 return __ei_interrupt(irq, dev_id);
@@ -36,9 +60,33 @@ void eip_poll(struct net_device *dev)
36EXPORT_SYMBOL(eip_poll); 60EXPORT_SYMBOL(eip_poll);
37#endif 61#endif
38 62
63const struct net_device_ops eip_netdev_ops = {
64 .ndo_open = eip_open,
65 .ndo_stop = eip_close,
66 .ndo_start_xmit = eip_start_xmit,
67 .ndo_tx_timeout = eip_tx_timeout,
68 .ndo_get_stats = eip_get_stats,
69 .ndo_set_multicast_list = eip_set_multicast_list,
70 .ndo_validate_addr = eth_validate_addr,
71 .ndo_change_mtu = eth_change_mtu,
72#ifdef CONFIG_NET_POLL_CONTROLLER
73 .ndo_poll_controller = eip_poll,
74#endif
75};
76EXPORT_SYMBOL(eip_netdev_ops);
77
39struct net_device *__alloc_eip_netdev(int size) 78struct net_device *__alloc_eip_netdev(int size)
40{ 79{
41 return ____alloc_ei_netdev(size); 80 struct net_device *dev = ____alloc_ei_netdev(size);
81#ifdef CONFIG_COMPAT_NET_DEV_OPS
82 if (dev) {
83 dev->hard_start_xmit = eip_start_xmit;
84 dev->get_stats = eip_get_stats;
85 dev->set_multicast_list = eip_set_multicast_list;
86 dev->tx_timeout = eip_tx_timeout;
87 }
88#endif
89 return dev;
42} 90}
43EXPORT_SYMBOL(__alloc_eip_netdev); 91EXPORT_SYMBOL(__alloc_eip_netdev);
44 92
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 231eeaf1d552..72a9212da865 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -61,6 +61,7 @@ config DUMMY
61config BONDING 61config BONDING
62 tristate "Bonding driver support" 62 tristate "Bonding driver support"
63 depends on INET 63 depends on INET
64 depends on IPV6 || IPV6=n
64 ---help--- 65 ---help---
65 Say 'Y' or 'M' if you wish to be able to 'bond' multiple Ethernet 66 Say 'Y' or 'M' if you wish to be able to 'bond' multiple Ethernet
66 Channels together. This is called 'Etherchannel' by Cisco, 67 Channels together. This is called 'Etherchannel' by Cisco,
@@ -978,6 +979,20 @@ config SMC911X
978 called smc911x. If you want to compile it as a module, say M 979 called smc911x. If you want to compile it as a module, say M
979 here and read <file:Documentation/kbuild/modules.txt> 980 here and read <file:Documentation/kbuild/modules.txt>
980 981
982config SMSC911X
983 tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
984 depends on ARM || SUPERH
985 select CRC32
986 select MII
987 select PHYLIB
988 ---help---
989 Say Y here if you want support for SMSC LAN911x and LAN921x families
990 of ethernet controllers.
991
992 To compile this driver as a module, choose M here and read
993 <file:Documentation/networking/net-modules.txt>. The module
994 will be called smsc911x.
995
981config NET_VENDOR_RACAL 996config NET_VENDOR_RACAL
982 bool "Racal-Interlan (Micom) NI cards" 997 bool "Racal-Interlan (Micom) NI cards"
983 depends on ISA 998 depends on ISA
@@ -1414,19 +1429,6 @@ config TC35815
1414 depends on NET_PCI && PCI && MIPS 1429 depends on NET_PCI && PCI && MIPS
1415 select PHYLIB 1430 select PHYLIB
1416 1431
1417config EEPRO100
1418 tristate "EtherExpressPro/100 support (eepro100, original Becker driver)"
1419 depends on NET_PCI && PCI
1420 select MII
1421 help
1422 If you have an Intel EtherExpress PRO/100 PCI network (Ethernet)
1423 card, say Y and read the Ethernet-HOWTO, available from
1424 <http://www.tldp.org/docs.html#howto>.
1425
1426 To compile this driver as a module, choose M here. The module
1427 will be called eepro100.
1428
1429
1430config E100 1432config E100
1431 tristate "Intel(R) PRO/100+ support" 1433 tristate "Intel(R) PRO/100+ support"
1432 depends on NET_PCI && PCI 1434 depends on NET_PCI && PCI
@@ -1636,6 +1638,22 @@ config EPIC100
1636 More specific information and updates are available from 1638 More specific information and updates are available from
1637 <http://www.scyld.com/network/epic100.html>. 1639 <http://www.scyld.com/network/epic100.html>.
1638 1640
1641config SMSC9420
1642 tristate "SMSC LAN9420 PCI ethernet adapter support"
1643 depends on NET_PCI && PCI
1644 select CRC32
1645 select PHYLIB
1646 select SMSC_PHY
1647 help
1648 This is a driver for SMSC's LAN9420 PCI ethernet adapter.
1649 Say Y if you want it compiled into the kernel,
1650 and read the Ethernet-HOWTO, available from
1651 <http://www.linuxdoc.org/docs.html#howto>.
1652
1653 This driver is also available as a module. The module will be
1654 called smsc9420. If you want to compile it as a module, say M
1655 here and read <file:Documentation/kbuild/modules.txt>
1656
1639config SUNDANCE 1657config SUNDANCE
1640 tristate "Sundance Alta support" 1658 tristate "Sundance Alta support"
1641 depends on NET_PCI && PCI 1659 depends on NET_PCI && PCI
@@ -1981,10 +1999,10 @@ config IP1000
1981 will be called ipg. This is recommended. 1999 will be called ipg. This is recommended.
1982 2000
1983config IGB 2001config IGB
1984 tristate "Intel(R) 82575 PCI-Express Gigabit Ethernet support" 2002 tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
1985 depends on PCI 2003 depends on PCI
1986 ---help--- 2004 ---help---
1987 This driver supports Intel(R) 82575 gigabit ethernet family of 2005 This driver supports Intel(R) 82575/82576 gigabit ethernet family of
1988 adapters. For more information on how to identify your adapter, go 2006 adapters. For more information on how to identify your adapter, go
1989 to the Adapter & Driver ID Guide at: 2007 to the Adapter & Driver ID Guide at:
1990 2008
@@ -2276,10 +2294,6 @@ config UGETH_MAGIC_PACKET
2276 bool "Magic Packet detection support" 2294 bool "Magic Packet detection support"
2277 depends on UCC_GETH 2295 depends on UCC_GETH
2278 2296
2279config UGETH_FILTERING
2280 bool "Mac address filtering support"
2281 depends on UCC_GETH
2282
2283config UGETH_TX_ON_DEMAND 2297config UGETH_TX_ON_DEMAND
2284 bool "Transmit on Demand support" 2298 bool "Transmit on Demand support"
2285 depends on UCC_GETH 2299 depends on UCC_GETH
@@ -2450,6 +2464,16 @@ config IXGBE_DCA
2450 driver. DCA is a method for warming the CPU cache before data 2464 driver. DCA is a method for warming the CPU cache before data
2451 is used, with the intent of lessening the impact of cache misses. 2465 is used, with the intent of lessening the impact of cache misses.
2452 2466
2467config IXGBE_DCB
2468 bool "Data Center Bridging (DCB) Support"
2469 default n
2470 depends on IXGBE && DCB
2471 ---help---
2472 Say Y here if you want to use Data Center Bridging (DCB) in the
2473 driver.
2474
2475 If unsure, say N.
2476
2453config IXGB 2477config IXGB
2454 tristate "Intel(R) PRO/10GbE support" 2478 tristate "Intel(R) PRO/10GbE support"
2455 depends on PCI 2479 depends on PCI
@@ -2626,7 +2650,7 @@ config RIONET_RX_SIZE
2626 default "128" 2650 default "128"
2627 2651
2628config FDDI 2652config FDDI
2629 bool "FDDI driver support" 2653 tristate "FDDI driver support"
2630 depends on (PCI || EISA || TC) 2654 depends on (PCI || EISA || TC)
2631 help 2655 help
2632 Fiber Distributed Data Interface is a high speed local area network 2656 Fiber Distributed Data Interface is a high speed local area network
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 017383ad5ec6..e5c34b464211 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -53,10 +53,10 @@ obj-$(CONFIG_VORTEX) += 3c59x.o
53obj-$(CONFIG_TYPHOON) += typhoon.o 53obj-$(CONFIG_TYPHOON) += typhoon.o
54obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o 54obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
55obj-$(CONFIG_PCNET32) += pcnet32.o 55obj-$(CONFIG_PCNET32) += pcnet32.o
56obj-$(CONFIG_EEPRO100) += eepro100.o
57obj-$(CONFIG_E100) += e100.o 56obj-$(CONFIG_E100) += e100.o
58obj-$(CONFIG_TLAN) += tlan.o 57obj-$(CONFIG_TLAN) += tlan.o
59obj-$(CONFIG_EPIC100) += epic100.o 58obj-$(CONFIG_EPIC100) += epic100.o
59obj-$(CONFIG_SMSC9420) += smsc9420.o
60obj-$(CONFIG_SIS190) += sis190.o 60obj-$(CONFIG_SIS190) += sis190.o
61obj-$(CONFIG_SIS900) += sis900.o 61obj-$(CONFIG_SIS900) += sis900.o
62obj-$(CONFIG_R6040) += r6040.o 62obj-$(CONFIG_R6040) += r6040.o
@@ -98,7 +98,7 @@ obj-$(CONFIG_HAMACHI) += hamachi.o
98obj-$(CONFIG_NET) += Space.o loopback.o 98obj-$(CONFIG_NET) += Space.o loopback.o
99obj-$(CONFIG_SEEQ8005) += seeq8005.o 99obj-$(CONFIG_SEEQ8005) += seeq8005.o
100obj-$(CONFIG_NET_SB1000) += sb1000.o 100obj-$(CONFIG_NET_SB1000) += sb1000.o
101obj-$(CONFIG_MAC8390) += mac8390.o 101obj-$(CONFIG_MAC8390) += mac8390.o 8390.o
102obj-$(CONFIG_APNE) += apne.o 8390.o 102obj-$(CONFIG_APNE) += apne.o 8390.o
103obj-$(CONFIG_PCMCIA_PCNET) += 8390.o 103obj-$(CONFIG_PCMCIA_PCNET) += 8390.o
104obj-$(CONFIG_HP100) += hp100.o 104obj-$(CONFIG_HP100) += hp100.o
@@ -125,7 +125,7 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o
125obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o 125obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
126obj-$(CONFIG_B44) += b44.o 126obj-$(CONFIG_B44) += b44.o
127obj-$(CONFIG_FORCEDETH) += forcedeth.o 127obj-$(CONFIG_FORCEDETH) += forcedeth.o
128obj-$(CONFIG_NE_H8300) += ne-h8300.o 128obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
129obj-$(CONFIG_AX88796) += ax88796.o 129obj-$(CONFIG_AX88796) += ax88796.o
130 130
131obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o 131obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o
@@ -191,7 +191,7 @@ obj-$(CONFIG_SC92031) += sc92031.o
191obj-$(CONFIG_LP486E) += lp486e.o 191obj-$(CONFIG_LP486E) += lp486e.o
192 192
193obj-$(CONFIG_ETH16I) += eth16i.o 193obj-$(CONFIG_ETH16I) += eth16i.o
194obj-$(CONFIG_ZORRO8390) += zorro8390.o 194obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
195obj-$(CONFIG_HPLANCE) += hplance.o 7990.o 195obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
196obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o 196obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
197obj-$(CONFIG_EQUALIZER) += eql.o 197obj-$(CONFIG_EQUALIZER) += eql.o
@@ -203,7 +203,7 @@ obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
203obj-$(CONFIG_DECLANCE) += declance.o 203obj-$(CONFIG_DECLANCE) += declance.o
204obj-$(CONFIG_ATARILANCE) += atarilance.o 204obj-$(CONFIG_ATARILANCE) += atarilance.o
205obj-$(CONFIG_A2065) += a2065.o 205obj-$(CONFIG_A2065) += a2065.o
206obj-$(CONFIG_HYDRA) += hydra.o 206obj-$(CONFIG_HYDRA) += hydra.o 8390.o
207obj-$(CONFIG_ARIADNE) += ariadne.o 207obj-$(CONFIG_ARIADNE) += ariadne.o
208obj-$(CONFIG_CS89x0) += cs89x0.o 208obj-$(CONFIG_CS89x0) += cs89x0.o
209obj-$(CONFIG_MACSONIC) += macsonic.o 209obj-$(CONFIG_MACSONIC) += macsonic.o
@@ -220,6 +220,7 @@ obj-$(CONFIG_S2IO) += s2io.o
220obj-$(CONFIG_MYRI10GE) += myri10ge/ 220obj-$(CONFIG_MYRI10GE) += myri10ge/
221obj-$(CONFIG_SMC91X) += smc91x.o 221obj-$(CONFIG_SMC91X) += smc91x.o
222obj-$(CONFIG_SMC911X) += smc911x.o 222obj-$(CONFIG_SMC911X) += smc911x.o
223obj-$(CONFIG_SMSC911X) += smsc911x.o
223obj-$(CONFIG_BFIN_MAC) += bfin_mac.o 224obj-$(CONFIG_BFIN_MAC) += bfin_mac.o
224obj-$(CONFIG_DM9000) += dm9000.o 225obj-$(CONFIG_DM9000) += dm9000.o
225obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o 226obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index 9c0837435b68..7a60bdd9a242 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -324,7 +324,6 @@ static int lance_rx (struct net_device *dev)
324 len); 324 len);
325 skb->protocol = eth_type_trans (skb, dev); 325 skb->protocol = eth_type_trans (skb, dev);
326 netif_rx (skb); 326 netif_rx (skb);
327 dev->last_rx = jiffies;
328 dev->stats.rx_packets++; 327 dev->stats.rx_packets++;
329 dev->stats.rx_bytes += len; 328 dev->stats.rx_bytes += len;
330 } 329 }
@@ -710,7 +709,6 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
710 unsigned long board, base_addr, mem_start; 709 unsigned long board, base_addr, mem_start;
711 struct resource *r1, *r2; 710 struct resource *r1, *r2;
712 int err; 711 int err;
713 DECLARE_MAC_BUF(mac);
714 712
715 board = z->resource.start; 713 board = z->resource.start;
716 base_addr = board+A2065_LANCE; 714 base_addr = board+A2065_LANCE;
@@ -787,8 +785,7 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
787 zorro_set_drvdata(z, dev); 785 zorro_set_drvdata(z, dev);
788 786
789 printk(KERN_INFO "%s: A2065 at 0x%08lx, Ethernet Address " 787 printk(KERN_INFO "%s: A2065 at 0x%08lx, Ethernet Address "
790 "%s\n", dev->name, board, 788 "%pM\n", dev->name, board, dev->dev_addr);
791 print_mac(mac, dev->dev_addr));
792 789
793 return 0; 790 return 0;
794} 791}
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index b1448637107f..071a851a2ea1 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -146,7 +146,6 @@ out:
146static int __init ac_probe1(int ioaddr, struct net_device *dev) 146static int __init ac_probe1(int ioaddr, struct net_device *dev)
147{ 147{
148 int i, retval; 148 int i, retval;
149 DECLARE_MAC_BUF(mac);
150 149
151 if (!request_region(ioaddr, AC_IO_EXTENT, DRV_NAME)) 150 if (!request_region(ioaddr, AC_IO_EXTENT, DRV_NAME))
152 return -EBUSY; 151 return -EBUSY;
@@ -171,8 +170,8 @@ static int __init ac_probe1(int ioaddr, struct net_device *dev)
171 for (i = 0; i < 6; i++) 170 for (i = 0; i < 6; i++)
172 dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i); 171 dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i);
173 172
174 printk(KERN_DEBUG "AC3200 in EISA slot %d, node %s", 173 printk(KERN_DEBUG "AC3200 in EISA slot %d, node %pM",
175 ioaddr/0x1000, print_mac(mac, dev->dev_addr)); 174 ioaddr/0x1000, dev->dev_addr);
176#if 0 175#if 0
177 /* Check the vendor ID/prefix. Redundant after checking the EISA ID */ 176 /* Check the vendor ID/prefix. Redundant after checking the EISA ID */
178 if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0 177 if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 66de80b64b92..517fce48d94a 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -450,6 +450,20 @@ static const struct ethtool_ops ace_ethtool_ops = {
450 450
451static void ace_watchdog(struct net_device *dev); 451static void ace_watchdog(struct net_device *dev);
452 452
453static const struct net_device_ops ace_netdev_ops = {
454 .ndo_open = ace_open,
455 .ndo_stop = ace_close,
456 .ndo_tx_timeout = ace_watchdog,
457 .ndo_get_stats = ace_get_stats,
458 .ndo_start_xmit = ace_start_xmit,
459 .ndo_set_multicast_list = ace_set_multicast_list,
460 .ndo_set_mac_address = ace_set_mac_addr,
461 .ndo_change_mtu = ace_change_mtu,
462#if ACENIC_DO_VLAN
463 .ndo_vlan_rx_register = ace_vlan_rx_register,
464#endif
465};
466
453static int __devinit acenic_probe_one(struct pci_dev *pdev, 467static int __devinit acenic_probe_one(struct pci_dev *pdev,
454 const struct pci_device_id *id) 468 const struct pci_device_id *id)
455{ 469{
@@ -466,27 +480,19 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev,
466 480
467 SET_NETDEV_DEV(dev, &pdev->dev); 481 SET_NETDEV_DEV(dev, &pdev->dev);
468 482
469 ap = dev->priv; 483 ap = netdev_priv(dev);
470 ap->pdev = pdev; 484 ap->pdev = pdev;
471 ap->name = pci_name(pdev); 485 ap->name = pci_name(pdev);
472 486
473 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 487 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
474#if ACENIC_DO_VLAN 488#if ACENIC_DO_VLAN
475 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 489 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
476 dev->vlan_rx_register = ace_vlan_rx_register;
477#endif 490#endif
478 491
479 dev->tx_timeout = &ace_watchdog;
480 dev->watchdog_timeo = 5*HZ; 492 dev->watchdog_timeo = 5*HZ;
481 493
482 dev->open = &ace_open; 494 dev->netdev_ops = &ace_netdev_ops;
483 dev->stop = &ace_close;
484 dev->hard_start_xmit = &ace_start_xmit;
485 dev->get_stats = &ace_get_stats;
486 dev->set_multicast_list = &ace_set_multicast_list;
487 SET_ETHTOOL_OPS(dev, &ace_ethtool_ops); 495 SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
488 dev->set_mac_address = &ace_set_mac_addr;
489 dev->change_mtu = &ace_change_mtu;
490 496
491 /* we only display this string ONCE */ 497 /* we only display this string ONCE */
492 if (!boards_found) 498 if (!boards_found)
@@ -892,7 +898,6 @@ static int __devinit ace_init(struct net_device *dev)
892 int board_idx, ecode = 0; 898 int board_idx, ecode = 0;
893 short i; 899 short i;
894 unsigned char cache_size; 900 unsigned char cache_size;
895 DECLARE_MAC_BUF(mac);
896 901
897 ap = netdev_priv(dev); 902 ap = netdev_priv(dev);
898 regs = ap->regs; 903 regs = ap->regs;
@@ -1019,7 +1024,7 @@ static int __devinit ace_init(struct net_device *dev)
1019 dev->dev_addr[4] = (mac2 >> 8) & 0xff; 1024 dev->dev_addr[4] = (mac2 >> 8) & 0xff;
1020 dev->dev_addr[5] = mac2 & 0xff; 1025 dev->dev_addr[5] = mac2 & 0xff;
1021 1026
1022 printk("MAC: %s\n", print_mac(mac, dev->dev_addr)); 1027 printk("MAC: %pM\n", dev->dev_addr);
1023 1028
1024 /* 1029 /*
1025 * Looks like this is necessary to deal with on all architectures, 1030 * Looks like this is necessary to deal with on all architectures,
@@ -2034,7 +2039,6 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
2034#endif 2039#endif
2035 netif_rx(skb); 2040 netif_rx(skb);
2036 2041
2037 dev->last_rx = jiffies;
2038 dev->stats.rx_packets++; 2042 dev->stats.rx_packets++;
2039 dev->stats.rx_bytes += retdesc->size; 2043 dev->stats.rx_bytes += retdesc->size;
2040 2044
@@ -3220,10 +3224,3 @@ static int __devinit read_eeprom_byte(struct net_device *dev,
3220 ap->name, offset); 3224 ap->name, offset);
3221 goto out; 3225 goto out;
3222} 3226}
3223
3224
3225/*
3226 * Local variables:
3227 * compile-command: "gcc -D__SMP__ -D__KERNEL__ -DMODULE -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -DMODVERSIONS -include ../../include/linux/modversions.h -c -o acenic.o acenic.c"
3228 * End:
3229 */
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 07a6697e3635..187ac6eb6e94 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -809,7 +809,6 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
809 lp->coal_conf.rx_packets++; 809 lp->coal_conf.rx_packets++;
810 lp->coal_conf.rx_bytes += pkt_len; 810 lp->coal_conf.rx_bytes += pkt_len;
811 num_rx_pkt++; 811 num_rx_pkt++;
812 dev->last_rx = jiffies;
813 812
814 err_next_pkt: 813 err_next_pkt:
815 lp->rx_ring[rx_index].buff_phy_addr 814 lp->rx_ring[rx_index].buff_phy_addr
@@ -832,7 +831,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
832 if (rx_pkt_limit > 0) { 831 if (rx_pkt_limit > 0) {
833 /* Receive descriptor is empty now */ 832 /* Receive descriptor is empty now */
834 spin_lock_irqsave(&lp->lock, flags); 833 spin_lock_irqsave(&lp->lock, flags);
835 __netif_rx_complete(dev, napi); 834 __netif_rx_complete(napi);
836 writel(VAL0|RINTEN0, mmio + INTEN0); 835 writel(VAL0|RINTEN0, mmio + INTEN0);
837 writel(VAL2 | RDMD0, mmio + CMD0); 836 writel(VAL2 | RDMD0, mmio + CMD0);
838 spin_unlock_irqrestore(&lp->lock, flags); 837 spin_unlock_irqrestore(&lp->lock, flags);
@@ -1171,11 +1170,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1171 1170
1172 /* Check if Receive Interrupt has occurred. */ 1171 /* Check if Receive Interrupt has occurred. */
1173 if (intr0 & RINT0) { 1172 if (intr0 & RINT0) {
1174 if (netif_rx_schedule_prep(dev, &lp->napi)) { 1173 if (netif_rx_schedule_prep(&lp->napi)) {
1175 /* Disable receive interupts */ 1174 /* Disable receive interupts */
1176 writel(RINTEN0, mmio + INTEN0); 1175 writel(RINTEN0, mmio + INTEN0);
1177 /* Schedule a polling routine */ 1176 /* Schedule a polling routine */
1178 __netif_rx_schedule(dev, &lp->napi); 1177 __netif_rx_schedule(&lp->napi);
1179 } else if (intren0 & RINTEN0) { 1178 } else if (intren0 & RINTEN0) {
1180 printk("************Driver bug! \ 1179 printk("************Driver bug! \
1181 interrupt while in poll\n"); 1180 interrupt while in poll\n");
@@ -1821,7 +1820,6 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1821 unsigned long reg_addr,reg_len; 1820 unsigned long reg_addr,reg_len;
1822 struct amd8111e_priv* lp; 1821 struct amd8111e_priv* lp;
1823 struct net_device* dev; 1822 struct net_device* dev;
1824 DECLARE_MAC_BUF(mac);
1825 1823
1826 err = pci_enable_device(pdev); 1824 err = pci_enable_device(pdev);
1827 if(err){ 1825 if(err){
@@ -1963,8 +1961,8 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1963 chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28; 1961 chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
1964 printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", 1962 printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
1965 dev->name,MODULE_VERS); 1963 dev->name,MODULE_VERS);
1966 printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %s\n", 1964 printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
1967 dev->name, chip_version, print_mac(mac, dev->dev_addr)); 1965 dev->name, chip_version, dev->dev_addr);
1968 if (lp->ext_phy_id) 1966 if (lp->ext_phy_id)
1969 printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n", 1967 printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
1970 dev->name, lp->ext_phy_id, lp->ext_phy_addr); 1968 dev->name, lp->ext_phy_id, lp->ext_phy_addr);
diff --git a/drivers/net/apne.c b/drivers/net/apne.c
index 867f6fff543c..1437f5d12121 100644
--- a/drivers/net/apne.c
+++ b/drivers/net/apne.c
@@ -78,9 +78,6 @@
78struct net_device * __init apne_probe(int unit); 78struct net_device * __init apne_probe(int unit);
79static int apne_probe1(struct net_device *dev, int ioaddr); 79static int apne_probe1(struct net_device *dev, int ioaddr);
80 80
81static int apne_open(struct net_device *dev);
82static int apne_close(struct net_device *dev);
83
84static void apne_reset_8390(struct net_device *dev); 81static void apne_reset_8390(struct net_device *dev);
85static void apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, 82static void apne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
86 int ring_page); 83 int ring_page);
@@ -207,7 +204,6 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
207 int neX000, ctron; 204 int neX000, ctron;
208#endif 205#endif
209 static unsigned version_printed; 206 static unsigned version_printed;
210 DECLARE_MAC_BUF(mac);
211 207
212 if (ei_debug && version_printed++ == 0) 208 if (ei_debug && version_printed++ == 0)
213 printk(version); 209 printk(version);
@@ -315,6 +311,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
315 311
316 dev->base_addr = ioaddr; 312 dev->base_addr = ioaddr;
317 dev->irq = IRQ_AMIGA_PORTS; 313 dev->irq = IRQ_AMIGA_PORTS;
314 dev->netdev_ops = &ei_netdev_ops;
318 315
319 /* Install the Interrupt handler */ 316 /* Install the Interrupt handler */
320 i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev); 317 i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
@@ -323,7 +320,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
323 for(i = 0; i < ETHER_ADDR_LEN; i++) 320 for(i = 0; i < ETHER_ADDR_LEN; i++)
324 dev->dev_addr[i] = SA_prom[i]; 321 dev->dev_addr[i] = SA_prom[i];
325 322
326 printk(" %s\n", print_mac(mac, dev->dev_addr)); 323 printk(" %pM\n", dev->dev_addr);
327 324
328 printk("%s: %s found.\n", dev->name, name); 325 printk("%s: %s found.\n", dev->name, name);
329 326
@@ -338,11 +335,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
338 ei_status.block_input = &apne_block_input; 335 ei_status.block_input = &apne_block_input;
339 ei_status.block_output = &apne_block_output; 336 ei_status.block_output = &apne_block_output;
340 ei_status.get_8390_hdr = &apne_get_8390_hdr; 337 ei_status.get_8390_hdr = &apne_get_8390_hdr;
341 dev->open = &apne_open; 338
342 dev->stop = &apne_close;
343#ifdef CONFIG_NET_POLL_CONTROLLER
344 dev->poll_controller = ei_poll;
345#endif
346 NS8390_init(dev, 0); 339 NS8390_init(dev, 0);
347 340
348 pcmcia_ack_int(pcmcia_get_intreq()); /* ack PCMCIA int req */ 341 pcmcia_ack_int(pcmcia_get_intreq()); /* ack PCMCIA int req */
@@ -353,22 +346,6 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
353 return 0; 346 return 0;
354} 347}
355 348
356static int
357apne_open(struct net_device *dev)
358{
359 ei_open(dev);
360 return 0;
361}
362
363static int
364apne_close(struct net_device *dev)
365{
366 if (ei_debug > 1)
367 printk("%s: Shutting down ethercard.\n", dev->name);
368 ei_close(dev);
369 return 0;
370}
371
372/* Hard reset the card. This used to pause for the same period that a 349/* Hard reset the card. This used to pause for the same period that a
373 8390 reset command required, but that shouldn't be necessary. */ 350 8390 reset command required, but that shouldn't be necessary. */
374static void 351static void
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 735fc9476403..54819a34ba0a 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -851,7 +851,6 @@ static void cops_rx(struct net_device *dev)
851 851
852 /* Send packet to a higher place. */ 852 /* Send packet to a higher place. */
853 netif_rx(skb); 853 netif_rx(skb);
854 dev->last_rx = jiffies;
855} 854}
856 855
857static void cops_timeout(struct net_device *dev) 856static void cops_timeout(struct net_device *dev)
@@ -1025,11 +1024,3 @@ static void __exit cops_module_exit(void)
1025module_init(cops_module_init); 1024module_init(cops_module_init);
1026module_exit(cops_module_exit); 1025module_exit(cops_module_exit);
1027#endif /* MODULE */ 1026#endif /* MODULE */
1028
1029/*
1030 * Local variables:
1031 * compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -c cops.c"
1032 * c-basic-offset: 4
1033 * c-file-offsets: ((substatement-open . 0))
1034 * End:
1035 */
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 1071144edd66..9a0be9b2eaad 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -108,7 +108,7 @@ static struct net_device * __init ipddp_init(void)
108 */ 108 */
109static struct net_device_stats *ipddp_get_stats(struct net_device *dev) 109static struct net_device_stats *ipddp_get_stats(struct net_device *dev)
110{ 110{
111 return dev->priv; 111 return netdev_priv(dev);
112} 112}
113 113
114/* 114/*
@@ -170,8 +170,8 @@ static int ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
170 170
171 skb->protocol = htons(ETH_P_ATALK); /* Protocol has changed */ 171 skb->protocol = htons(ETH_P_ATALK); /* Protocol has changed */
172 172
173 ((struct net_device_stats *) dev->priv)->tx_packets++; 173 ((struct net_device_stats *) netdev_priv(dev))->tx_packets++;
174 ((struct net_device_stats *) dev->priv)->tx_bytes+=skb->len; 174 ((struct net_device_stats *) netdev_priv(dev))->tx_bytes += skb->len;
175 175
176 if(aarp_send_ddp(rt->dev, skb, &rt->at, NULL) < 0) 176 if(aarp_send_ddp(rt->dev, skb, &rt->at, NULL) < 0)
177 dev_kfree_skb(skb); 177 dev_kfree_skb(skb);
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index fef5560bc7a2..dc4d49605603 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -726,7 +726,8 @@ static int sendup_buffer (struct net_device *dev)
726 int dnode, snode, llaptype, len; 726 int dnode, snode, llaptype, len;
727 int sklen; 727 int sklen;
728 struct sk_buff *skb; 728 struct sk_buff *skb;
729 struct net_device_stats *stats = &((struct ltpc_private *)dev->priv)->stats; 729 struct ltpc_private *ltpc_priv = netdev_priv(dev);
730 struct net_device_stats *stats = &ltpc_priv->stats;
730 struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf; 731 struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;
731 732
732 if (ltc->command != LT_RCVLAP) { 733 if (ltc->command != LT_RCVLAP) {
@@ -783,7 +784,6 @@ static int sendup_buffer (struct net_device *dev)
783 784
784 /* toss it onwards */ 785 /* toss it onwards */
785 netif_rx(skb); 786 netif_rx(skb);
786 dev->last_rx = jiffies;
787 return 0; 787 return 0;
788} 788}
789 789
@@ -823,7 +823,8 @@ static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
823{ 823{
824 struct sockaddr_at *sa = (struct sockaddr_at *) &ifr->ifr_addr; 824 struct sockaddr_at *sa = (struct sockaddr_at *) &ifr->ifr_addr;
825 /* we'll keep the localtalk node address in dev->pa_addr */ 825 /* we'll keep the localtalk node address in dev->pa_addr */
826 struct atalk_addr *aa = &((struct ltpc_private *)dev->priv)->my_addr; 826 struct ltpc_private *ltpc_priv = netdev_priv(dev);
827 struct atalk_addr *aa = &ltpc_priv->my_addr;
827 struct lt_init c; 828 struct lt_init c;
828 int ltflags; 829 int ltflags;
829 830
@@ -904,7 +905,8 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
904 * and skb->len is the length of the ddp data + ddp header 905 * and skb->len is the length of the ddp data + ddp header
905 */ 906 */
906 907
907 struct net_device_stats *stats = &((struct ltpc_private *)dev->priv)->stats; 908 struct ltpc_private *ltpc_priv = netdev_priv(dev);
909 struct net_device_stats *stats = &ltpc_priv->stats;
908 910
909 int i; 911 int i;
910 struct lt_sendlap cbuf; 912 struct lt_sendlap cbuf;
@@ -943,7 +945,8 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
943 945
944static struct net_device_stats *ltpc_get_stats(struct net_device *dev) 946static struct net_device_stats *ltpc_get_stats(struct net_device *dev)
945{ 947{
946 struct net_device_stats *stats = &((struct ltpc_private *) dev->priv)->stats; 948 struct ltpc_private *ltpc_priv = netdev_priv(dev);
949 struct net_device_stats *stats = &ltpc_priv->stats;
947 return stats; 950 return stats;
948} 951}
949 952
diff --git a/drivers/net/arcnet/arc-rawmode.c b/drivers/net/arcnet/arc-rawmode.c
index e0a18e7c73cb..3ff9affb1a91 100644
--- a/drivers/net/arcnet/arc-rawmode.c
+++ b/drivers/net/arcnet/arc-rawmode.c
@@ -87,7 +87,7 @@ MODULE_LICENSE("GPL");
87static void rx(struct net_device *dev, int bufnum, 87static void rx(struct net_device *dev, int bufnum,
88 struct archdr *pkthdr, int length) 88 struct archdr *pkthdr, int length)
89{ 89{
90 struct arcnet_local *lp = dev->priv; 90 struct arcnet_local *lp = netdev_priv(dev);
91 struct sk_buff *skb; 91 struct sk_buff *skb;
92 struct archdr *pkt = pkthdr; 92 struct archdr *pkt = pkthdr;
93 int ofs; 93 int ofs;
@@ -125,7 +125,6 @@ static void rx(struct net_device *dev, int bufnum,
125 skb->protocol = __constant_htons(ETH_P_ARCNET); 125 skb->protocol = __constant_htons(ETH_P_ARCNET);
126; 126;
127 netif_rx(skb); 127 netif_rx(skb);
128 dev->last_rx = jiffies;
129} 128}
130 129
131 130
@@ -168,7 +167,7 @@ static int build_header(struct sk_buff *skb, struct net_device *dev,
168static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, 167static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
169 int bufnum) 168 int bufnum)
170{ 169{
171 struct arcnet_local *lp = dev->priv; 170 struct arcnet_local *lp = netdev_priv(dev);
172 struct arc_hardware *hard = &pkt->hard; 171 struct arc_hardware *hard = &pkt->hard;
173 int ofs; 172 int ofs;
174 173
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 8c8d6c453c45..e3082a9350fc 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -194,7 +194,7 @@ static int __init arcrimi_found(struct net_device *dev)
194 194
195 /* initialize the rest of the device structure. */ 195 /* initialize the rest of the device structure. */
196 196
197 lp = dev->priv; 197 lp = netdev_priv(dev);
198 lp->card_name = "RIM I"; 198 lp->card_name = "RIM I";
199 lp->hw.command = arcrimi_command; 199 lp->hw.command = arcrimi_command;
200 lp->hw.status = arcrimi_status; 200 lp->hw.status = arcrimi_status;
@@ -260,7 +260,7 @@ err_free_irq:
260 */ 260 */
261static int arcrimi_reset(struct net_device *dev, int really_reset) 261static int arcrimi_reset(struct net_device *dev, int really_reset)
262{ 262{
263 struct arcnet_local *lp = dev->priv; 263 struct arcnet_local *lp = netdev_priv(dev);
264 void __iomem *ioaddr = lp->mem_start + 0x800; 264 void __iomem *ioaddr = lp->mem_start + 0x800;
265 265
266 BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS()); 266 BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS());
@@ -281,7 +281,7 @@ static int arcrimi_reset(struct net_device *dev, int really_reset)
281 281
282static void arcrimi_setmask(struct net_device *dev, int mask) 282static void arcrimi_setmask(struct net_device *dev, int mask)
283{ 283{
284 struct arcnet_local *lp = dev->priv; 284 struct arcnet_local *lp = netdev_priv(dev);
285 void __iomem *ioaddr = lp->mem_start + 0x800; 285 void __iomem *ioaddr = lp->mem_start + 0x800;
286 286
287 AINTMASK(mask); 287 AINTMASK(mask);
@@ -289,7 +289,7 @@ static void arcrimi_setmask(struct net_device *dev, int mask)
289 289
290static int arcrimi_status(struct net_device *dev) 290static int arcrimi_status(struct net_device *dev)
291{ 291{
292 struct arcnet_local *lp = dev->priv; 292 struct arcnet_local *lp = netdev_priv(dev);
293 void __iomem *ioaddr = lp->mem_start + 0x800; 293 void __iomem *ioaddr = lp->mem_start + 0x800;
294 294
295 return ASTATUS(); 295 return ASTATUS();
@@ -297,7 +297,7 @@ static int arcrimi_status(struct net_device *dev)
297 297
298static void arcrimi_command(struct net_device *dev, int cmd) 298static void arcrimi_command(struct net_device *dev, int cmd)
299{ 299{
300 struct arcnet_local *lp = dev->priv; 300 struct arcnet_local *lp = netdev_priv(dev);
301 void __iomem *ioaddr = lp->mem_start + 0x800; 301 void __iomem *ioaddr = lp->mem_start + 0x800;
302 302
303 ACOMMAND(cmd); 303 ACOMMAND(cmd);
@@ -306,7 +306,7 @@ static void arcrimi_command(struct net_device *dev, int cmd)
306static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset, 306static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset,
307 void *buf, int count) 307 void *buf, int count)
308{ 308{
309 struct arcnet_local *lp = dev->priv; 309 struct arcnet_local *lp = netdev_priv(dev);
310 void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset; 310 void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset;
311 TIME("memcpy_toio", count, memcpy_toio(memaddr, buf, count)); 311 TIME("memcpy_toio", count, memcpy_toio(memaddr, buf, count));
312} 312}
@@ -315,7 +315,7 @@ static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset,
315static void arcrimi_copy_from_card(struct net_device *dev, int bufnum, int offset, 315static void arcrimi_copy_from_card(struct net_device *dev, int bufnum, int offset,
316 void *buf, int count) 316 void *buf, int count)
317{ 317{
318 struct arcnet_local *lp = dev->priv; 318 struct arcnet_local *lp = netdev_priv(dev);
319 void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset; 319 void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset;
320 TIME("memcpy_fromio", count, memcpy_fromio(buf, memaddr, count)); 320 TIME("memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
321} 321}
@@ -361,7 +361,7 @@ static int __init arc_rimi_init(void)
361static void __exit arc_rimi_exit(void) 361static void __exit arc_rimi_exit(void)
362{ 362{
363 struct net_device *dev = my_dev; 363 struct net_device *dev = my_dev;
364 struct arcnet_local *lp = dev->priv; 364 struct arcnet_local *lp = netdev_priv(dev);
365 365
366 unregister_netdev(dev); 366 unregister_netdev(dev);
367 iounmap(lp->mem_start); 367 iounmap(lp->mem_start);
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index a5b07691e466..6b53e5ed125c 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -181,7 +181,7 @@ EXPORT_SYMBOL(arcnet_dump_skb);
181static void arcnet_dump_packet(struct net_device *dev, int bufnum, 181static void arcnet_dump_packet(struct net_device *dev, int bufnum,
182 char *desc, int take_arcnet_lock) 182 char *desc, int take_arcnet_lock)
183{ 183{
184 struct arcnet_local *lp = dev->priv; 184 struct arcnet_local *lp = netdev_priv(dev);
185 int i, length; 185 int i, length;
186 unsigned long flags = 0; 186 unsigned long flags = 0;
187 static uint8_t buf[512]; 187 static uint8_t buf[512];
@@ -247,7 +247,7 @@ void arcnet_unregister_proto(struct ArcProto *proto)
247 */ 247 */
248static void release_arcbuf(struct net_device *dev, int bufnum) 248static void release_arcbuf(struct net_device *dev, int bufnum)
249{ 249{
250 struct arcnet_local *lp = dev->priv; 250 struct arcnet_local *lp = netdev_priv(dev);
251 int i; 251 int i;
252 252
253 lp->buf_queue[lp->first_free_buf++] = bufnum; 253 lp->buf_queue[lp->first_free_buf++] = bufnum;
@@ -269,7 +269,7 @@ static void release_arcbuf(struct net_device *dev, int bufnum)
269 */ 269 */
270static int get_arcbuf(struct net_device *dev) 270static int get_arcbuf(struct net_device *dev)
271{ 271{
272 struct arcnet_local *lp = dev->priv; 272 struct arcnet_local *lp = netdev_priv(dev);
273 int buf = -1, i; 273 int buf = -1, i;
274 274
275 if (!atomic_dec_and_test(&lp->buf_lock)) { 275 if (!atomic_dec_and_test(&lp->buf_lock)) {
@@ -357,7 +357,7 @@ struct net_device *alloc_arcdev(char *name)
357 dev = alloc_netdev(sizeof(struct arcnet_local), 357 dev = alloc_netdev(sizeof(struct arcnet_local),
358 name && *name ? name : "arc%d", arcdev_setup); 358 name && *name ? name : "arc%d", arcdev_setup);
359 if(dev) { 359 if(dev) {
360 struct arcnet_local *lp = (struct arcnet_local *) dev->priv; 360 struct arcnet_local *lp = netdev_priv(dev);
361 spin_lock_init(&lp->lock); 361 spin_lock_init(&lp->lock);
362 } 362 }
363 363
@@ -374,7 +374,7 @@ struct net_device *alloc_arcdev(char *name)
374 */ 374 */
375static int arcnet_open(struct net_device *dev) 375static int arcnet_open(struct net_device *dev)
376{ 376{
377 struct arcnet_local *lp = dev->priv; 377 struct arcnet_local *lp = netdev_priv(dev);
378 int count, newmtu, error; 378 int count, newmtu, error;
379 379
380 BUGMSG(D_INIT,"opened."); 380 BUGMSG(D_INIT,"opened.");
@@ -474,7 +474,7 @@ static int arcnet_open(struct net_device *dev)
474/* The inverse routine to arcnet_open - shuts down the card. */ 474/* The inverse routine to arcnet_open - shuts down the card. */
475static int arcnet_close(struct net_device *dev) 475static int arcnet_close(struct net_device *dev)
476{ 476{
477 struct arcnet_local *lp = dev->priv; 477 struct arcnet_local *lp = netdev_priv(dev);
478 478
479 netif_stop_queue(dev); 479 netif_stop_queue(dev);
480 480
@@ -556,7 +556,7 @@ static int arcnet_header(struct sk_buff *skb, struct net_device *dev,
556static int arcnet_rebuild_header(struct sk_buff *skb) 556static int arcnet_rebuild_header(struct sk_buff *skb)
557{ 557{
558 struct net_device *dev = skb->dev; 558 struct net_device *dev = skb->dev;
559 struct arcnet_local *lp = dev->priv; 559 struct arcnet_local *lp = netdev_priv(dev);
560 int status = 0; /* default is failure */ 560 int status = 0; /* default is failure */
561 unsigned short type; 561 unsigned short type;
562 uint8_t daddr=0; 562 uint8_t daddr=0;
@@ -603,7 +603,7 @@ static int arcnet_rebuild_header(struct sk_buff *skb)
603/* Called by the kernel in order to transmit a packet. */ 603/* Called by the kernel in order to transmit a packet. */
604static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev) 604static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
605{ 605{
606 struct arcnet_local *lp = dev->priv; 606 struct arcnet_local *lp = netdev_priv(dev);
607 struct archdr *pkt; 607 struct archdr *pkt;
608 struct arc_rfc1201 *soft; 608 struct arc_rfc1201 *soft;
609 struct ArcProto *proto; 609 struct ArcProto *proto;
@@ -693,7 +693,7 @@ static int arcnet_send_packet(struct sk_buff *skb, struct net_device *dev)
693 */ 693 */
694static int go_tx(struct net_device *dev) 694static int go_tx(struct net_device *dev)
695{ 695{
696 struct arcnet_local *lp = dev->priv; 696 struct arcnet_local *lp = netdev_priv(dev);
697 697
698 BUGMSG(D_DURING, "go_tx: status=%Xh, intmask=%Xh, next_tx=%d, cur_tx=%d\n", 698 BUGMSG(D_DURING, "go_tx: status=%Xh, intmask=%Xh, next_tx=%d, cur_tx=%d\n",
699 ASTATUS(), lp->intmask, lp->next_tx, lp->cur_tx); 699 ASTATUS(), lp->intmask, lp->next_tx, lp->cur_tx);
@@ -723,7 +723,7 @@ static int go_tx(struct net_device *dev)
723static void arcnet_timeout(struct net_device *dev) 723static void arcnet_timeout(struct net_device *dev)
724{ 724{
725 unsigned long flags; 725 unsigned long flags;
726 struct arcnet_local *lp = dev->priv; 726 struct arcnet_local *lp = netdev_priv(dev);
727 int status = ASTATUS(); 727 int status = ASTATUS();
728 char *msg; 728 char *msg;
729 729
@@ -771,8 +771,8 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
771 BUGMSG(D_DURING, "\n"); 771 BUGMSG(D_DURING, "\n");
772 772
773 BUGMSG(D_DURING, "in arcnet_interrupt\n"); 773 BUGMSG(D_DURING, "in arcnet_interrupt\n");
774 774
775 lp = dev->priv; 775 lp = netdev_priv(dev);
776 BUG_ON(!lp); 776 BUG_ON(!lp);
777 777
778 spin_lock(&lp->lock); 778 spin_lock(&lp->lock);
@@ -1010,7 +1010,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
1010 */ 1010 */
1011static void arcnet_rx(struct net_device *dev, int bufnum) 1011static void arcnet_rx(struct net_device *dev, int bufnum)
1012{ 1012{
1013 struct arcnet_local *lp = dev->priv; 1013 struct arcnet_local *lp = netdev_priv(dev);
1014 struct archdr pkt; 1014 struct archdr pkt;
1015 struct arc_rfc1201 *soft; 1015 struct arc_rfc1201 *soft;
1016 int length, ofs; 1016 int length, ofs;
@@ -1074,7 +1074,7 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
1074 */ 1074 */
1075static struct net_device_stats *arcnet_get_stats(struct net_device *dev) 1075static struct net_device_stats *arcnet_get_stats(struct net_device *dev)
1076{ 1076{
1077 struct arcnet_local *lp = dev->priv; 1077 struct arcnet_local *lp = netdev_priv(dev);
1078 return &lp->stats; 1078 return &lp->stats;
1079} 1079}
1080 1080
@@ -1091,7 +1091,7 @@ static void null_rx(struct net_device *dev, int bufnum,
1091static int null_build_header(struct sk_buff *skb, struct net_device *dev, 1091static int null_build_header(struct sk_buff *skb, struct net_device *dev,
1092 unsigned short type, uint8_t daddr) 1092 unsigned short type, uint8_t daddr)
1093{ 1093{
1094 struct arcnet_local *lp = dev->priv; 1094 struct arcnet_local *lp = netdev_priv(dev);
1095 1095
1096 BUGMSG(D_PROTO, 1096 BUGMSG(D_PROTO,
1097 "tx: can't build header for encap %02Xh; load a protocol driver.\n", 1097 "tx: can't build header for encap %02Xh; load a protocol driver.\n",
@@ -1106,7 +1106,7 @@ static int null_build_header(struct sk_buff *skb, struct net_device *dev,
1106static int null_prepare_tx(struct net_device *dev, struct archdr *pkt, 1106static int null_prepare_tx(struct net_device *dev, struct archdr *pkt,
1107 int length, int bufnum) 1107 int length, int bufnum)
1108{ 1108{
1109 struct arcnet_local *lp = dev->priv; 1109 struct arcnet_local *lp = netdev_priv(dev);
1110 struct arc_hardware newpkt; 1110 struct arc_hardware newpkt;
1111 1111
1112 BUGMSG(D_PROTO, "tx: no encap for this host; load a protocol driver.\n"); 1112 BUGMSG(D_PROTO, "tx: no encap for this host; load a protocol driver.\n");
diff --git a/drivers/net/arcnet/capmode.c b/drivers/net/arcnet/capmode.c
index 02cb8f1c1148..30580bbe252d 100644
--- a/drivers/net/arcnet/capmode.c
+++ b/drivers/net/arcnet/capmode.c
@@ -61,7 +61,7 @@ static struct ArcProto capmode_proto =
61}; 61};
62 62
63 63
64void arcnet_cap_init(void) 64static void arcnet_cap_init(void)
65{ 65{
66 int count; 66 int count;
67 67
@@ -103,7 +103,7 @@ MODULE_LICENSE("GPL");
103static void rx(struct net_device *dev, int bufnum, 103static void rx(struct net_device *dev, int bufnum,
104 struct archdr *pkthdr, int length) 104 struct archdr *pkthdr, int length)
105{ 105{
106 struct arcnet_local *lp = (struct arcnet_local *) dev->priv; 106 struct arcnet_local *lp = netdev_priv(dev);
107 struct sk_buff *skb; 107 struct sk_buff *skb;
108 struct archdr *pkt = pkthdr; 108 struct archdr *pkt = pkthdr;
109 char *pktbuf, *pkthdrbuf; 109 char *pktbuf, *pkthdrbuf;
@@ -151,7 +151,6 @@ static void rx(struct net_device *dev, int bufnum,
151 skb->protocol = __constant_htons(ETH_P_ARCNET); 151 skb->protocol = __constant_htons(ETH_P_ARCNET);
152; 152;
153 netif_rx(skb); 153 netif_rx(skb);
154 dev->last_rx = jiffies;
155} 154}
156 155
157 156
@@ -198,7 +197,7 @@ static int build_header(struct sk_buff *skb,
198static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, 197static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
199 int bufnum) 198 int bufnum)
200{ 199{
201 struct arcnet_local *lp = (struct arcnet_local *) dev->priv; 200 struct arcnet_local *lp = netdev_priv(dev);
202 struct arc_hardware *hard = &pkt->hard; 201 struct arc_hardware *hard = &pkt->hard;
203 int ofs; 202 int ofs;
204 203
@@ -250,7 +249,7 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
250 249
251static int ack_tx(struct net_device *dev, int acked) 250static int ack_tx(struct net_device *dev, int acked)
252{ 251{
253 struct arcnet_local *lp = (struct arcnet_local *) dev->priv; 252 struct arcnet_local *lp = netdev_priv(dev);
254 struct sk_buff *ackskb; 253 struct sk_buff *ackskb;
255 struct archdr *ackpkt; 254 struct archdr *ackpkt;
256 int length=sizeof(struct arc_cap); 255 int length=sizeof(struct arc_cap);
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 9289e6103de5..ea53a940272f 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -52,7 +52,7 @@ static int __init com20020isa_probe(struct net_device *dev)
52{ 52{
53 int ioaddr; 53 int ioaddr;
54 unsigned long airqmask; 54 unsigned long airqmask;
55 struct arcnet_local *lp = dev->priv; 55 struct arcnet_local *lp = netdev_priv(dev);
56 int err; 56 int err;
57 57
58 BUGLVL(D_NORMAL) printk(VERSION); 58 BUGLVL(D_NORMAL) printk(VERSION);
@@ -151,7 +151,7 @@ static int __init com20020_init(void)
151 if (node && node != 0xff) 151 if (node && node != 0xff)
152 dev->dev_addr[0] = node; 152 dev->dev_addr[0] = node;
153 153
154 lp = dev->priv; 154 lp = netdev_priv(dev);
155 lp->backplane = backplane; 155 lp->backplane = backplane;
156 lp->clockp = clockp & 7; 156 lp->clockp = clockp & 7;
157 lp->clockm = clockm & 3; 157 lp->clockm = clockm & 3;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index b8c0fa6d401d..8b51f632581d 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -72,7 +72,7 @@ static int __devinit com20020pci_probe(struct pci_dev *pdev, const struct pci_de
72 dev = alloc_arcdev(device); 72 dev = alloc_arcdev(device);
73 if (!dev) 73 if (!dev)
74 return -ENOMEM; 74 return -ENOMEM;
75 lp = dev->priv; 75 lp = netdev_priv(dev);
76 76
77 pci_set_drvdata(pdev, dev); 77 pci_set_drvdata(pdev, dev);
78 78
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 70124a944e7d..103688358fb8 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -89,7 +89,7 @@ static void com20020_copy_to_card(struct net_device *dev, int bufnum,
89int com20020_check(struct net_device *dev) 89int com20020_check(struct net_device *dev)
90{ 90{
91 int ioaddr = dev->base_addr, status; 91 int ioaddr = dev->base_addr, status;
92 struct arcnet_local *lp = dev->priv; 92 struct arcnet_local *lp = netdev_priv(dev);
93 93
94 ARCRESET0; 94 ARCRESET0;
95 mdelay(RESETtime); 95 mdelay(RESETtime);
@@ -159,7 +159,7 @@ int com20020_found(struct net_device *dev, int shared)
159 159
160 /* Initialize the rest of the device structure. */ 160 /* Initialize the rest of the device structure. */
161 161
162 lp = dev->priv; 162 lp = netdev_priv(dev);
163 163
164 lp->hw.owner = THIS_MODULE; 164 lp->hw.owner = THIS_MODULE;
165 lp->hw.command = com20020_command; 165 lp->hw.command = com20020_command;
@@ -233,7 +233,7 @@ int com20020_found(struct net_device *dev, int shared)
233 */ 233 */
234static int com20020_reset(struct net_device *dev, int really_reset) 234static int com20020_reset(struct net_device *dev, int really_reset)
235{ 235{
236 struct arcnet_local *lp = dev->priv; 236 struct arcnet_local *lp = netdev_priv(dev);
237 u_int ioaddr = dev->base_addr; 237 u_int ioaddr = dev->base_addr;
238 u_char inbyte; 238 u_char inbyte;
239 239
@@ -300,7 +300,7 @@ static int com20020_status(struct net_device *dev)
300 300
301static void com20020_close(struct net_device *dev) 301static void com20020_close(struct net_device *dev)
302{ 302{
303 struct arcnet_local *lp = dev->priv; 303 struct arcnet_local *lp = netdev_priv(dev);
304 int ioaddr = dev->base_addr; 304 int ioaddr = dev->base_addr;
305 305
306 /* disable transmitter */ 306 /* disable transmitter */
@@ -317,7 +317,7 @@ static void com20020_close(struct net_device *dev)
317 */ 317 */
318static void com20020_set_mc_list(struct net_device *dev) 318static void com20020_set_mc_list(struct net_device *dev)
319{ 319{
320 struct arcnet_local *lp = dev->priv; 320 struct arcnet_local *lp = netdev_priv(dev);
321 int ioaddr = dev->base_addr; 321 int ioaddr = dev->base_addr;
322 322
323 if ((dev->flags & IFF_PROMISC) && (dev->flags & IFF_UP)) { /* Enable promiscuous mode */ 323 if ((dev->flags & IFF_PROMISC) && (dev->flags & IFF_UP)) { /* Enable promiscuous mode */
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 6599f1046c7b..89de29b3b1dc 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -248,7 +248,7 @@ static int __init com90io_found(struct net_device *dev)
248 return -EBUSY; 248 return -EBUSY;
249 } 249 }
250 250
251 lp = dev->priv; 251 lp = netdev_priv(dev);
252 lp->card_name = "COM90xx I/O"; 252 lp->card_name = "COM90xx I/O";
253 lp->hw.command = com90io_command; 253 lp->hw.command = com90io_command;
254 lp->hw.status = com90io_status; 254 lp->hw.status = com90io_status;
@@ -290,7 +290,7 @@ static int __init com90io_found(struct net_device *dev)
290 */ 290 */
291static int com90io_reset(struct net_device *dev, int really_reset) 291static int com90io_reset(struct net_device *dev, int really_reset)
292{ 292{
293 struct arcnet_local *lp = dev->priv; 293 struct arcnet_local *lp = netdev_priv(dev);
294 short ioaddr = dev->base_addr; 294 short ioaddr = dev->base_addr;
295 295
296 BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS()); 296 BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS());
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index 0d45553ff75c..d762fe46251e 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -468,7 +468,7 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem
468 release_mem_region(shmem, MIRROR_SIZE); 468 release_mem_region(shmem, MIRROR_SIZE);
469 return -ENOMEM; 469 return -ENOMEM;
470 } 470 }
471 lp = dev->priv; 471 lp = netdev_priv(dev);
472 /* find the real shared memory start/end points, including mirrors */ 472 /* find the real shared memory start/end points, including mirrors */
473 473
474 /* guess the actual size of one "memory mirror" - the number of 474 /* guess the actual size of one "memory mirror" - the number of
@@ -583,9 +583,9 @@ static void com90xx_setmask(struct net_device *dev, int mask)
583 * 583 *
584 * However, it does make sure the card is in a defined state. 584 * However, it does make sure the card is in a defined state.
585 */ 585 */
586int com90xx_reset(struct net_device *dev, int really_reset) 586static int com90xx_reset(struct net_device *dev, int really_reset)
587{ 587{
588 struct arcnet_local *lp = dev->priv; 588 struct arcnet_local *lp = netdev_priv(dev);
589 short ioaddr = dev->base_addr; 589 short ioaddr = dev->base_addr;
590 590
591 BUGMSG(D_INIT, "Resetting (status=%02Xh)\n", ASTATUS()); 591 BUGMSG(D_INIT, "Resetting (status=%02Xh)\n", ASTATUS());
@@ -621,7 +621,7 @@ int com90xx_reset(struct net_device *dev, int really_reset)
621static void com90xx_copy_to_card(struct net_device *dev, int bufnum, int offset, 621static void com90xx_copy_to_card(struct net_device *dev, int bufnum, int offset,
622 void *buf, int count) 622 void *buf, int count)
623{ 623{
624 struct arcnet_local *lp = dev->priv; 624 struct arcnet_local *lp = netdev_priv(dev);
625 void __iomem *memaddr = lp->mem_start + bufnum * 512 + offset; 625 void __iomem *memaddr = lp->mem_start + bufnum * 512 + offset;
626 TIME("memcpy_toio", count, memcpy_toio(memaddr, buf, count)); 626 TIME("memcpy_toio", count, memcpy_toio(memaddr, buf, count));
627} 627}
@@ -630,7 +630,7 @@ static void com90xx_copy_to_card(struct net_device *dev, int bufnum, int offset,
630static void com90xx_copy_from_card(struct net_device *dev, int bufnum, int offset, 630static void com90xx_copy_from_card(struct net_device *dev, int bufnum, int offset,
631 void *buf, int count) 631 void *buf, int count)
632{ 632{
633 struct arcnet_local *lp = dev->priv; 633 struct arcnet_local *lp = netdev_priv(dev);
634 void __iomem *memaddr = lp->mem_start + bufnum * 512 + offset; 634 void __iomem *memaddr = lp->mem_start + bufnum * 512 + offset;
635 TIME("memcpy_fromio", count, memcpy_fromio(buf, memaddr, count)); 635 TIME("memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
636} 636}
@@ -656,7 +656,7 @@ static void __exit com90xx_exit(void)
656 656
657 for (count = 0; count < numcards; count++) { 657 for (count = 0; count < numcards; count++) {
658 dev = cards[count]; 658 dev = cards[count];
659 lp = dev->priv; 659 lp = netdev_priv(dev);
660 660
661 unregister_netdev(dev); 661 unregister_netdev(dev);
662 free_irq(dev->irq, dev); 662 free_irq(dev->irq, dev);
diff --git a/drivers/net/arcnet/rfc1051.c b/drivers/net/arcnet/rfc1051.c
index dab185bc51f1..49d39a9cb696 100644
--- a/drivers/net/arcnet/rfc1051.c
+++ b/drivers/net/arcnet/rfc1051.c
@@ -88,7 +88,7 @@ MODULE_LICENSE("GPL");
88 */ 88 */
89static __be16 type_trans(struct sk_buff *skb, struct net_device *dev) 89static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
90{ 90{
91 struct arcnet_local *lp = dev->priv; 91 struct arcnet_local *lp = netdev_priv(dev);
92 struct archdr *pkt = (struct archdr *) skb->data; 92 struct archdr *pkt = (struct archdr *) skb->data;
93 struct arc_rfc1051 *soft = &pkt->soft.rfc1051; 93 struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
94 int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; 94 int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
@@ -125,7 +125,7 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
125static void rx(struct net_device *dev, int bufnum, 125static void rx(struct net_device *dev, int bufnum,
126 struct archdr *pkthdr, int length) 126 struct archdr *pkthdr, int length)
127{ 127{
128 struct arcnet_local *lp = dev->priv; 128 struct arcnet_local *lp = netdev_priv(dev);
129 struct sk_buff *skb; 129 struct sk_buff *skb;
130 struct archdr *pkt = pkthdr; 130 struct archdr *pkt = pkthdr;
131 int ofs; 131 int ofs;
@@ -159,7 +159,6 @@ static void rx(struct net_device *dev, int bufnum,
159 159
160 skb->protocol = type_trans(skb, dev); 160 skb->protocol = type_trans(skb, dev);
161 netif_rx(skb); 161 netif_rx(skb);
162 dev->last_rx = jiffies;
163} 162}
164 163
165 164
@@ -169,7 +168,7 @@ static void rx(struct net_device *dev, int bufnum,
169static int build_header(struct sk_buff *skb, struct net_device *dev, 168static int build_header(struct sk_buff *skb, struct net_device *dev,
170 unsigned short type, uint8_t daddr) 169 unsigned short type, uint8_t daddr)
171{ 170{
172 struct arcnet_local *lp = dev->priv; 171 struct arcnet_local *lp = netdev_priv(dev);
173 int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE; 172 int hdr_size = ARC_HDR_SIZE + RFC1051_HDR_SIZE;
174 struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size); 173 struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
175 struct arc_rfc1051 *soft = &pkt->soft.rfc1051; 174 struct arc_rfc1051 *soft = &pkt->soft.rfc1051;
@@ -220,7 +219,7 @@ static int build_header(struct sk_buff *skb, struct net_device *dev,
220static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, 219static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
221 int bufnum) 220 int bufnum)
222{ 221{
223 struct arcnet_local *lp = dev->priv; 222 struct arcnet_local *lp = netdev_priv(dev);
224 struct arc_hardware *hard = &pkt->hard; 223 struct arc_hardware *hard = &pkt->hard;
225 int ofs; 224 int ofs;
226 225
diff --git a/drivers/net/arcnet/rfc1201.c b/drivers/net/arcnet/rfc1201.c
index 6d6d95cc4404..2303d3a1f4b6 100644
--- a/drivers/net/arcnet/rfc1201.c
+++ b/drivers/net/arcnet/rfc1201.c
@@ -92,7 +92,7 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
92{ 92{
93 struct archdr *pkt = (struct archdr *) skb->data; 93 struct archdr *pkt = (struct archdr *) skb->data;
94 struct arc_rfc1201 *soft = &pkt->soft.rfc1201; 94 struct arc_rfc1201 *soft = &pkt->soft.rfc1201;
95 struct arcnet_local *lp = dev->priv; 95 struct arcnet_local *lp = netdev_priv(dev);
96 int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE; 96 int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE;
97 97
98 /* Pull off the arcnet header. */ 98 /* Pull off the arcnet header. */
@@ -134,7 +134,7 @@ static __be16 type_trans(struct sk_buff *skb, struct net_device *dev)
134static void rx(struct net_device *dev, int bufnum, 134static void rx(struct net_device *dev, int bufnum,
135 struct archdr *pkthdr, int length) 135 struct archdr *pkthdr, int length)
136{ 136{
137 struct arcnet_local *lp = dev->priv; 137 struct arcnet_local *lp = netdev_priv(dev);
138 struct sk_buff *skb; 138 struct sk_buff *skb;
139 struct archdr *pkt = pkthdr; 139 struct archdr *pkt = pkthdr;
140 struct arc_rfc1201 *soft = &pkthdr->soft.rfc1201; 140 struct arc_rfc1201 *soft = &pkthdr->soft.rfc1201;
@@ -230,7 +230,6 @@ static void rx(struct net_device *dev, int bufnum,
230 230
231 skb->protocol = type_trans(skb, dev); 231 skb->protocol = type_trans(skb, dev);
232 netif_rx(skb); 232 netif_rx(skb);
233 dev->last_rx = jiffies;
234 } else { /* split packet */ 233 } else { /* split packet */
235 /* 234 /*
236 * NOTE: MSDOS ARP packet correction should only need to apply to 235 * NOTE: MSDOS ARP packet correction should only need to apply to
@@ -366,7 +365,6 @@ static void rx(struct net_device *dev, int bufnum,
366 365
367 skb->protocol = type_trans(skb, dev); 366 skb->protocol = type_trans(skb, dev);
368 netif_rx(skb); 367 netif_rx(skb);
369 dev->last_rx = jiffies;
370 } 368 }
371 } 369 }
372} 370}
@@ -376,7 +374,7 @@ static void rx(struct net_device *dev, int bufnum,
376static int build_header(struct sk_buff *skb, struct net_device *dev, 374static int build_header(struct sk_buff *skb, struct net_device *dev,
377 unsigned short type, uint8_t daddr) 375 unsigned short type, uint8_t daddr)
378{ 376{
379 struct arcnet_local *lp = dev->priv; 377 struct arcnet_local *lp = netdev_priv(dev);
380 int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE; 378 int hdr_size = ARC_HDR_SIZE + RFC1201_HDR_SIZE;
381 struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size); 379 struct archdr *pkt = (struct archdr *) skb_push(skb, hdr_size);
382 struct arc_rfc1201 *soft = &pkt->soft.rfc1201; 380 struct arc_rfc1201 *soft = &pkt->soft.rfc1201;
@@ -443,7 +441,7 @@ static int build_header(struct sk_buff *skb, struct net_device *dev,
443static void load_pkt(struct net_device *dev, struct arc_hardware *hard, 441static void load_pkt(struct net_device *dev, struct arc_hardware *hard,
444 struct arc_rfc1201 *soft, int softlen, int bufnum) 442 struct arc_rfc1201 *soft, int softlen, int bufnum)
445{ 443{
446 struct arcnet_local *lp = dev->priv; 444 struct arcnet_local *lp = netdev_priv(dev);
447 int ofs; 445 int ofs;
448 446
449 /* assume length <= XMTU: someone should have handled that by now. */ 447 /* assume length <= XMTU: someone should have handled that by now. */
@@ -476,7 +474,7 @@ static void load_pkt(struct net_device *dev, struct arc_hardware *hard,
476static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length, 474static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
477 int bufnum) 475 int bufnum)
478{ 476{
479 struct arcnet_local *lp = dev->priv; 477 struct arcnet_local *lp = netdev_priv(dev);
480 const int maxsegsize = XMTU - RFC1201_HDR_SIZE; 478 const int maxsegsize = XMTU - RFC1201_HDR_SIZE;
481 struct Outgoing *out; 479 struct Outgoing *out;
482 480
@@ -511,7 +509,7 @@ static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
511 509
512static int continue_tx(struct net_device *dev, int bufnum) 510static int continue_tx(struct net_device *dev, int bufnum)
513{ 511{
514 struct arcnet_local *lp = dev->priv; 512 struct arcnet_local *lp = netdev_priv(dev);
515 struct Outgoing *out = &lp->outgoing; 513 struct Outgoing *out = &lp->outgoing;
516 struct arc_hardware *hard = &out->pkt->hard; 514 struct arc_hardware *hard = &out->pkt->hard;
517 struct arc_rfc1201 *soft = &out->pkt->soft.rfc1201, *newsoft; 515 struct arc_rfc1201 *soft = &out->pkt->soft.rfc1201, *newsoft;
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index 29e53eb71c74..e1d72e06f3e1 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -165,7 +165,6 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
165 struct net_device *dev; 165 struct net_device *dev;
166 struct ariadne_private *priv; 166 struct ariadne_private *priv;
167 int err; 167 int err;
168 DECLARE_MAC_BUF(mac);
169 168
170 r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960"); 169 r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
171 if (!r1) 170 if (!r1)
@@ -215,9 +214,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z,
215 } 214 }
216 zorro_set_drvdata(z, dev); 215 zorro_set_drvdata(z, dev);
217 216
218 printk(KERN_INFO "%s: Ariadne at 0x%08lx, Ethernet Address " 217 printk(KERN_INFO "%s: Ariadne at 0x%08lx, Ethernet Address %pM\n",
219 "%s\n", dev->name, board, 218 dev->name, board, dev->dev_addr);
220 print_mac(mac, dev->dev_addr));
221 219
222 return 0; 220 return 0;
223} 221}
@@ -613,14 +611,10 @@ static int ariadne_start_xmit(struct sk_buff *skb, struct net_device *dev)
613 611
614#if 0 612#if 0
615{ 613{
616 DECLARE_MAC_BUF(mac); 614 printk(KERN_DEBUG "TX pkt type 0x%04x from %pM to %pM "
617 DECLARE_MAC_BUF(mac2);
618
619 printk(KERN_DEBUG "TX pkt type 0x%04x from %s to %s "
620 " data 0x%08x len %d\n", 615 " data 0x%08x len %d\n",
621 ((u_short *)skb->data)[6], 616 ((u_short *)skb->data)[6],
622 print_mac(mac, ((const u8 *)skb->data)+6), 617 skb->data + 6, skb->data,
623 print_mac(mac, (const u8 *)skb->data),
624 (int)skb->data, (int)skb->len); 618 (int)skb->data, (int)skb->len);
625} 619}
626#endif 620#endif
@@ -743,25 +737,22 @@ static int ariadne_rx(struct net_device *dev)
743 skb->protocol=eth_type_trans(skb,dev); 737 skb->protocol=eth_type_trans(skb,dev);
744#if 0 738#if 0
745{ 739{
746 DECLARE_MAC_BUF(mac);
747
748 printk(KERN_DEBUG "RX pkt type 0x%04x from ", 740 printk(KERN_DEBUG "RX pkt type 0x%04x from ",
749 ((u_short *)skb->data)[6]); 741 ((u_short *)skb->data)[6]);
750 { 742 {
751 u_char *ptr = &((u_char *)skb->data)[6]; 743 u_char *ptr = &((u_char *)skb->data)[6];
752 printk("%s", print_mac(mac, ptr)); 744 printk("%pM", ptr);
753 } 745 }
754 printk(" to "); 746 printk(" to ");
755 { 747 {
756 u_char *ptr = (u_char *)skb->data; 748 u_char *ptr = (u_char *)skb->data;
757 printk("%s", print_mac(mac, ptr)); 749 printk("%pM", ptr);
758 } 750 }
759 printk(" data 0x%08x len %d\n", (int)skb->data, (int)skb->len); 751 printk(" data 0x%08x len %d\n", (int)skb->data, (int)skb->len);
760} 752}
761#endif 753#endif
762 754
763 netif_rx(skb); 755 netif_rx(skb);
764 dev->last_rx = jiffies;
765 dev->stats.rx_packets++; 756 dev->stats.rx_packets++;
766 dev->stats.rx_bytes += pkt_len; 757 dev->stats.rx_bytes += pkt_len;
767 } 758 }
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index 8eda6eeb43b7..2895db13bfa4 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -40,6 +40,14 @@ config ARM_AT91_ETHER
40 If you wish to compile a kernel for the AT91RM9200 and enable 40 If you wish to compile a kernel for the AT91RM9200 and enable
41 ethernet support, then you should always answer Y to this. 41 ethernet support, then you should always answer Y to this.
42 42
43config ARM_KS8695_ETHER
44 tristate "KS8695 Ethernet support"
45 depends on ARM && ARCH_KS8695
46 select MII
47 help
48 If you wish to compile a kernel for the KS8695 and want to
49 use the internal ethernet then you should answer Y to this.
50
43config EP93XX_ETH 51config EP93XX_ETH
44 tristate "EP93xx Ethernet support" 52 tristate "EP93xx Ethernet support"
45 depends on ARM && ARCH_EP93XX 53 depends on ARM && ARCH_EP93XX
@@ -51,7 +59,7 @@ config EP93XX_ETH
51config IXP4XX_ETH 59config IXP4XX_ETH
52 tristate "Intel IXP4xx Ethernet support" 60 tristate "Intel IXP4xx Ethernet support"
53 depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR 61 depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
54 select MII 62 select PHYLIB
55 help 63 help
56 Say Y here if you want to use built-in Ethernet ports 64 Say Y here if you want to use built-in Ethernet ports
57 on IXP4xx processor. 65 on IXP4xx processor.
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile
index 7c812ac2b6a5..c69c0cdba4a2 100644
--- a/drivers/net/arm/Makefile
+++ b/drivers/net/arm/Makefile
@@ -4,9 +4,10 @@
4# 4#
5 5
6obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o 6obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
7obj-$(CONFIG_ARM_ETHERH) += etherh.o 7obj-$(CONFIG_ARM_ETHERH) += etherh.o ../8390.o
8obj-$(CONFIG_ARM_ETHER3) += ether3.o 8obj-$(CONFIG_ARM_ETHER3) += ether3.o
9obj-$(CONFIG_ARM_ETHER1) += ether1.o 9obj-$(CONFIG_ARM_ETHER1) += ether1.o
10obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o 10obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o
11obj-$(CONFIG_ARM_KS8695_ETHER) += ks8695net.o
11obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o 12obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o
12obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o 13obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index aa4a5246be53..0c628a9e5339 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -532,7 +532,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
532 am_writeword(dev, hdraddr + 2, RMD_OWN); 532 am_writeword(dev, hdraddr + 2, RMD_OWN);
533 skb->protocol = eth_type_trans(skb, dev); 533 skb->protocol = eth_type_trans(skb, dev);
534 netif_rx(skb); 534 netif_rx(skb);
535 dev->last_rx = jiffies;
536 priv->stats.rx_bytes += len; 535 priv->stats.rx_bytes += len;
537 priv->stats.rx_packets ++; 536 priv->stats.rx_packets ++;
538 } else { 537 } else {
@@ -745,10 +744,8 @@ static int __init am79c961_probe(struct platform_device *pdev)
745 744
746 ret = register_netdev(dev); 745 ret = register_netdev(dev);
747 if (ret == 0) { 746 if (ret == 0) {
748 DECLARE_MAC_BUF(mac); 747 printk(KERN_INFO "%s: ether address %pM\n",
749 748 dev->name, dev->dev_addr);
750 printk(KERN_INFO "%s: ether address %s\n",
751 dev->name, print_mac(mac, dev->dev_addr));
752 return 0; 749 return 0;
753 } 750 }
754 751
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 6f431a887e7e..442938d50380 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -485,7 +485,6 @@ static void update_mac_address(struct net_device *dev)
485static int set_mac_address(struct net_device *dev, void* addr) 485static int set_mac_address(struct net_device *dev, void* addr)
486{ 486{
487 struct sockaddr *address = addr; 487 struct sockaddr *address = addr;
488 DECLARE_MAC_BUF(mac);
489 488
490 if (!is_valid_ether_addr(address->sa_data)) 489 if (!is_valid_ether_addr(address->sa_data))
491 return -EADDRNOTAVAIL; 490 return -EADDRNOTAVAIL;
@@ -493,8 +492,8 @@ static int set_mac_address(struct net_device *dev, void* addr)
493 memcpy(dev->dev_addr, address->sa_data, dev->addr_len); 492 memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
494 update_mac_address(dev); 493 update_mac_address(dev);
495 494
496 printk("%s: Setting MAC address to %s\n", dev->name, 495 printk("%s: Setting MAC address to %pM\n", dev->name,
497 print_mac(mac, dev->dev_addr)); 496 dev->dev_addr);
498 497
499 return 0; 498 return 0;
500} 499}
@@ -894,7 +893,6 @@ static void at91ether_rx(struct net_device *dev)
894 memcpy(skb_put(skb, pktlen), p_recv, pktlen); 893 memcpy(skb_put(skb, pktlen), p_recv, pktlen);
895 894
896 skb->protocol = eth_type_trans(skb, dev); 895 skb->protocol = eth_type_trans(skb, dev);
897 dev->last_rx = jiffies;
898 dev->stats.rx_bytes += pktlen; 896 dev->stats.rx_bytes += pktlen;
899 netif_rx(skb); 897 netif_rx(skb);
900 } 898 }
@@ -978,7 +976,6 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
978 struct at91_private *lp; 976 struct at91_private *lp;
979 unsigned int val; 977 unsigned int val;
980 int res; 978 int res;
981 DECLARE_MAC_BUF(mac);
982 979
983 dev = alloc_etherdev(sizeof(struct at91_private)); 980 dev = alloc_etherdev(sizeof(struct at91_private));
984 if (!dev) 981 if (!dev)
@@ -1084,11 +1081,11 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
1084 gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy"); 1081 gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy");
1085 1082
1086 /* Display ethernet banner */ 1083 /* Display ethernet banner */
1087 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%s)\n", 1084 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
1088 dev->name, (uint) dev->base_addr, dev->irq, 1085 dev->name, (uint) dev->base_addr, dev->irq,
1089 at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-", 1086 at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
1090 at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex", 1087 at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
1091 print_mac(mac, dev->dev_addr)); 1088 dev->dev_addr);
1092 if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) 1089 if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
1093 printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)"); 1090 printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
1094 else if (phy_type == MII_LXT971A_ID) 1091 else if (phy_type == MII_LXT971A_ID)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 1267444d79da..6ecc600c1bcc 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -259,8 +259,6 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
259 skb_put(skb, length); 259 skb_put(skb, length);
260 skb->protocol = eth_type_trans(skb, dev); 260 skb->protocol = eth_type_trans(skb, dev);
261 261
262 dev->last_rx = jiffies;
263
264 netif_receive_skb(skb); 262 netif_receive_skb(skb);
265 263
266 ep->stats.rx_packets++; 264 ep->stats.rx_packets++;
@@ -300,7 +298,7 @@ poll_some_more:
300 int more = 0; 298 int more = 0;
301 299
302 spin_lock_irq(&ep->rx_lock); 300 spin_lock_irq(&ep->rx_lock);
303 __netif_rx_complete(dev, napi); 301 __netif_rx_complete(napi);
304 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX); 302 wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
305 if (ep93xx_have_more_rx(ep)) { 303 if (ep93xx_have_more_rx(ep)) {
306 wrl(ep, REG_INTEN, REG_INTEN_TX); 304 wrl(ep, REG_INTEN, REG_INTEN_TX);
@@ -417,9 +415,9 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
417 415
418 if (status & REG_INTSTS_RX) { 416 if (status & REG_INTSTS_RX) {
419 spin_lock(&ep->rx_lock); 417 spin_lock(&ep->rx_lock);
420 if (likely(netif_rx_schedule_prep(dev, &ep->napi))) { 418 if (likely(netif_rx_schedule_prep(&ep->napi))) {
421 wrl(ep, REG_INTEN, REG_INTEN_TX); 419 wrl(ep, REG_INTEN, REG_INTEN_TX);
422 __netif_rx_schedule(dev, &ep->napi); 420 __netif_rx_schedule(&ep->napi);
423 } 421 }
424 spin_unlock(&ep->rx_lock); 422 spin_unlock(&ep->rx_lock);
425 } 423 }
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index 3bb9e293e2ef..e380de454463 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -996,7 +996,6 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
996{ 996{
997 struct net_device *dev; 997 struct net_device *dev;
998 int i, ret = 0; 998 int i, ret = 0;
999 DECLARE_MAC_BUF(mac);
1000 999
1001 ether1_banner(); 1000 ether1_banner();
1002 1001
@@ -1044,8 +1043,8 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
1044 if (ret) 1043 if (ret)
1045 goto free; 1044 goto free;
1046 1045
1047 printk(KERN_INFO "%s: ether1 in slot %d, %s\n", 1046 printk(KERN_INFO "%s: ether1 in slot %d, %pM\n",
1048 dev->name, ec->slot_no, print_mac(mac, dev->dev_addr)); 1047 dev->name, ec->slot_no, dev->dev_addr);
1049 1048
1050 ecard_set_drvdata(ec, dev); 1049 ecard_set_drvdata(ec, dev);
1051 return 0; 1050 return 0;
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 67e96ae85035..21a7bef12d3b 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -776,7 +776,6 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
776 const struct ether3_data *data = id->data; 776 const struct ether3_data *data = id->data;
777 struct net_device *dev; 777 struct net_device *dev;
778 int bus_type, ret; 778 int bus_type, ret;
779 DECLARE_MAC_BUF(mac);
780 779
781 ether3_banner(); 780 ether3_banner();
782 781
@@ -859,8 +858,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
859 if (ret) 858 if (ret)
860 goto free; 859 goto free;
861 860
862 printk("%s: %s in slot %d, %s\n", 861 printk("%s: %s in slot %d, %pM\n",
863 dev->name, data->name, ec->slot_no, print_mac(mac, dev->dev_addr)); 862 dev->name, data->name, ec->slot_no, dev->dev_addr);
864 863
865 ecard_set_drvdata(ec, dev); 864 ecard_set_drvdata(ec, dev);
866 return 0; 865 return 0;
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 5c5f1e470d3c..6278606d1049 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -637,6 +637,21 @@ static const struct ethtool_ops etherh_ethtool_ops = {
637 .get_drvinfo = etherh_get_drvinfo, 637 .get_drvinfo = etherh_get_drvinfo,
638}; 638};
639 639
640static const struct net_device_ops etherh_netdev_ops = {
641 .ndo_open = etherh_open,
642 .ndo_stop = etherh_close,
643 .ndo_set_config = etherh_set_config,
644 .ndo_start_xmit = ei_start_xmit,
645 .ndo_tx_timeout = ei_tx_timeout,
646 .ndo_get_stats = ei_get_stats,
647 .ndo_set_multicast_list = ei_set_multicast_list,
648 .ndo_validate_addr = eth_validate_addr,
649 .ndo_change_mtu = eth_change_mtu,
650#ifdef CONFIG_NET_POLL_CONTROLLER
651 .ndo_poll_controller = ei_poll,
652#endif
653};
654
640static u32 etherh_regoffsets[16]; 655static u32 etherh_regoffsets[16];
641static u32 etherm_regoffsets[16]; 656static u32 etherm_regoffsets[16];
642 657
@@ -648,7 +663,6 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
648 struct net_device *dev; 663 struct net_device *dev;
649 struct etherh_priv *eh; 664 struct etherh_priv *eh;
650 int ret; 665 int ret;
651 DECLARE_MAC_BUF(mac);
652 666
653 etherh_banner(); 667 etherh_banner();
654 668
@@ -664,9 +678,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
664 678
665 SET_NETDEV_DEV(dev, &ec->dev); 679 SET_NETDEV_DEV(dev, &ec->dev);
666 680
667 dev->open = etherh_open; 681 dev->netdev_ops = &etherh_netdev_ops;
668 dev->stop = etherh_close;
669 dev->set_config = etherh_set_config;
670 dev->irq = ec->irq; 682 dev->irq = ec->irq;
671 dev->ethtool_ops = &etherh_ethtool_ops; 683 dev->ethtool_ops = &etherh_ethtool_ops;
672 684
@@ -746,8 +758,8 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
746 if (ret) 758 if (ret)
747 goto free; 759 goto free;
748 760
749 printk(KERN_INFO "%s: %s in slot %d, %s\n", 761 printk(KERN_INFO "%s: %s in slot %d, %pM\n",
750 dev->name, data->name, ec->slot_no, print_mac(mac, dev->dev_addr)); 762 dev->name, data->name, ec->slot_no, dev->dev_addr);
751 763
752 ecard_set_drvdata(ec, dev); 764 ecard_set_drvdata(ec, dev);
753 765
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index e2d702b8b2e4..26af411fc428 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -30,12 +30,11 @@
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <linux/io.h> 31#include <linux/io.h>
32#include <linux/kernel.h> 32#include <linux/kernel.h>
33#include <linux/mii.h> 33#include <linux/phy.h>
34#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <mach/npe.h> 35#include <mach/npe.h>
36#include <mach/qmgr.h> 36#include <mach/qmgr.h>
37 37
38#define DEBUG_QUEUES 0
39#define DEBUG_DESC 0 38#define DEBUG_DESC 0
40#define DEBUG_RX 0 39#define DEBUG_RX 0
41#define DEBUG_TX 0 40#define DEBUG_TX 0
@@ -59,7 +58,6 @@
59#define NAPI_WEIGHT 16 58#define NAPI_WEIGHT 16
60#define MDIO_INTERVAL (3 * HZ) 59#define MDIO_INTERVAL (3 * HZ)
61#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */ 60#define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
62#define MAX_MII_RESET_RETRIES 100 /* mdio_read() cycles, typically 4 */
63#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */ 61#define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */
64 62
65#define NPE_ID(port_id) ((port_id) >> 4) 63#define NPE_ID(port_id) ((port_id) >> 4)
@@ -164,15 +162,14 @@ struct port {
164 struct npe *npe; 162 struct npe *npe;
165 struct net_device *netdev; 163 struct net_device *netdev;
166 struct napi_struct napi; 164 struct napi_struct napi;
167 struct net_device_stats stat; 165 struct phy_device *phydev;
168 struct mii_if_info mii;
169 struct delayed_work mdio_thread;
170 struct eth_plat_info *plat; 166 struct eth_plat_info *plat;
171 buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; 167 buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
172 struct desc *desc_tab; /* coherent */ 168 struct desc *desc_tab; /* coherent */
173 u32 desc_tab_phys; 169 u32 desc_tab_phys;
174 int id; /* logical port ID */ 170 int id; /* logical port ID */
175 u16 mii_bmcr; 171 int speed, duplex;
172 u8 firmware[4];
176}; 173};
177 174
178/* NPE message structure */ 175/* NPE message structure */
@@ -243,19 +240,20 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
243 240
244static spinlock_t mdio_lock; 241static spinlock_t mdio_lock;
245static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */ 242static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
243struct mii_bus *mdio_bus;
246static int ports_open; 244static int ports_open;
247static struct port *npe_port_tab[MAX_NPES]; 245static struct port *npe_port_tab[MAX_NPES];
248static struct dma_pool *dma_pool; 246static struct dma_pool *dma_pool;
249 247
250 248
251static u16 mdio_cmd(struct net_device *dev, int phy_id, int location, 249static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
252 int write, u16 cmd) 250 int write, u16 cmd)
253{ 251{
254 int cycles = 0; 252 int cycles = 0;
255 253
256 if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) { 254 if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
257 printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name); 255 printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
258 return 0; 256 return -1;
259 } 257 }
260 258
261 if (write) { 259 if (write) {
@@ -274,107 +272,119 @@ static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
274 } 272 }
275 273
276 if (cycles == MAX_MDIO_RETRIES) { 274 if (cycles == MAX_MDIO_RETRIES) {
277 printk(KERN_ERR "%s: MII write failed\n", dev->name); 275 printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
278 return 0; 276 phy_id);
277 return -1;
279 } 278 }
280 279
281#if DEBUG_MDIO 280#if DEBUG_MDIO
282 printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name, 281 printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
283 cycles); 282 phy_id, write ? "write" : "read", cycles);
284#endif 283#endif
285 284
286 if (write) 285 if (write)
287 return 0; 286 return 0;
288 287
289 if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) { 288 if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
290 printk(KERN_ERR "%s: MII read failed\n", dev->name); 289#if DEBUG_MDIO
291 return 0; 290 printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
291 phy_id);
292#endif
293 return 0xFFFF; /* don't return error */
292 } 294 }
293 295
294 return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) | 296 return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
295 (__raw_readl(&mdio_regs->mdio_status[1]) << 8); 297 ((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
296} 298}
297 299
298static int mdio_read(struct net_device *dev, int phy_id, int location) 300static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
299{ 301{
300 unsigned long flags; 302 unsigned long flags;
301 u16 val; 303 int ret;
302 304
303 spin_lock_irqsave(&mdio_lock, flags); 305 spin_lock_irqsave(&mdio_lock, flags);
304 val = mdio_cmd(dev, phy_id, location, 0, 0); 306 ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
305 spin_unlock_irqrestore(&mdio_lock, flags); 307 spin_unlock_irqrestore(&mdio_lock, flags);
306 return val; 308#if DEBUG_MDIO
309 printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
310 phy_id, location, ret);
311#endif
312 return ret;
307} 313}
308 314
309static void mdio_write(struct net_device *dev, int phy_id, int location, 315static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
310 int val) 316 u16 val)
311{ 317{
312 unsigned long flags; 318 unsigned long flags;
319 int ret;
313 320
314 spin_lock_irqsave(&mdio_lock, flags); 321 spin_lock_irqsave(&mdio_lock, flags);
315 mdio_cmd(dev, phy_id, location, 1, val); 322 ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
316 spin_unlock_irqrestore(&mdio_lock, flags); 323 spin_unlock_irqrestore(&mdio_lock, flags);
324#if DEBUG_MDIO
325 printk(KERN_DEBUG "%s #%i: MII read [%i] <- 0x%X, err = %i\n",
326 bus->name, phy_id, location, val, ret);
327#endif
328 return ret;
317} 329}
318 330
319static void phy_reset(struct net_device *dev, int phy_id) 331static int ixp4xx_mdio_register(void)
320{ 332{
321 struct port *port = netdev_priv(dev); 333 int err;
322 int cycles = 0;
323 334
324 mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET); 335 if (!(mdio_bus = mdiobus_alloc()))
336 return -ENOMEM;
325 337
326 while (cycles < MAX_MII_RESET_RETRIES) { 338 /* All MII PHY accesses use NPE-B Ethernet registers */
327 if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) { 339 spin_lock_init(&mdio_lock);
328#if DEBUG_MDIO 340 mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
329 printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n", 341 __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
330 dev->name, cycles); 342
331#endif 343 mdio_bus->name = "IXP4xx MII Bus";
332 return; 344 mdio_bus->read = &ixp4xx_mdio_read;
333 } 345 mdio_bus->write = &ixp4xx_mdio_write;
334 udelay(1); 346 strcpy(mdio_bus->id, "0");
335 cycles++;
336 }
337 347
338 printk(KERN_ERR "%s: MII reset failed\n", dev->name); 348 if ((err = mdiobus_register(mdio_bus)))
349 mdiobus_free(mdio_bus);
350 return err;
339} 351}
340 352
341static void eth_set_duplex(struct port *port) 353static void ixp4xx_mdio_remove(void)
342{ 354{
343 if (port->mii.full_duplex) 355 mdiobus_unregister(mdio_bus);
344 __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX, 356 mdiobus_free(mdio_bus);
345 &port->regs->tx_control[0]);
346 else
347 __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
348 &port->regs->tx_control[0]);
349} 357}
350 358
351 359
352static void phy_check_media(struct port *port, int init) 360static void ixp4xx_adjust_link(struct net_device *dev)
353{ 361{
354 if (mii_check_media(&port->mii, 1, init)) 362 struct port *port = netdev_priv(dev);
355 eth_set_duplex(port); 363 struct phy_device *phydev = port->phydev;
356 if (port->mii.force_media) { /* mii_check_media() doesn't work */ 364
357 struct net_device *dev = port->netdev; 365 if (!phydev->link) {
358 int cur_link = mii_link_ok(&port->mii); 366 if (port->speed) {
359 int prev_link = netif_carrier_ok(dev); 367 port->speed = 0;
360
361 if (!prev_link && cur_link) {
362 printk(KERN_INFO "%s: link up\n", dev->name);
363 netif_carrier_on(dev);
364 } else if (prev_link && !cur_link) {
365 printk(KERN_INFO "%s: link down\n", dev->name); 368 printk(KERN_INFO "%s: link down\n", dev->name);
366 netif_carrier_off(dev);
367 } 369 }
370 return;
368 } 371 }
369}
370 372
373 if (port->speed == phydev->speed && port->duplex == phydev->duplex)
374 return;
371 375
372static void mdio_thread(struct work_struct *work) 376 port->speed = phydev->speed;
373{ 377 port->duplex = phydev->duplex;
374 struct port *port = container_of(work, struct port, mdio_thread.work); 378
379 if (port->duplex)
380 __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
381 &port->regs->tx_control[0]);
382 else
383 __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
384 &port->regs->tx_control[0]);
375 385
376 phy_check_media(port, 0); 386 printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
377 schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL); 387 dev->name, port->speed, port->duplex ? "full" : "half");
378} 388}
379 389
380 390
@@ -412,47 +422,13 @@ static inline void debug_desc(u32 phys, struct desc *desc)
412#endif 422#endif
413} 423}
414 424
415static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
416{
417#if DEBUG_QUEUES
418 static struct {
419 int queue;
420 char *name;
421 } names[] = {
422 { TX_QUEUE(0x10), "TX#0 " },
423 { TX_QUEUE(0x20), "TX#1 " },
424 { TX_QUEUE(0x00), "TX#2 " },
425 { RXFREE_QUEUE(0x10), "RX-free#0 " },
426 { RXFREE_QUEUE(0x20), "RX-free#1 " },
427 { RXFREE_QUEUE(0x00), "RX-free#2 " },
428 { TXDONE_QUEUE, "TX-done " },
429 };
430 int i;
431
432 for (i = 0; i < ARRAY_SIZE(names); i++)
433 if (names[i].queue == queue)
434 break;
435
436 printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
437 i < ARRAY_SIZE(names) ? names[i].name : "",
438 is_get ? "->" : "<-", phys);
439#endif
440}
441
442static inline u32 queue_get_entry(unsigned int queue)
443{
444 u32 phys = qmgr_get_entry(queue);
445 debug_queue(queue, 1, phys);
446 return phys;
447}
448
449static inline int queue_get_desc(unsigned int queue, struct port *port, 425static inline int queue_get_desc(unsigned int queue, struct port *port,
450 int is_tx) 426 int is_tx)
451{ 427{
452 u32 phys, tab_phys, n_desc; 428 u32 phys, tab_phys, n_desc;
453 struct desc *tab; 429 struct desc *tab;
454 430
455 if (!(phys = queue_get_entry(queue))) 431 if (!(phys = qmgr_get_entry(queue)))
456 return -1; 432 return -1;
457 433
458 phys &= ~0x1F; /* mask out non-address bits */ 434 phys &= ~0x1F; /* mask out non-address bits */
@@ -468,7 +444,6 @@ static inline int queue_get_desc(unsigned int queue, struct port *port,
468static inline void queue_put_desc(unsigned int queue, u32 phys, 444static inline void queue_put_desc(unsigned int queue, u32 phys,
469 struct desc *desc) 445 struct desc *desc)
470{ 446{
471 debug_queue(queue, 0, phys);
472 debug_desc(phys, desc); 447 debug_desc(phys, desc);
473 BUG_ON(phys & 0x1F); 448 BUG_ON(phys & 0x1F);
474 qmgr_put_entry(queue, phys); 449 qmgr_put_entry(queue, phys);
@@ -498,7 +473,7 @@ static void eth_rx_irq(void *pdev)
498 printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name); 473 printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
499#endif 474#endif
500 qmgr_disable_irq(port->plat->rxq); 475 qmgr_disable_irq(port->plat->rxq);
501 netif_rx_schedule(dev, &port->napi); 476 netif_rx_schedule(&port->napi);
502} 477}
503 478
504static int eth_poll(struct napi_struct *napi, int budget) 479static int eth_poll(struct napi_struct *napi, int budget)
@@ -526,7 +501,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
526 printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n", 501 printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
527 dev->name); 502 dev->name);
528#endif 503#endif
529 netif_rx_complete(dev, napi); 504 netif_rx_complete(napi);
530 qmgr_enable_irq(rxq); 505 qmgr_enable_irq(rxq);
531 if (!qmgr_stat_empty(rxq) && 506 if (!qmgr_stat_empty(rxq) &&
532 netif_rx_reschedule(dev, napi)) { 507 netif_rx_reschedule(dev, napi)) {
@@ -562,7 +537,7 @@ static int eth_poll(struct napi_struct *napi, int budget)
562#endif 537#endif
563 538
564 if (!skb) { 539 if (!skb) {
565 port->stat.rx_dropped++; 540 dev->stats.rx_dropped++;
566 /* put the desc back on RX-ready queue */ 541 /* put the desc back on RX-ready queue */
567 desc->buf_len = MAX_MRU; 542 desc->buf_len = MAX_MRU;
568 desc->pkt_len = 0; 543 desc->pkt_len = 0;
@@ -588,9 +563,8 @@ static int eth_poll(struct napi_struct *napi, int budget)
588 debug_pkt(dev, "eth_poll", skb->data, skb->len); 563 debug_pkt(dev, "eth_poll", skb->data, skb->len);
589 564
590 skb->protocol = eth_type_trans(skb, dev); 565 skb->protocol = eth_type_trans(skb, dev);
591 dev->last_rx = jiffies; 566 dev->stats.rx_packets++;
592 port->stat.rx_packets++; 567 dev->stats.rx_bytes += skb->len;
593 port->stat.rx_bytes += skb->len;
594 netif_receive_skb(skb); 568 netif_receive_skb(skb);
595 569
596 /* put the new buffer on RX-free queue */ 570 /* put the new buffer on RX-free queue */
@@ -618,7 +592,7 @@ static void eth_txdone_irq(void *unused)
618#if DEBUG_TX 592#if DEBUG_TX
619 printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n"); 593 printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
620#endif 594#endif
621 while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) { 595 while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
622 u32 npe_id, n_desc; 596 u32 npe_id, n_desc;
623 struct port *port; 597 struct port *port;
624 struct desc *desc; 598 struct desc *desc;
@@ -635,8 +609,8 @@ static void eth_txdone_irq(void *unused)
635 debug_desc(phys, desc); 609 debug_desc(phys, desc);
636 610
637 if (port->tx_buff_tab[n_desc]) { /* not the draining packet */ 611 if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
638 port->stat.tx_packets++; 612 port->netdev->stats.tx_packets++;
639 port->stat.tx_bytes += desc->pkt_len; 613 port->netdev->stats.tx_bytes += desc->pkt_len;
640 614
641 dma_unmap_tx(port, desc); 615 dma_unmap_tx(port, desc);
642#if DEBUG_TX 616#if DEBUG_TX
@@ -674,7 +648,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
674 648
675 if (unlikely(skb->len > MAX_MRU)) { 649 if (unlikely(skb->len > MAX_MRU)) {
676 dev_kfree_skb(skb); 650 dev_kfree_skb(skb);
677 port->stat.tx_errors++; 651 dev->stats.tx_errors++;
678 return NETDEV_TX_OK; 652 return NETDEV_TX_OK;
679 } 653 }
680 654
@@ -690,7 +664,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
690 bytes = ALIGN(offset + len, 4); 664 bytes = ALIGN(offset + len, 4);
691 if (!(mem = kmalloc(bytes, GFP_ATOMIC))) { 665 if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
692 dev_kfree_skb(skb); 666 dev_kfree_skb(skb);
693 port->stat.tx_dropped++; 667 dev->stats.tx_dropped++;
694 return NETDEV_TX_OK; 668 return NETDEV_TX_OK;
695 } 669 }
696 memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4); 670 memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
@@ -704,7 +678,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
704#else 678#else
705 kfree(mem); 679 kfree(mem);
706#endif 680#endif
707 port->stat.tx_dropped++; 681 dev->stats.tx_dropped++;
708 return NETDEV_TX_OK; 682 return NETDEV_TX_OK;
709 } 683 }
710 684
@@ -747,12 +721,6 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
747} 721}
748 722
749 723
750static struct net_device_stats *eth_stats(struct net_device *dev)
751{
752 struct port *port = netdev_priv(dev);
753 return &port->stat;
754}
755
756static void eth_set_mcast_list(struct net_device *dev) 724static void eth_set_mcast_list(struct net_device *dev)
757{ 725{
758 struct port *port = netdev_priv(dev); 726 struct port *port = netdev_priv(dev);
@@ -786,41 +754,80 @@ static void eth_set_mcast_list(struct net_device *dev)
786static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 754static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
787{ 755{
788 struct port *port = netdev_priv(dev); 756 struct port *port = netdev_priv(dev);
789 unsigned int duplex_chg;
790 int err;
791 757
792 if (!netif_running(dev)) 758 if (!netif_running(dev))
793 return -EINVAL; 759 return -EINVAL;
794 err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg); 760 return phy_mii_ioctl(port->phydev, if_mii(req), cmd);
795 if (duplex_chg) 761}
796 eth_set_duplex(port); 762
797 return err; 763/* ethtool support */
764
765static void ixp4xx_get_drvinfo(struct net_device *dev,
766 struct ethtool_drvinfo *info)
767{
768 struct port *port = netdev_priv(dev);
769 strcpy(info->driver, DRV_NAME);
770 snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
771 port->firmware[0], port->firmware[1],
772 port->firmware[2], port->firmware[3]);
773 strcpy(info->bus_info, "internal");
798} 774}
799 775
776static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
777{
778 struct port *port = netdev_priv(dev);
779 return phy_ethtool_gset(port->phydev, cmd);
780}
781
782static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
783{
784 struct port *port = netdev_priv(dev);
785 return phy_ethtool_sset(port->phydev, cmd);
786}
787
788static int ixp4xx_nway_reset(struct net_device *dev)
789{
790 struct port *port = netdev_priv(dev);
791 return phy_start_aneg(port->phydev);
792}
793
794static struct ethtool_ops ixp4xx_ethtool_ops = {
795 .get_drvinfo = ixp4xx_get_drvinfo,
796 .get_settings = ixp4xx_get_settings,
797 .set_settings = ixp4xx_set_settings,
798 .nway_reset = ixp4xx_nway_reset,
799 .get_link = ethtool_op_get_link,
800};
801
800 802
801static int request_queues(struct port *port) 803static int request_queues(struct port *port)
802{ 804{
803 int err; 805 int err;
804 806
805 err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0); 807 err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
808 "%s:RX-free", port->netdev->name);
806 if (err) 809 if (err)
807 return err; 810 return err;
808 811
809 err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0); 812 err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
813 "%s:RX", port->netdev->name);
810 if (err) 814 if (err)
811 goto rel_rxfree; 815 goto rel_rxfree;
812 816
813 err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0); 817 err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
818 "%s:TX", port->netdev->name);
814 if (err) 819 if (err)
815 goto rel_rx; 820 goto rel_rx;
816 821
817 err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0); 822 err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
823 "%s:TX-ready", port->netdev->name);
818 if (err) 824 if (err)
819 goto rel_tx; 825 goto rel_tx;
820 826
821 /* TX-done queue handles skbs sent out by the NPEs */ 827 /* TX-done queue handles skbs sent out by the NPEs */
822 if (!ports_open) { 828 if (!ports_open) {
823 err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0); 829 err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
830 "%s:TX-done", DRV_NAME);
824 if (err) 831 if (err)
825 goto rel_txready; 832 goto rel_txready;
826 } 833 }
@@ -944,10 +951,12 @@ static int eth_open(struct net_device *dev)
944 npe_name(npe)); 951 npe_name(npe));
945 return -EIO; 952 return -EIO;
946 } 953 }
954 port->firmware[0] = msg.byte4;
955 port->firmware[1] = msg.byte5;
956 port->firmware[2] = msg.byte6;
957 port->firmware[3] = msg.byte7;
947 } 958 }
948 959
949 mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);
950
951 memset(&msg, 0, sizeof(msg)); 960 memset(&msg, 0, sizeof(msg));
952 msg.cmd = NPE_VLAN_SETRXQOSENTRY; 961 msg.cmd = NPE_VLAN_SETRXQOSENTRY;
953 msg.eth_id = port->id; 962 msg.eth_id = port->id;
@@ -985,6 +994,9 @@ static int eth_open(struct net_device *dev)
985 return err; 994 return err;
986 } 995 }
987 996
997 port->speed = 0; /* force "link up" message */
998 phy_start(port->phydev);
999
988 for (i = 0; i < ETH_ALEN; i++) 1000 for (i = 0; i < ETH_ALEN; i++)
989 __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]); 1001 __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
990 __raw_writel(0x08, &port->regs->random_seed); 1002 __raw_writel(0x08, &port->regs->random_seed);
@@ -1012,10 +1024,8 @@ static int eth_open(struct net_device *dev)
1012 __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]); 1024 __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
1013 1025
1014 napi_enable(&port->napi); 1026 napi_enable(&port->napi);
1015 phy_check_media(port, 1);
1016 eth_set_mcast_list(dev); 1027 eth_set_mcast_list(dev);
1017 netif_start_queue(dev); 1028 netif_start_queue(dev);
1018 schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
1019 1029
1020 qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY, 1030 qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
1021 eth_rx_irq, dev); 1031 eth_rx_irq, dev);
@@ -1026,7 +1036,7 @@ static int eth_open(struct net_device *dev)
1026 } 1036 }
1027 ports_open++; 1037 ports_open++;
1028 /* we may already have RX data, enables IRQ */ 1038 /* we may already have RX data, enables IRQ */
1029 netif_rx_schedule(dev, &port->napi); 1039 netif_rx_schedule(&port->napi);
1030 return 0; 1040 return 0;
1031} 1041}
1032 1042
@@ -1106,25 +1116,31 @@ static int eth_close(struct net_device *dev)
1106 printk(KERN_CRIT "%s: unable to disable loopback\n", 1116 printk(KERN_CRIT "%s: unable to disable loopback\n",
1107 dev->name); 1117 dev->name);
1108 1118
1109 port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) & 1119 phy_stop(port->phydev);
1110 ~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
1111 mdio_write(dev, port->plat->phy, MII_BMCR,
1112 port->mii_bmcr | BMCR_PDOWN);
1113 1120
1114 if (!ports_open) 1121 if (!ports_open)
1115 qmgr_disable_irq(TXDONE_QUEUE); 1122 qmgr_disable_irq(TXDONE_QUEUE);
1116 cancel_rearming_delayed_work(&port->mdio_thread);
1117 destroy_queues(port); 1123 destroy_queues(port);
1118 release_queues(port); 1124 release_queues(port);
1119 return 0; 1125 return 0;
1120} 1126}
1121 1127
1128static const struct net_device_ops ixp4xx_netdev_ops = {
1129 .ndo_open = eth_open,
1130 .ndo_stop = eth_close,
1131 .ndo_start_xmit = eth_xmit,
1132 .ndo_set_multicast_list = eth_set_mcast_list,
1133 .ndo_do_ioctl = eth_ioctl,
1134
1135};
1136
1122static int __devinit eth_init_one(struct platform_device *pdev) 1137static int __devinit eth_init_one(struct platform_device *pdev)
1123{ 1138{
1124 struct port *port; 1139 struct port *port;
1125 struct net_device *dev; 1140 struct net_device *dev;
1126 struct eth_plat_info *plat = pdev->dev.platform_data; 1141 struct eth_plat_info *plat = pdev->dev.platform_data;
1127 u32 regs_phys; 1142 u32 regs_phys;
1143 char phy_id[BUS_ID_SIZE];
1128 int err; 1144 int err;
1129 1145
1130 if (!(dev = alloc_etherdev(sizeof(struct port)))) 1146 if (!(dev = alloc_etherdev(sizeof(struct port))))
@@ -1153,12 +1169,8 @@ static int __devinit eth_init_one(struct platform_device *pdev)
1153 goto err_free; 1169 goto err_free;
1154 } 1170 }
1155 1171
1156 dev->open = eth_open; 1172 dev->netdev_ops = &ixp4xx_netdev_ops;
1157 dev->hard_start_xmit = eth_xmit; 1173 dev->ethtool_ops = &ixp4xx_ethtool_ops;
1158 dev->stop = eth_close;
1159 dev->get_stats = eth_stats;
1160 dev->do_ioctl = eth_ioctl;
1161 dev->set_multicast_list = eth_set_mcast_list;
1162 dev->tx_queue_len = 100; 1174 dev->tx_queue_len = 100;
1163 1175
1164 netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT); 1176 netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
@@ -1191,22 +1203,19 @@ static int __devinit eth_init_one(struct platform_device *pdev)
1191 __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control); 1203 __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
1192 udelay(50); 1204 udelay(50);
1193 1205
1194 port->mii.dev = dev; 1206 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, "0", plat->phy);
1195 port->mii.mdio_read = mdio_read; 1207 port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
1196 port->mii.mdio_write = mdio_write; 1208 PHY_INTERFACE_MODE_MII);
1197 port->mii.phy_id = plat->phy; 1209 if (IS_ERR(port->phydev)) {
1198 port->mii.phy_id_mask = 0x1F; 1210 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
1199 port->mii.reg_num_mask = 0x1F; 1211 return PTR_ERR(port->phydev);
1212 }
1213
1214 port->phydev->irq = PHY_POLL;
1200 1215
1201 printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy, 1216 printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
1202 npe_name(port->npe)); 1217 npe_name(port->npe));
1203 1218
1204 phy_reset(dev, plat->phy);
1205 port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
1206 ~(BMCR_RESET | BMCR_PDOWN);
1207 mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);
1208
1209 INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
1210 return 0; 1219 return 0;
1211 1220
1212err_unreg: 1221err_unreg:
@@ -1232,7 +1241,7 @@ static int __devexit eth_remove_one(struct platform_device *pdev)
1232 return 0; 1241 return 0;
1233} 1242}
1234 1243
1235static struct platform_driver drv = { 1244static struct platform_driver ixp4xx_eth_driver = {
1236 .driver.name = DRV_NAME, 1245 .driver.name = DRV_NAME,
1237 .probe = eth_init_one, 1246 .probe = eth_init_one,
1238 .remove = eth_remove_one, 1247 .remove = eth_remove_one,
@@ -1240,20 +1249,19 @@ static struct platform_driver drv = {
1240 1249
1241static int __init eth_init_module(void) 1250static int __init eth_init_module(void)
1242{ 1251{
1252 int err;
1243 if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0)) 1253 if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
1244 return -ENOSYS; 1254 return -ENOSYS;
1245 1255
1246 /* All MII PHY accesses use NPE-B Ethernet registers */ 1256 if ((err = ixp4xx_mdio_register()))
1247 spin_lock_init(&mdio_lock); 1257 return err;
1248 mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT; 1258 return platform_driver_register(&ixp4xx_eth_driver);
1249 __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
1250
1251 return platform_driver_register(&drv);
1252} 1259}
1253 1260
1254static void __exit eth_cleanup_module(void) 1261static void __exit eth_cleanup_module(void)
1255{ 1262{
1256 platform_driver_unregister(&drv); 1263 platform_driver_unregister(&ixp4xx_eth_driver);
1264 ixp4xx_mdio_remove();
1257} 1265}
1258 1266
1259MODULE_AUTHOR("Krzysztof Halasa"); 1267MODULE_AUTHOR("Krzysztof Halasa");
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
new file mode 100644
index 000000000000..592daee9dc28
--- /dev/null
+++ b/drivers/net/arm/ks8695net.c
@@ -0,0 +1,1676 @@
1/*
2 * Micrel KS8695 (Centaur) Ethernet.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * Copyright 2008 Simtec Electronics
15 * Daniel Silverstone <dsilvers@simtec.co.uk>
16 * Vincent Sanders <vince@simtec.co.uk>
17 */
18
19#include <linux/module.h>
20#include <linux/ioport.h>
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/init.h>
24#include <linux/skbuff.h>
25#include <linux/spinlock.h>
26#include <linux/crc32.h>
27#include <linux/mii.h>
28#include <linux/ethtool.h>
29#include <linux/delay.h>
30#include <linux/platform_device.h>
31#include <linux/irq.h>
32#include <linux/delay.h>
33#include <linux/io.h>
34
35#include <asm/irq.h>
36
37#include <mach/regs-switch.h>
38#include <mach/regs-misc.h>
39
40#include "ks8695net.h"
41
42#define MODULENAME "ks8695_ether"
43#define MODULEVERSION "1.01"
44
45/*
46 * Transmit and device reset timeout, default 5 seconds.
47 */
48static int watchdog = 5000;
49
50/* Hardware structures */
51
52/**
53 * struct rx_ring_desc - Receive descriptor ring element
54 * @status: The status of the descriptor element (E.g. who owns it)
55 * @length: The number of bytes in the block pointed to by data_ptr
56 * @data_ptr: The physical address of the data block to receive into
57 * @next_desc: The physical address of the next descriptor element.
58 */
59struct rx_ring_desc {
60 __le32 status;
61 __le32 length;
62 __le32 data_ptr;
63 __le32 next_desc;
64};
65
66/**
67 * struct tx_ring_desc - Transmit descriptor ring element
68 * @owner: Who owns the descriptor
69 * @status: The number of bytes in the block pointed to by data_ptr
70 * @data_ptr: The physical address of the data block to receive into
71 * @next_desc: The physical address of the next descriptor element.
72 */
73struct tx_ring_desc {
74 __le32 owner;
75 __le32 status;
76 __le32 data_ptr;
77 __le32 next_desc;
78};
79
80/**
81 * struct ks8695_skbuff - sk_buff wrapper for rx/tx rings.
82 * @skb: The buffer in the ring
83 * @dma_ptr: The mapped DMA pointer of the buffer
84 * @length: The number of bytes mapped to dma_ptr
85 */
86struct ks8695_skbuff {
87 struct sk_buff *skb;
88 dma_addr_t dma_ptr;
89 u32 length;
90};
91
92/* Private device structure */
93
94#define MAX_TX_DESC 8
95#define MAX_TX_DESC_MASK 0x7
96#define MAX_RX_DESC 16
97#define MAX_RX_DESC_MASK 0xf
98
99#define MAX_RXBUF_SIZE 0x700
100
101#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
102#define RX_RING_DMA_SIZE (sizeof(struct rx_ring_desc) * MAX_RX_DESC)
103#define RING_DMA_SIZE (TX_RING_DMA_SIZE + RX_RING_DMA_SIZE)
104
105/**
106 * enum ks8695_dtype - Device type
107 * @KS8695_DTYPE_WAN: This device is a WAN interface
108 * @KS8695_DTYPE_LAN: This device is a LAN interface
109 * @KS8695_DTYPE_HPNA: This device is an HPNA interface
110 */
111enum ks8695_dtype {
112 KS8695_DTYPE_WAN,
113 KS8695_DTYPE_LAN,
114 KS8695_DTYPE_HPNA,
115};
116
117/**
118 * struct ks8695_priv - Private data for the KS8695 Ethernet
119 * @in_suspend: Flag to indicate if we're suspending/resuming
120 * @ndev: The net_device for this interface
121 * @dev: The platform device object for this interface
122 * @dtype: The type of this device
123 * @io_regs: The ioremapped registers for this interface
124 * @rx_irq_name: The textual name of the RX IRQ from the platform data
125 * @tx_irq_name: The textual name of the TX IRQ from the platform data
126 * @link_irq_name: The textual name of the link IRQ from the
127 * platform data if available
128 * @rx_irq: The IRQ number for the RX IRQ
129 * @tx_irq: The IRQ number for the TX IRQ
130 * @link_irq: The IRQ number for the link IRQ if available
131 * @regs_req: The resource request for the registers region
132 * @phyiface_req: The resource request for the phy/switch region
133 * if available
134 * @phyiface_regs: The ioremapped registers for the phy/switch if available
135 * @ring_base: The base pointer of the dma coherent memory for the rings
136 * @ring_base_dma: The DMA mapped equivalent of ring_base
137 * @tx_ring: The pointer in ring_base of the TX ring
138 * @tx_ring_used: The number of slots in the TX ring which are occupied
139 * @tx_ring_next_slot: The next slot to fill in the TX ring
140 * @tx_ring_dma: The DMA mapped equivalent of tx_ring
141 * @tx_buffers: The sk_buff mappings for the TX ring
142 * @txq_lock: A lock to protect the tx_buffers tx_ring_used etc variables
143 * @rx_ring: The pointer in ring_base of the RX ring
144 * @rx_ring_dma: The DMA mapped equivalent of rx_ring
145 * @rx_buffers: The sk_buff mappings for the RX ring
146 * @next_rx_desc_read: The next RX descriptor to read from on IRQ
147 * @msg_enable: The flags for which messages to emit
148 */
149struct ks8695_priv {
150 int in_suspend;
151 struct net_device *ndev;
152 struct device *dev;
153 enum ks8695_dtype dtype;
154 void __iomem *io_regs;
155
156 const char *rx_irq_name, *tx_irq_name, *link_irq_name;
157 int rx_irq, tx_irq, link_irq;
158
159 struct resource *regs_req, *phyiface_req;
160 void __iomem *phyiface_regs;
161
162 void *ring_base;
163 dma_addr_t ring_base_dma;
164
165 struct tx_ring_desc *tx_ring;
166 int tx_ring_used;
167 int tx_ring_next_slot;
168 dma_addr_t tx_ring_dma;
169 struct ks8695_skbuff tx_buffers[MAX_TX_DESC];
170 spinlock_t txq_lock;
171
172 struct rx_ring_desc *rx_ring;
173 dma_addr_t rx_ring_dma;
174 struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
175 int next_rx_desc_read;
176
177 int msg_enable;
178};
179
180/* Register access */
181
182/**
183 * ks8695_readreg - Read from a KS8695 ethernet register
184 * @ksp: The device to read from
185 * @reg: The register to read
186 */
187static inline u32
188ks8695_readreg(struct ks8695_priv *ksp, int reg)
189{
190 return readl(ksp->io_regs + reg);
191}
192
193/**
194 * ks8695_writereg - Write to a KS8695 ethernet register
195 * @ksp: The device to write to
196 * @reg: The register to write
197 * @value: The value to write to the register
198 */
199static inline void
200ks8695_writereg(struct ks8695_priv *ksp, int reg, u32 value)
201{
202 writel(value, ksp->io_regs + reg);
203}
204
205/* Utility functions */
206
207/**
208 * ks8695_port_type - Retrieve port-type as user-friendly string
209 * @ksp: The device to return the type for
210 *
211 * Returns a string indicating which of the WAN, LAN or HPNA
212 * ports this device is likely to represent.
213 */
214static const char *
215ks8695_port_type(struct ks8695_priv *ksp)
216{
217 switch (ksp->dtype) {
218 case KS8695_DTYPE_LAN:
219 return "LAN";
220 case KS8695_DTYPE_WAN:
221 return "WAN";
222 case KS8695_DTYPE_HPNA:
223 return "HPNA";
224 }
225
226 return "UNKNOWN";
227}
228
229/**
230 * ks8695_update_mac - Update the MAC registers in the device
231 * @ksp: The device to update
232 *
233 * Updates the MAC registers in the KS8695 device from the address in the
234 * net_device structure associated with this interface.
235 */
236static void
237ks8695_update_mac(struct ks8695_priv *ksp)
238{
239 /* Update the HW with the MAC from the net_device */
240 struct net_device *ndev = ksp->ndev;
241 u32 machigh, maclow;
242
243 maclow = ((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
244 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5] << 0));
245 machigh = ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1] << 0));
246
247 ks8695_writereg(ksp, KS8695_MAL, maclow);
248 ks8695_writereg(ksp, KS8695_MAH, machigh);
249
250}
251
252/**
253 * ks8695_refill_rxbuffers - Re-fill the RX buffer ring
254 * @ksp: The device to refill
255 *
256 * Iterates the RX ring of the device looking for empty slots.
257 * For each empty slot, we allocate and map a new SKB and give it
258 * to the hardware.
259 * This can be called from interrupt context safely.
260 */
261static void
262ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
263{
264 /* Run around the RX ring, filling in any missing sk_buff's */
265 int buff_n;
266
267 for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
268 if (!ksp->rx_buffers[buff_n].skb) {
269 struct sk_buff *skb = dev_alloc_skb(MAX_RXBUF_SIZE);
270 dma_addr_t mapping;
271
272 ksp->rx_buffers[buff_n].skb = skb;
273 if (skb == NULL) {
274 /* Failed to allocate one, perhaps
275 * we'll try again later.
276 */
277 break;
278 }
279
280 mapping = dma_map_single(ksp->dev, skb->data,
281 MAX_RXBUF_SIZE,
282 DMA_FROM_DEVICE);
283 if (unlikely(dma_mapping_error(ksp->dev, mapping))) {
284 /* Failed to DMA map this SKB, try later */
285 dev_kfree_skb_irq(skb);
286 ksp->rx_buffers[buff_n].skb = NULL;
287 break;
288 }
289 ksp->rx_buffers[buff_n].dma_ptr = mapping;
290 skb->dev = ksp->ndev;
291 ksp->rx_buffers[buff_n].length = MAX_RXBUF_SIZE;
292
293 /* Record this into the DMA ring */
294 ksp->rx_ring[buff_n].data_ptr = cpu_to_le32(mapping);
295 ksp->rx_ring[buff_n].length =
296 cpu_to_le32(MAX_RXBUF_SIZE);
297
298 wmb();
299
300 /* And give ownership over to the hardware */
301 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
302 }
303 }
304}
305
306/* Maximum number of multicast addresses which the KS8695 HW supports */
307#define KS8695_NR_ADDRESSES 16
308
309/**
310 * ks8695_init_partial_multicast - Init the mcast addr registers
311 * @ksp: The device to initialise
312 * @addr: The multicast address list to use
313 * @nr_addr: The number of addresses in the list
314 *
315 * This routine is a helper for ks8695_set_multicast - it writes
316 * the additional-address registers in the KS8695 ethernet device
317 * and cleans up any others left behind.
318 */
319static void
320ks8695_init_partial_multicast(struct ks8695_priv *ksp,
321 struct dev_mc_list *addr,
322 int nr_addr)
323{
324 u32 low, high;
325 int i;
326
327 for (i = 0; i < nr_addr; i++, addr = addr->next) {
328 /* Ran out of addresses? */
329 if (!addr)
330 break;
331 /* Ran out of space in chip? */
332 BUG_ON(i == KS8695_NR_ADDRESSES);
333
334 low = (addr->dmi_addr[2] << 24) | (addr->dmi_addr[3] << 16) |
335 (addr->dmi_addr[4] << 8) | (addr->dmi_addr[5]);
336 high = (addr->dmi_addr[0] << 8) | (addr->dmi_addr[1]);
337
338 ks8695_writereg(ksp, KS8695_AAL_(i), low);
339 ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
340 }
341
342 /* Clear the remaining Additional Station Addresses */
343 for (; i < KS8695_NR_ADDRESSES; i++) {
344 ks8695_writereg(ksp, KS8695_AAL_(i), 0);
345 ks8695_writereg(ksp, KS8695_AAH_(i), 0);
346 }
347}
348
349/* Interrupt handling */
350
351/**
352 * ks8695_tx_irq - Transmit IRQ handler
353 * @irq: The IRQ which went off (ignored)
354 * @dev_id: The net_device for the interrupt
355 *
356 * Process the TX ring, clearing out any transmitted slots.
357 * Allows the net_device to pass us new packets once slots are
358 * freed.
359 */
360static irqreturn_t
361ks8695_tx_irq(int irq, void *dev_id)
362{
363 struct net_device *ndev = (struct net_device *)dev_id;
364 struct ks8695_priv *ksp = netdev_priv(ndev);
365 int buff_n;
366
367 for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
368 if (ksp->tx_buffers[buff_n].skb &&
369 !(ksp->tx_ring[buff_n].owner & cpu_to_le32(TDES_OWN))) {
370 rmb();
371 /* An SKB which is not owned by HW is present */
372 /* Update the stats for the net_device */
373 ndev->stats.tx_packets++;
374 ndev->stats.tx_bytes += ksp->tx_buffers[buff_n].length;
375
376 /* Free the packet from the ring */
377 ksp->tx_ring[buff_n].data_ptr = 0;
378
379 /* Free the sk_buff */
380 dma_unmap_single(ksp->dev,
381 ksp->tx_buffers[buff_n].dma_ptr,
382 ksp->tx_buffers[buff_n].length,
383 DMA_TO_DEVICE);
384 dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
385 ksp->tx_buffers[buff_n].skb = NULL;
386 ksp->tx_ring_used--;
387 }
388 }
389
390 netif_wake_queue(ndev);
391
392 return IRQ_HANDLED;
393}
394
395/**
396 * ks8695_rx_irq - Receive IRQ handler
397 * @irq: The IRQ which went off (ignored)
398 * @dev_id: The net_device for the interrupt
399 *
400 * Process the RX ring, passing any received packets up to the
401 * host. If we received anything other than errors, we then
402 * refill the ring.
403 */
404static irqreturn_t
405ks8695_rx_irq(int irq, void *dev_id)
406{
407 struct net_device *ndev = (struct net_device *)dev_id;
408 struct ks8695_priv *ksp = netdev_priv(ndev);
409 struct sk_buff *skb;
410 int buff_n;
411 u32 flags;
412 int pktlen;
413 int last_rx_processed = -1;
414
415 buff_n = ksp->next_rx_desc_read;
416 do {
417 if (ksp->rx_buffers[buff_n].skb &&
418 !(ksp->rx_ring[buff_n].status & cpu_to_le32(RDES_OWN))) {
419 rmb();
420 flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
421 /* Found an SKB which we own, this means we
422 * received a packet
423 */
424 if ((flags & (RDES_FS | RDES_LS)) !=
425 (RDES_FS | RDES_LS)) {
426 /* This packet is not the first and
427 * the last segment. Therefore it is
428 * a "spanning" packet and we can't
429 * handle it
430 */
431 goto rx_failure;
432 }
433
434 if (flags & (RDES_ES | RDES_RE)) {
435 /* It's an error packet */
436 ndev->stats.rx_errors++;
437 if (flags & RDES_TL)
438 ndev->stats.rx_length_errors++;
439 if (flags & RDES_RF)
440 ndev->stats.rx_length_errors++;
441 if (flags & RDES_CE)
442 ndev->stats.rx_crc_errors++;
443 if (flags & RDES_RE)
444 ndev->stats.rx_missed_errors++;
445
446 goto rx_failure;
447 }
448
449 pktlen = flags & RDES_FLEN;
450 pktlen -= 4; /* Drop the CRC */
451
452 /* Retrieve the sk_buff */
453 skb = ksp->rx_buffers[buff_n].skb;
454
455 /* Clear it from the ring */
456 ksp->rx_buffers[buff_n].skb = NULL;
457 ksp->rx_ring[buff_n].data_ptr = 0;
458
459 /* Unmap the SKB */
460 dma_unmap_single(ksp->dev,
461 ksp->rx_buffers[buff_n].dma_ptr,
462 ksp->rx_buffers[buff_n].length,
463 DMA_FROM_DEVICE);
464
465 /* Relinquish the SKB to the network layer */
466 skb_put(skb, pktlen);
467 skb->protocol = eth_type_trans(skb, ndev);
468 netif_rx(skb);
469
470 /* Record stats */
471 ndev->last_rx = jiffies;
472 ndev->stats.rx_packets++;
473 ndev->stats.rx_bytes += pktlen;
474 goto rx_finished;
475
476rx_failure:
477 /* This ring entry is an error, but we can
478 * re-use the skb
479 */
480 /* Give the ring entry back to the hardware */
481 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
482rx_finished:
483 /* And note this as processed so we can start
484 * from here next time
485 */
486 last_rx_processed = buff_n;
487 } else {
488 /* Ran out of things to process, stop now */
489 break;
490 }
491 buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
492 } while (buff_n != ksp->next_rx_desc_read);
493
494 /* And note which RX descriptor we last did anything with */
495 if (likely(last_rx_processed != -1))
496 ksp->next_rx_desc_read =
497 (last_rx_processed + 1) & MAX_RX_DESC_MASK;
498
499 /* And refill the buffers */
500 ks8695_refill_rxbuffers(ksp);
501
502 /* Kick the RX DMA engine, in case it became suspended */
503 ks8695_writereg(ksp, KS8695_DRSC, 0);
504
505 return IRQ_HANDLED;
506}
507
508/**
509 * ks8695_link_irq - Link change IRQ handler
510 * @irq: The IRQ which went off (ignored)
511 * @dev_id: The net_device for the interrupt
512 *
513 * The WAN interface can generate an IRQ when the link changes,
514 * report this to the net layer and the user.
515 */
516static irqreturn_t
517ks8695_link_irq(int irq, void *dev_id)
518{
519 struct net_device *ndev = (struct net_device *)dev_id;
520 struct ks8695_priv *ksp = netdev_priv(ndev);
521 u32 ctrl;
522
523 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
524 if (ctrl & WMC_WLS) {
525 netif_carrier_on(ndev);
526 if (netif_msg_link(ksp))
527 dev_info(ksp->dev,
528 "%s: Link is now up (10%sMbps/%s-duplex)\n",
529 ndev->name,
530 (ctrl & WMC_WSS) ? "0" : "",
531 (ctrl & WMC_WDS) ? "Full" : "Half");
532 } else {
533 netif_carrier_off(ndev);
534 if (netif_msg_link(ksp))
535 dev_info(ksp->dev, "%s: Link is now down.\n",
536 ndev->name);
537 }
538
539 return IRQ_HANDLED;
540}
541
542
543/* KS8695 Device functions */
544
545/**
546 * ks8695_reset - Reset a KS8695 ethernet interface
547 * @ksp: The interface to reset
548 *
549 * Perform an engine reset of the interface and re-program it
550 * with sensible defaults.
551 */
552static void
553ks8695_reset(struct ks8695_priv *ksp)
554{
555 int reset_timeout = watchdog;
556 /* Issue the reset via the TX DMA control register */
557 ks8695_writereg(ksp, KS8695_DTXC, DTXC_TRST);
558 while (reset_timeout--) {
559 if (!(ks8695_readreg(ksp, KS8695_DTXC) & DTXC_TRST))
560 break;
561 msleep(1);
562 }
563
564 if (reset_timeout == 0) {
565 dev_crit(ksp->dev,
566 "Timeout waiting for DMA engines to reset\n");
567 /* And blithely carry on */
568 }
569
570 /* Definitely wait long enough before attempting to program
571 * the engines
572 */
573 msleep(10);
574
575 /* RX: unicast and broadcast */
576 ks8695_writereg(ksp, KS8695_DRXC, DRXC_RU | DRXC_RB);
577 /* TX: pad and add CRC */
578 ks8695_writereg(ksp, KS8695_DTXC, DTXC_TEP | DTXC_TAC);
579}
580
581/**
582 * ks8695_shutdown - Shut down a KS8695 ethernet interface
583 * @ksp: The interface to shut down
584 *
585 * This disables packet RX/TX, cleans up IRQs, drains the rings,
586 * and basically places the interface into a clean shutdown
587 * state.
588 */
589static void
590ks8695_shutdown(struct ks8695_priv *ksp)
591{
592 u32 ctrl;
593 int buff_n;
594
595 /* Disable packet transmission */
596 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
597 ks8695_writereg(ksp, KS8695_DTXC, ctrl & ~DTXC_TE);
598
599 /* Disable packet reception */
600 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
601 ks8695_writereg(ksp, KS8695_DRXC, ctrl & ~DRXC_RE);
602
603 /* Release the IRQs */
604 free_irq(ksp->rx_irq, ksp->ndev);
605 free_irq(ksp->tx_irq, ksp->ndev);
606 if (ksp->link_irq != -1)
607 free_irq(ksp->link_irq, ksp->ndev);
608
609 /* Throw away any pending TX packets */
610 for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
611 if (ksp->tx_buffers[buff_n].skb) {
612 /* Remove this SKB from the TX ring */
613 ksp->tx_ring[buff_n].owner = 0;
614 ksp->tx_ring[buff_n].status = 0;
615 ksp->tx_ring[buff_n].data_ptr = 0;
616
617 /* Unmap and bin this SKB */
618 dma_unmap_single(ksp->dev,
619 ksp->tx_buffers[buff_n].dma_ptr,
620 ksp->tx_buffers[buff_n].length,
621 DMA_TO_DEVICE);
622 dev_kfree_skb_irq(ksp->tx_buffers[buff_n].skb);
623 ksp->tx_buffers[buff_n].skb = NULL;
624 }
625 }
626
627 /* Purge the RX buffers */
628 for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
629 if (ksp->rx_buffers[buff_n].skb) {
630 /* Remove the SKB from the RX ring */
631 ksp->rx_ring[buff_n].status = 0;
632 ksp->rx_ring[buff_n].data_ptr = 0;
633
634 /* Unmap and bin the SKB */
635 dma_unmap_single(ksp->dev,
636 ksp->rx_buffers[buff_n].dma_ptr,
637 ksp->rx_buffers[buff_n].length,
638 DMA_FROM_DEVICE);
639 dev_kfree_skb_irq(ksp->rx_buffers[buff_n].skb);
640 ksp->rx_buffers[buff_n].skb = NULL;
641 }
642 }
643}
644
645
646/**
647 * ks8695_setup_irq - IRQ setup helper function
648 * @irq: The IRQ number to claim
649 * @irq_name: The name to give the IRQ claimant
650 * @handler: The function to call to handle the IRQ
651 * @ndev: The net_device to pass in as the dev_id argument to the handler
652 *
653 * Return 0 on success.
654 */
655static int
656ks8695_setup_irq(int irq, const char *irq_name,
657 irq_handler_t handler, struct net_device *ndev)
658{
659 int ret;
660
661 ret = request_irq(irq, handler, IRQF_SHARED, irq_name, ndev);
662
663 if (ret) {
664 dev_err(&ndev->dev, "failure to request IRQ %d\n", irq);
665 return ret;
666 }
667
668 return 0;
669}
670
671/**
672 * ks8695_init_net - Initialise a KS8695 ethernet interface
673 * @ksp: The interface to initialise
674 *
675 * This routine fills the RX ring, initialises the DMA engines,
676 * allocates the IRQs and then starts the packet TX and RX
677 * engines.
678 */
679static int
680ks8695_init_net(struct ks8695_priv *ksp)
681{
682 int ret;
683 u32 ctrl;
684
685 ks8695_refill_rxbuffers(ksp);
686
687 /* Initialise the DMA engines */
688 ks8695_writereg(ksp, KS8695_RDLB, (u32) ksp->rx_ring_dma);
689 ks8695_writereg(ksp, KS8695_TDLB, (u32) ksp->tx_ring_dma);
690
691 /* Request the IRQs */
692 ret = ks8695_setup_irq(ksp->rx_irq, ksp->rx_irq_name,
693 ks8695_rx_irq, ksp->ndev);
694 if (ret)
695 return ret;
696 ret = ks8695_setup_irq(ksp->tx_irq, ksp->tx_irq_name,
697 ks8695_tx_irq, ksp->ndev);
698 if (ret)
699 return ret;
700 if (ksp->link_irq != -1) {
701 ret = ks8695_setup_irq(ksp->link_irq, ksp->link_irq_name,
702 ks8695_link_irq, ksp->ndev);
703 if (ret)
704 return ret;
705 }
706
707 /* Set up the ring indices */
708 ksp->next_rx_desc_read = 0;
709 ksp->tx_ring_next_slot = 0;
710 ksp->tx_ring_used = 0;
711
712 /* Bring up transmission */
713 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
714 /* Enable packet transmission */
715 ks8695_writereg(ksp, KS8695_DTXC, ctrl | DTXC_TE);
716
717 /* Bring up the reception */
718 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
719 /* Enable packet reception */
720 ks8695_writereg(ksp, KS8695_DRXC, ctrl | DRXC_RE);
721 /* And start the DMA engine */
722 ks8695_writereg(ksp, KS8695_DRSC, 0);
723
724 /* All done */
725 return 0;
726}
727
728/**
729 * ks8695_release_device - HW resource release for KS8695 e-net
730 * @ksp: The device to be freed
731 *
732 * This unallocates io memory regions, dma-coherent regions etc
733 * which were allocated in ks8695_probe.
734 */
735static void
736ks8695_release_device(struct ks8695_priv *ksp)
737{
738 /* Unmap the registers */
739 iounmap(ksp->io_regs);
740 if (ksp->phyiface_regs)
741 iounmap(ksp->phyiface_regs);
742
743 /* And release the request */
744 release_resource(ksp->regs_req);
745 kfree(ksp->regs_req);
746 if (ksp->phyiface_req) {
747 release_resource(ksp->phyiface_req);
748 kfree(ksp->phyiface_req);
749 }
750
751 /* Free the ring buffers */
752 dma_free_coherent(ksp->dev, RING_DMA_SIZE,
753 ksp->ring_base, ksp->ring_base_dma);
754}
755
756/* Ethtool support */
757
758/**
759 * ks8695_get_msglevel - Get the messages enabled for emission
760 * @ndev: The network device to read from
761 */
762static u32
763ks8695_get_msglevel(struct net_device *ndev)
764{
765 struct ks8695_priv *ksp = netdev_priv(ndev);
766
767 return ksp->msg_enable;
768}
769
770/**
771 * ks8695_set_msglevel - Set the messages enabled for emission
772 * @ndev: The network device to configure
773 * @value: The messages to set for emission
774 */
775static void
776ks8695_set_msglevel(struct net_device *ndev, u32 value)
777{
778 struct ks8695_priv *ksp = netdev_priv(ndev);
779
780 ksp->msg_enable = value;
781}
782
783/**
784 * ks8695_get_settings - Get device-specific settings.
785 * @ndev: The network device to read settings from
786 * @cmd: The ethtool structure to read into
787 */
788static int
789ks8695_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
790{
791 struct ks8695_priv *ksp = netdev_priv(ndev);
792 u32 ctrl;
793
794 /* All ports on the KS8695 support these... */
795 cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
796 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
797 SUPPORTED_TP | SUPPORTED_MII);
798 cmd->transceiver = XCVR_INTERNAL;
799
800 /* Port specific extras */
801 switch (ksp->dtype) {
802 case KS8695_DTYPE_HPNA:
803 cmd->phy_address = 0;
804 /* not supported for HPNA */
805 cmd->autoneg = AUTONEG_DISABLE;
806
807 /* BUG: Erm, dtype hpna implies no phy regs */
808 /*
809 ctrl = readl(KS8695_MISC_VA + KS8695_HMC);
810 cmd->speed = (ctrl & HMC_HSS) ? SPEED_100 : SPEED_10;
811 cmd->duplex = (ctrl & HMC_HDS) ? DUPLEX_FULL : DUPLEX_HALF;
812 */
813 return -EOPNOTSUPP;
814 case KS8695_DTYPE_WAN:
815 cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
816 cmd->port = PORT_MII;
817 cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
818 cmd->phy_address = 0;
819
820 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
821 if ((ctrl & WMC_WAND) == 0) {
822 /* auto-negotiation is enabled */
823 cmd->advertising |= ADVERTISED_Autoneg;
824 if (ctrl & WMC_WANA100F)
825 cmd->advertising |= ADVERTISED_100baseT_Full;
826 if (ctrl & WMC_WANA100H)
827 cmd->advertising |= ADVERTISED_100baseT_Half;
828 if (ctrl & WMC_WANA10F)
829 cmd->advertising |= ADVERTISED_10baseT_Full;
830 if (ctrl & WMC_WANA10H)
831 cmd->advertising |= ADVERTISED_10baseT_Half;
832 if (ctrl & WMC_WANAP)
833 cmd->advertising |= ADVERTISED_Pause;
834 cmd->autoneg = AUTONEG_ENABLE;
835
836 cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
837 cmd->duplex = (ctrl & WMC_WDS) ?
838 DUPLEX_FULL : DUPLEX_HALF;
839 } else {
840 /* auto-negotiation is disabled */
841 cmd->autoneg = AUTONEG_DISABLE;
842
843 cmd->speed = (ctrl & WMC_WANF100) ?
844 SPEED_100 : SPEED_10;
845 cmd->duplex = (ctrl & WMC_WANFF) ?
846 DUPLEX_FULL : DUPLEX_HALF;
847 }
848 break;
849 case KS8695_DTYPE_LAN:
850 return -EOPNOTSUPP;
851 }
852
853 return 0;
854}
855
856/**
857 * ks8695_set_settings - Set device-specific settings.
858 * @ndev: The network device to configure
859 * @cmd: The settings to configure
860 */
861static int
862ks8695_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
863{
864 struct ks8695_priv *ksp = netdev_priv(ndev);
865 u32 ctrl;
866
867 if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
868 return -EINVAL;
869 if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
870 return -EINVAL;
871 if (cmd->port != PORT_MII)
872 return -EINVAL;
873 if (cmd->transceiver != XCVR_INTERNAL)
874 return -EINVAL;
875 if ((cmd->autoneg != AUTONEG_DISABLE) &&
876 (cmd->autoneg != AUTONEG_ENABLE))
877 return -EINVAL;
878
879 if (cmd->autoneg == AUTONEG_ENABLE) {
880 if ((cmd->advertising & (ADVERTISED_10baseT_Half |
881 ADVERTISED_10baseT_Full |
882 ADVERTISED_100baseT_Half |
883 ADVERTISED_100baseT_Full)) == 0)
884 return -EINVAL;
885
886 switch (ksp->dtype) {
887 case KS8695_DTYPE_HPNA:
888 /* HPNA does not support auto-negotiation. */
889 return -EINVAL;
890 case KS8695_DTYPE_WAN:
891 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
892
893 ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
894 WMC_WANA10F | WMC_WANA10H);
895 if (cmd->advertising & ADVERTISED_100baseT_Full)
896 ctrl |= WMC_WANA100F;
897 if (cmd->advertising & ADVERTISED_100baseT_Half)
898 ctrl |= WMC_WANA100H;
899 if (cmd->advertising & ADVERTISED_10baseT_Full)
900 ctrl |= WMC_WANA10F;
901 if (cmd->advertising & ADVERTISED_10baseT_Half)
902 ctrl |= WMC_WANA10H;
903
904 /* force a re-negotiation */
905 ctrl |= WMC_WANR;
906 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
907 break;
908 case KS8695_DTYPE_LAN:
909 return -EOPNOTSUPP;
910 }
911
912 } else {
913 switch (ksp->dtype) {
914 case KS8695_DTYPE_HPNA:
915 /* BUG: dtype_hpna implies no phy registers */
916 /*
917 ctrl = __raw_readl(KS8695_MISC_VA + KS8695_HMC);
918
919 ctrl &= ~(HMC_HSS | HMC_HDS);
920 if (cmd->speed == SPEED_100)
921 ctrl |= HMC_HSS;
922 if (cmd->duplex == DUPLEX_FULL)
923 ctrl |= HMC_HDS;
924
925 __raw_writel(ctrl, KS8695_MISC_VA + KS8695_HMC);
926 */
927 return -EOPNOTSUPP;
928 case KS8695_DTYPE_WAN:
929 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
930
931 /* disable auto-negotiation */
932 ctrl |= WMC_WAND;
933 ctrl &= ~(WMC_WANF100 | WMC_WANFF);
934
935 if (cmd->speed == SPEED_100)
936 ctrl |= WMC_WANF100;
937 if (cmd->duplex == DUPLEX_FULL)
938 ctrl |= WMC_WANFF;
939
940 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
941 break;
942 case KS8695_DTYPE_LAN:
943 return -EOPNOTSUPP;
944 }
945 }
946
947 return 0;
948}
949
950/**
951 * ks8695_nwayreset - Restart the autonegotiation on the port.
952 * @ndev: The network device to restart autoneotiation on
953 */
954static int
955ks8695_nwayreset(struct net_device *ndev)
956{
957 struct ks8695_priv *ksp = netdev_priv(ndev);
958 u32 ctrl;
959
960 switch (ksp->dtype) {
961 case KS8695_DTYPE_HPNA:
962 /* No phy means no autonegotiation on hpna */
963 return -EINVAL;
964 case KS8695_DTYPE_WAN:
965 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
966
967 if ((ctrl & WMC_WAND) == 0)
968 writel(ctrl | WMC_WANR,
969 ksp->phyiface_regs + KS8695_WMC);
970 else
971 /* auto-negotiation not enabled */
972 return -EINVAL;
973 break;
974 case KS8695_DTYPE_LAN:
975 return -EOPNOTSUPP;
976 }
977
978 return 0;
979}
980
981/**
982 * ks8695_get_link - Retrieve link status of network interface
983 * @ndev: The network interface to retrive the link status of.
984 */
985static u32
986ks8695_get_link(struct net_device *ndev)
987{
988 struct ks8695_priv *ksp = netdev_priv(ndev);
989 u32 ctrl;
990
991 switch (ksp->dtype) {
992 case KS8695_DTYPE_HPNA:
993 /* HPNA always has link */
994 return 1;
995 case KS8695_DTYPE_WAN:
996 /* WAN we can read the PHY for */
997 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
998 return ctrl & WMC_WLS;
999 case KS8695_DTYPE_LAN:
1000 return -EOPNOTSUPP;
1001 }
1002 return 0;
1003}
1004
1005/**
1006 * ks8695_get_pause - Retrieve network pause/flow-control advertising
1007 * @ndev: The device to retrieve settings from
1008 * @param: The structure to fill out with the information
1009 */
1010static void
1011ks8695_get_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
1012{
1013 struct ks8695_priv *ksp = netdev_priv(ndev);
1014 u32 ctrl;
1015
1016 switch (ksp->dtype) {
1017 case KS8695_DTYPE_HPNA:
1018 /* No phy link on hpna to configure */
1019 return;
1020 case KS8695_DTYPE_WAN:
1021 ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
1022
1023 /* advertise Pause */
1024 param->autoneg = (ctrl & WMC_WANAP);
1025
1026 /* current Rx Flow-control */
1027 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1028 param->rx_pause = (ctrl & DRXC_RFCE);
1029
1030 /* current Tx Flow-control */
1031 ctrl = ks8695_readreg(ksp, KS8695_DTXC);
1032 param->tx_pause = (ctrl & DTXC_TFCE);
1033 break;
1034 case KS8695_DTYPE_LAN:
1035 /* The LAN's "phy" is a direct-attached switch */
1036 return;
1037 }
1038}
1039
1040/**
1041 * ks8695_set_pause - Configure pause/flow-control
1042 * @ndev: The device to configure
1043 * @param: The pause parameters to set
1044 *
1045 * TODO: Implement this
1046 */
1047static int
1048ks8695_set_pause(struct net_device *ndev, struct ethtool_pauseparam *param)
1049{
1050 return -EOPNOTSUPP;
1051}
1052
1053/**
1054 * ks8695_get_drvinfo - Retrieve driver information
1055 * @ndev: The network device to retrieve info about
1056 * @info: The info structure to fill out.
1057 */
1058static void
1059ks8695_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
1060{
1061 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1062 strlcpy(info->version, MODULEVERSION, sizeof(info->version));
1063 strlcpy(info->bus_info, ndev->dev.parent->bus_id,
1064 sizeof(info->bus_info));
1065}
1066
1067static struct ethtool_ops ks8695_ethtool_ops = {
1068 .get_msglevel = ks8695_get_msglevel,
1069 .set_msglevel = ks8695_set_msglevel,
1070 .get_settings = ks8695_get_settings,
1071 .set_settings = ks8695_set_settings,
1072 .nway_reset = ks8695_nwayreset,
1073 .get_link = ks8695_get_link,
1074 .get_pauseparam = ks8695_get_pause,
1075 .set_pauseparam = ks8695_set_pause,
1076 .get_drvinfo = ks8695_get_drvinfo,
1077};
1078
1079/* Network device interface functions */
1080
1081/**
1082 * ks8695_set_mac - Update MAC in net dev and HW
1083 * @ndev: The network device to update
1084 * @addr: The new MAC address to set
1085 */
1086static int
1087ks8695_set_mac(struct net_device *ndev, void *addr)
1088{
1089 struct ks8695_priv *ksp = netdev_priv(ndev);
1090 struct sockaddr *address = addr;
1091
1092 if (!is_valid_ether_addr(address->sa_data))
1093 return -EADDRNOTAVAIL;
1094
1095 memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
1096
1097 ks8695_update_mac(ksp);
1098
1099 dev_dbg(ksp->dev, "%s: Updated MAC address to %pM\n",
1100 ndev->name, ndev->dev_addr);
1101
1102 return 0;
1103}
1104
1105/**
1106 * ks8695_set_multicast - Set up the multicast behaviour of the interface
1107 * @ndev: The net_device to configure
1108 *
1109 * This routine, called by the net layer, configures promiscuity
1110 * and multicast reception behaviour for the interface.
1111 */
1112static void
1113ks8695_set_multicast(struct net_device *ndev)
1114{
1115 struct ks8695_priv *ksp = netdev_priv(ndev);
1116 u32 ctrl;
1117
1118 ctrl = ks8695_readreg(ksp, KS8695_DRXC);
1119
1120 if (ndev->flags & IFF_PROMISC) {
1121 /* enable promiscuous mode */
1122 ctrl |= DRXC_RA;
1123 } else if (ndev->flags & ~IFF_PROMISC) {
1124 /* disable promiscuous mode */
1125 ctrl &= ~DRXC_RA;
1126 }
1127
1128 if (ndev->flags & IFF_ALLMULTI) {
1129 /* enable all multicast mode */
1130 ctrl |= DRXC_RM;
1131 } else if (ndev->mc_count > KS8695_NR_ADDRESSES) {
1132 /* more specific multicast addresses than can be
1133 * handled in hardware
1134 */
1135 ctrl |= DRXC_RM;
1136 } else {
1137 /* enable specific multicasts */
1138 ctrl &= ~DRXC_RM;
1139 ks8695_init_partial_multicast(ksp, ndev->mc_list,
1140 ndev->mc_count);
1141 }
1142
1143 ks8695_writereg(ksp, KS8695_DRXC, ctrl);
1144}
1145
/**
 * ks8695_timeout - Handle a network tx/rx timeout.
 * @ndev: The net_device which timed out.
 *
 * A network transaction timed out, reset the device.
 */
static void
ks8695_timeout(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	/* Quiesce TX and shut the hardware down before resetting */
	netif_stop_queue(ndev);
	ks8695_shutdown(ksp);

	ks8695_reset(ksp);

	/* Re-program the station address after the reset */
	ks8695_update_mac(ksp);

	/* We ignore the return from this since it managed to init
	 * before it probably will be okay to init again.
	 */
	ks8695_init_net(ksp);

	/* Reconfigure promiscuity etc */
	ks8695_set_multicast(ndev);

	/* And start the TX queue once more */
	netif_start_queue(ndev);
}
1175
/**
 * ks8695_start_xmit - Start a packet transmission
 * @skb: The packet to transmit
 * @ndev: The network device to send the packet on
 *
 * This routine, called by the net layer, takes ownership of the
 * sk_buff and adds it to the TX ring. It then kicks the TX DMA
 * engine to ensure transmission begins.
 *
 * Returns NETDEV_TX_OK on success, NETDEV_TX_BUSY (without
 * consuming the skb) when the ring is full or the DMA mapping
 * fails.
 */
static int
ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);
	int buff_n;
	dma_addr_t dmap;

	spin_lock_irq(&ksp->txq_lock);

	if (ksp->tx_ring_used == MAX_TX_DESC) {
		/* Somehow we got entered when we have no room */
		spin_unlock_irq(&ksp->txq_lock);
		return NETDEV_TX_BUSY;
	}

	buff_n = ksp->tx_ring_next_slot;

	/* The slot about to be used must be free; a leftover skb
	 * here means ring accounting is broken. */
	BUG_ON(ksp->tx_buffers[buff_n].skb);

	dmap = dma_map_single(ksp->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ksp->dev, dmap))) {
		/* Failed to DMA map this SKB, give it back for now */
		spin_unlock_irq(&ksp->txq_lock);
		dev_dbg(ksp->dev, "%s: Could not map DMA memory for "\
			"transmission, trying later\n", ndev->name);
		return NETDEV_TX_BUSY;
	}

	ksp->tx_buffers[buff_n].dma_ptr = dmap;
	/* Mapped okay, store the buffer pointer and length for later */
	ksp->tx_buffers[buff_n].skb = skb;
	ksp->tx_buffers[buff_n].length = skb->len;

	/* Fill out the TX descriptor */
	ksp->tx_ring[buff_n].data_ptr =
		cpu_to_le32(ksp->tx_buffers[buff_n].dma_ptr);
	ksp->tx_ring[buff_n].status =
		cpu_to_le32(TDES_IC | TDES_FS | TDES_LS |
			    (skb->len & TDES_TBS));

	/* Ensure the descriptor contents are visible before the
	 * ownership bit below hands it to the hardware. */
	wmb();

	/* Hand it over to the hardware */
	ksp->tx_ring[buff_n].owner = cpu_to_le32(TDES_OWN);

	if (++ksp->tx_ring_used == MAX_TX_DESC)
		netif_stop_queue(ndev);

	ndev->trans_start = jiffies;

	/* Kick the TX DMA in case it decided to go IDLE */
	ks8695_writereg(ksp, KS8695_DTSC, 0);

	/* And update the next ring slot */
	ksp->tx_ring_next_slot = (buff_n + 1) & MAX_TX_DESC_MASK;

	spin_unlock_irq(&ksp->txq_lock);
	return NETDEV_TX_OK;
}
1244
/**
 * ks8695_stop - Stop (shutdown) a KS8695 ethernet interface
 * @ndev: The net_device to stop
 *
 * Quiesces the transmit queue, drops the carrier and shuts the
 * hardware down.
 */
static int
ks8695_stop(struct net_device *ndev)
{
	struct ks8695_priv *ksp = netdev_priv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	ks8695_shutdown(ksp);

	return 0;
}
1264
1265/**
1266 * ks8695_open - Open (bring up) a KS8695 ethernet interface
1267 * @ndev: The net_device to open
1268 *
1269 * This resets, configures the MAC, initialises the RX ring and
1270 * DMA engines and starts the TX queue for a KS8695 ethernet
1271 * device.
1272 */
1273static int
1274ks8695_open(struct net_device *ndev)
1275{
1276 struct ks8695_priv *ksp = netdev_priv(ndev);
1277 int ret;
1278
1279 if (!is_valid_ether_addr(ndev->dev_addr))
1280 return -EADDRNOTAVAIL;
1281
1282 ks8695_reset(ksp);
1283
1284 ks8695_update_mac(ksp);
1285
1286 ret = ks8695_init_net(ksp);
1287 if (ret) {
1288 ks8695_shutdown(ksp);
1289 return ret;
1290 }
1291
1292 netif_start_queue(ndev);
1293
1294 return 0;
1295}
1296
1297/* Platform device driver */
1298
1299/**
1300 * ks8695_init_switch - Init LAN switch to known good defaults.
1301 * @ksp: The device to initialise
1302 *
1303 * This initialises the LAN switch in the KS8695 to a known-good
1304 * set of defaults.
1305 */
1306static void __devinit
1307ks8695_init_switch(struct ks8695_priv *ksp)
1308{
1309 u32 ctrl;
1310
1311 /* Default value for SEC0 according to datasheet */
1312 ctrl = 0x40819e00;
1313
1314 /* LED0 = Speed LED1 = Link/Activity */
1315 ctrl &= ~(SEC0_LLED1S | SEC0_LLED0S);
1316 ctrl |= (LLED0S_LINK | LLED1S_LINK_ACTIVITY);
1317
1318 /* Enable Switch */
1319 ctrl |= SEC0_ENABLE;
1320
1321 writel(ctrl, ksp->phyiface_regs + KS8695_SEC0);
1322
1323 /* Defaults for SEC1 */
1324 writel(0x9400100, ksp->phyiface_regs + KS8695_SEC1);
1325}
1326
1327/**
1328 * ks8695_init_wan_phy - Initialise the WAN PHY to sensible defaults
1329 * @ksp: The device to initialise
1330 *
1331 * This initialises a KS8695's WAN phy to sensible values for
1332 * autonegotiation etc.
1333 */
1334static void __devinit
1335ks8695_init_wan_phy(struct ks8695_priv *ksp)
1336{
1337 u32 ctrl;
1338
1339 /* Support auto-negotiation */
1340 ctrl = (WMC_WANAP | WMC_WANA100F | WMC_WANA100H |
1341 WMC_WANA10F | WMC_WANA10H);
1342
1343 /* LED0 = Activity , LED1 = Link */
1344 ctrl |= (WLED0S_ACTIVITY | WLED1S_LINK);
1345
1346 /* Restart Auto-negotiation */
1347 ctrl |= WMC_WANR;
1348
1349 writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
1350
1351 writel(0, ksp->phyiface_regs + KS8695_WPPM);
1352 writel(0, ksp->phyiface_regs + KS8695_PPS);
1353}
1354
/* net_device callbacks shared by all three KS8695 port types */
static const struct net_device_ops ks8695_netdev_ops = {
	.ndo_open = ks8695_open,
	.ndo_stop = ks8695_stop,
	.ndo_start_xmit = ks8695_start_xmit,
	.ndo_tx_timeout = ks8695_timeout,
	.ndo_set_mac_address = ks8695_set_mac,
	.ndo_set_multicast_list = ks8695_set_multicast,
};
1363
/**
 * ks8695_probe - Probe and initialise a KS8695 ethernet interface
 * @pdev: The platform device to probe
 *
 * Initialise a KS8695 ethernet device from platform data.
 *
 * This driver requires at least one IORESOURCE_MEM for the
 * registers and two IORESOURCE_IRQ for the RX and TX IRQs
 * respectively. It can optionally take an additional
 * IORESOURCE_MEM for the switch or phy in the case of the lan or
 * wan ports, and an IORESOURCE_IRQ for the link IRQ for the wan
 * port.
 *
 * Returns 0 on success or a negative errno.  On any failure the
 * claimed resources are released via ks8695_release_device() and
 * the net_device is freed.
 */
static int __devinit
ks8695_probe(struct platform_device *pdev)
{
	struct ks8695_priv *ksp;
	struct net_device *ndev;
	struct resource *regs_res, *phyiface_res;
	struct resource *rxirq_res, *txirq_res, *linkirq_res;
	int ret = 0;
	int buff_n;
	u32 machigh, maclow;

	/* Initialise a net_device */
	ndev = alloc_etherdev(sizeof(struct ks8695_priv));
	if (!ndev) {
		dev_err(&pdev->dev, "could not allocate device.\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	dev_dbg(&pdev->dev, "ks8695_probe() called\n");

	/* Configure our private structure a little */
	ksp = netdev_priv(ndev);
	/* NOTE(review): alloc_etherdev() already zeroes the private
	 * area, so this memset is redundant but harmless. */
	memset(ksp, 0, sizeof(struct ks8695_priv));

	ksp->dev = &pdev->dev;
	ksp->ndev = ndev;
	ksp->msg_enable = NETIF_MSG_LINK;

	/* Retrieve resources */
	regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	phyiface_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);

	rxirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	txirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	linkirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 2);

	/* Registers and RX/TX IRQs are mandatory; the phy/switch
	 * window and link IRQ are optional and select the port type
	 * further below. */
	if (!(regs_res && rxirq_res && txirq_res)) {
		dev_err(ksp->dev, "insufficient resources\n");
		ret = -ENOENT;
		goto failure;
	}

	ksp->regs_req = request_mem_region(regs_res->start,
					   resource_size(regs_res),
					   pdev->name);

	if (!ksp->regs_req) {
		dev_err(ksp->dev, "cannot claim register space\n");
		ret = -EIO;
		goto failure;
	}

	ksp->io_regs = ioremap(regs_res->start, resource_size(regs_res));

	if (!ksp->io_regs) {
		dev_err(ksp->dev, "failed to ioremap registers\n");
		ret = -EINVAL;
		goto failure;
	}

	if (phyiface_res) {
		ksp->phyiface_req =
			request_mem_region(phyiface_res->start,
					   resource_size(phyiface_res),
					   phyiface_res->name);

		if (!ksp->phyiface_req) {
			dev_err(ksp->dev,
				"cannot claim switch register space\n");
			ret = -EIO;
			goto failure;
		}

		ksp->phyiface_regs = ioremap(phyiface_res->start,
					     resource_size(phyiface_res));

		if (!ksp->phyiface_regs) {
			dev_err(ksp->dev,
				"failed to ioremap switch registers\n");
			ret = -EINVAL;
			goto failure;
		}
	}

	/* IRQ numbers, with fallback names when the resources are
	 * anonymous */
	ksp->rx_irq = rxirq_res->start;
	ksp->rx_irq_name = rxirq_res->name ? rxirq_res->name : "Ethernet RX";
	ksp->tx_irq = txirq_res->start;
	ksp->tx_irq_name = txirq_res->name ? txirq_res->name : "Ethernet TX";
	ksp->link_irq = (linkirq_res ? linkirq_res->start : -1);
	ksp->link_irq_name = (linkirq_res && linkirq_res->name) ?
		linkirq_res->name : "Ethernet Link";

	/* driver system setup */
	ndev->netdev_ops = &ks8695_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
	ndev->watchdog_timeo	 = msecs_to_jiffies(watchdog);

	/* Retrieve the default MAC addr from the chip. */
	/* The bootloader should have left it in there for us. */

	machigh = ks8695_readreg(ksp, KS8695_MAH);
	maclow = ks8695_readreg(ksp, KS8695_MAL);

	/* Unpack the 48-bit address from the two station-address
	 * registers (high 16 bits in MAH, low 32 in MAL) */
	ndev->dev_addr[0] = (machigh >> 8) & 0xFF;
	ndev->dev_addr[1] = machigh & 0xFF;
	ndev->dev_addr[2] = (maclow >> 24) & 0xFF;
	ndev->dev_addr[3] = (maclow >> 16) & 0xFF;
	ndev->dev_addr[4] = (maclow >> 8) & 0xFF;
	ndev->dev_addr[5] = maclow & 0xFF;

	if (!is_valid_ether_addr(ndev->dev_addr))
		dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
			 "set using ifconfig\n", ndev->name);

	/* In order to be efficient memory-wise, we allocate both
	 * rings in one go.
	 */
	ksp->ring_base = dma_alloc_coherent(&pdev->dev, RING_DMA_SIZE,
					    &ksp->ring_base_dma, GFP_KERNEL);
	if (!ksp->ring_base) {
		ret = -ENOMEM;
		goto failure;
	}

	/* Specify the TX DMA ring buffer */
	ksp->tx_ring = ksp->ring_base;
	ksp->tx_ring_dma = ksp->ring_base_dma;

	/* And initialise the queue's lock */
	spin_lock_init(&ksp->txq_lock);

	/* Specify the RX DMA ring buffer */
	ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
	ksp->rx_ring_dma = ksp->ring_base_dma + TX_RING_DMA_SIZE;

	/* Zero the descriptor rings */
	memset(ksp->tx_ring, 0, TX_RING_DMA_SIZE);
	memset(ksp->rx_ring, 0, RX_RING_DMA_SIZE);

	/* Build the rings: link each descriptor to the next,
	 * wrapping at the end via the ...DESC_MASK */
	for (buff_n = 0; buff_n < MAX_TX_DESC; ++buff_n) {
		ksp->tx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->tx_ring_dma +
				    (sizeof(struct tx_ring_desc) *
				     ((buff_n + 1) & MAX_TX_DESC_MASK)));
	}

	for (buff_n = 0; buff_n < MAX_RX_DESC; ++buff_n) {
		ksp->rx_ring[buff_n].next_desc =
			cpu_to_le32(ksp->rx_ring_dma +
				    (sizeof(struct rx_ring_desc) *
				     ((buff_n + 1) & MAX_RX_DESC_MASK)));
	}

	/* Initialise the port (physically): the combination of the
	 * optional phy/switch window and link IRQ selects the port
	 * type (LAN switch, WAN phy, or bare HPNA). */
	if (ksp->phyiface_regs && ksp->link_irq == -1) {
		ks8695_init_switch(ksp);
		ksp->dtype = KS8695_DTYPE_LAN;
	} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
		ks8695_init_wan_phy(ksp);
		ksp->dtype = KS8695_DTYPE_WAN;
	} else {
		/* No initialisation since HPNA does not have a PHY */
		ksp->dtype = KS8695_DTYPE_HPNA;
	}

	/* And bring up the net_device with the net core */
	platform_set_drvdata(pdev, ndev);
	ret = register_netdev(ndev);

	if (ret == 0) {
		dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
			 ks8695_port_type(ksp), ndev->dev_addr);
	} else {
		/* Report the failure to register the net_device */
		dev_err(ksp->dev, "ks8695net: failed to register netdev.\n");
		goto failure;
	}

	/* All is well */
	return 0;

	/* Error exit path */
failure:
	ks8695_release_device(ksp);
	free_netdev(ndev);

	return ret;
}
1568
1569/**
1570 * ks8695_drv_suspend - Suspend a KS8695 ethernet platform device.
1571 * @pdev: The device to suspend
1572 * @state: The suspend state
1573 *
1574 * This routine detaches and shuts down a KS8695 ethernet device.
1575 */
1576static int
1577ks8695_drv_suspend(struct platform_device *pdev, pm_message_t state)
1578{
1579 struct net_device *ndev = platform_get_drvdata(pdev);
1580 struct ks8695_priv *ksp = netdev_priv(ndev);
1581
1582 ksp->in_suspend = 1;
1583
1584 if (netif_running(ndev)) {
1585 netif_device_detach(ndev);
1586 ks8695_shutdown(ksp);
1587 }
1588
1589 return 0;
1590}
1591
1592/**
1593 * ks8695_drv_resume - Resume a KS8695 ethernet platform device.
1594 * @pdev: The device to resume
1595 *
1596 * This routine re-initialises and re-attaches a KS8695 ethernet
1597 * device.
1598 */
1599static int
1600ks8695_drv_resume(struct platform_device *pdev)
1601{
1602 struct net_device *ndev = platform_get_drvdata(pdev);
1603 struct ks8695_priv *ksp = netdev_priv(ndev);
1604
1605 if (netif_running(ndev)) {
1606 ks8695_reset(ksp);
1607 ks8695_init_net(ksp);
1608 ks8695_set_multicast(ndev);
1609 netif_device_attach(ndev);
1610 }
1611
1612 ksp->in_suspend = 0;
1613
1614 return 0;
1615}
1616
1617/**
1618 * ks8695_drv_remove - Remove a KS8695 net device on driver unload.
1619 * @pdev: The platform device to remove
1620 *
1621 * This unregisters and releases a KS8695 ethernet device.
1622 */
1623static int __devexit
1624ks8695_drv_remove(struct platform_device *pdev)
1625{
1626 struct net_device *ndev = platform_get_drvdata(pdev);
1627 struct ks8695_priv *ksp = netdev_priv(ndev);
1628
1629 platform_set_drvdata(pdev, NULL);
1630
1631 unregister_netdev(ndev);
1632 ks8695_release_device(ksp);
1633 free_netdev(ndev);
1634
1635 dev_dbg(&pdev->dev, "released and freed device\n");
1636 return 0;
1637}
1638
/* Platform driver glue; probe/remove and PM hooks defined above */
static struct platform_driver ks8695_driver = {
	.driver = {
		.name	= MODULENAME,
		.owner	= THIS_MODULE,
	},
	.probe		= ks8695_probe,
	.remove		= __devexit_p(ks8695_drv_remove),
	.suspend	= ks8695_drv_suspend,
	.resume		= ks8695_drv_resume,
};
1649
1650/* Module interface */
1651
1652static int __init
1653ks8695_init(void)
1654{
1655 printk(KERN_INFO "%s Ethernet driver, V%s\n",
1656 MODULENAME, MODULEVERSION);
1657
1658 return platform_driver_register(&ks8695_driver);
1659}
1660
1661static void __exit
1662ks8695_cleanup(void)
1663{
1664 platform_driver_unregister(&ks8695_driver);
1665}
1666
1667module_init(ks8695_init);
1668module_exit(ks8695_cleanup);
1669
1670MODULE_AUTHOR("Simtec Electronics")
1671MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
1672MODULE_LICENSE("GPL");
1673MODULE_ALIAS("platform:" MODULENAME);
1674
1675module_param(watchdog, int, 0400);
1676MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
diff --git a/drivers/net/arm/ks8695net.h b/drivers/net/arm/ks8695net.h
new file mode 100644
index 000000000000..80eff6ea5163
--- /dev/null
+++ b/drivers/net/arm/ks8695net.h
@@ -0,0 +1,107 @@
/*
 * Micrel KS8695 (Centaur) Ethernet.
 *
 * Register and DMA descriptor bit definitions for the KS8695
 * ethernet ports.
 *
 * Copyright 2008 Simtec Electronics
 * Daniel Silverstone <dsilvers@simtec.co.uk>
 * Vincent Sanders <vince@simtec.co.uk>
 */

#ifndef KS8695NET_H
#define KS8695NET_H

/* Receive descriptor flags */
#define RDES_OWN	(1 << 31)	/* Ownership */
#define RDES_FS		(1 << 30)	/* First Descriptor */
#define RDES_LS		(1 << 29)	/* Last Descriptor */
#define RDES_IPE	(1 << 28)	/* IP Checksum error */
#define RDES_TCPE	(1 << 27)	/* TCP Checksum error */
#define RDES_UDPE	(1 << 26)	/* UDP Checksum error */
#define RDES_ES		(1 << 25)	/* Error summary */
#define RDES_MF		(1 << 24)	/* Multicast Frame */
#define RDES_RE		(1 << 19)	/* MII Error reported */
#define RDES_TL		(1 << 18)	/* Frame too Long */
#define RDES_RF		(1 << 17)	/* Runt Frame */
#define RDES_CE		(1 << 16)	/* CRC error */
#define RDES_FT		(1 << 15)	/* Frame Type */
#define RDES_FLEN	(0x7ff)		/* Frame Length */

/* NOTE(review): these overlap the status bits above, so they
 * presumably belong to the descriptor's other (control) word —
 * confirm against the KS8695 datasheet. */
#define RDES_RER	(1 << 25)	/* Receive End of Ring */
#define RDES_RBS	(0x7ff)		/* Receive Buffer Size */

/* Transmit descriptor flags */

#define TDES_OWN	(1 << 31)	/* Ownership */

#define TDES_IC		(1 << 31)	/* Interrupt on Completion */
#define TDES_FS		(1 << 30)	/* First Segment */
#define TDES_LS		(1 << 29)	/* Last Segment */
#define TDES_IPCKG	(1 << 28)	/* IP Checksum generate */
#define TDES_TCPCKG	(1 << 27)	/* TCP Checksum generate */
#define TDES_UDPCKG	(1 << 26)	/* UDP Checksum generate */
#define TDES_TER	(1 << 25)	/* Transmit End of Ring */
#define TDES_TBS	(0x7ff)		/* Transmit Buffer Size */

/*
 * Network controller register offsets
 */
#define KS8695_DTXC	(0x00)		/* DMA Transmit Control */
#define KS8695_DRXC	(0x04)		/* DMA Receive Control */
#define KS8695_DTSC	(0x08)		/* DMA Transmit Start Command */
#define KS8695_DRSC	(0x0c)		/* DMA Receive Start Command */
#define KS8695_TDLB	(0x10)		/* Transmit Descriptor List
					 * Base Address
					 */
#define KS8695_RDLB	(0x14)		/* Receive Descriptor List
					 * Base Address
					 */
#define KS8695_MAL	(0x18)		/* MAC Station Address Low */
#define KS8695_MAH	(0x1c)		/* MAC Station Address High */
#define KS8695_AAL_(n)	(0x80 + ((n)*8))	/* MAC Additional
						 * Station Address
						 * (0..15) Low
						 */
#define KS8695_AAH_(n)	(0x84 + ((n)*8))	/* MAC Additional
						 * Station Address
						 * (0..15) High
						 */


/* DMA Transmit Control Register */
#define DTXC_TRST	(1 << 31)	/* Soft Reset */
#define DTXC_TBS	(0x3f << 24)	/* Transmit Burst Size */
#define DTXC_TUCG	(1 << 18)	/* Transmit UDP
					 * Checksum Generate
					 */
#define DTXC_TTCG	(1 << 17)	/* Transmit TCP
					 * Checksum Generate
					 */
#define DTXC_TICG	(1 << 16)	/* Transmit IP
					 * Checksum Generate
					 */
#define DTXC_TFCE	(1 << 9)	/* Transmit Flow
					 * Control Enable
					 */
#define DTXC_TLB	(1 << 8)	/* Loopback mode */
#define DTXC_TEP	(1 << 2)	/* Transmit Enable Padding */
#define DTXC_TAC	(1 << 1)	/* Transmit Add CRC */
#define DTXC_TE		(1 << 0)	/* TX Enable */

/* DMA Receive Control Register */
#define DRXC_RBS	(0x3f << 24)	/* Receive Burst Size */
#define DRXC_RUCC	(1 << 18)	/* Receive UDP Checksum check */
#define DRXC_RTCG	(1 << 17)	/* Receive TCP Checksum check */
#define DRXC_RICG	(1 << 16)	/* Receive IP Checksum check */
#define DRXC_RFCE	(1 << 9)	/* Receive Flow Control
					 * Enable
					 */
#define DRXC_RB		(1 << 6)	/* Receive Broadcast */
#define DRXC_RM		(1 << 5)	/* Receive Multicast */
#define DRXC_RU		(1 << 4)	/* Receive Unicast */
#define DRXC_RERR	(1 << 3)	/* Receive Error Frame */
#define DRXC_RA		(1 << 2)	/* Receive All */
#define DRXC_RE		(1 << 0)	/* RX Enable */

/* Additional Station Address High */
#define AAH_E		(1 << 31)	/* Address Enabled */

#endif /* KS8695NET_H */
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 7e874d485d24..72ea6e378f8d 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -265,7 +265,6 @@ static int __init at1700_probe1(struct net_device *dev, int ioaddr)
265 unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0; 265 unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
266 int slot, ret = -ENODEV; 266 int slot, ret = -ENODEV;
267 struct net_local *lp = netdev_priv(dev); 267 struct net_local *lp = netdev_priv(dev);
268 DECLARE_MAC_BUF(mac);
269 268
270 if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME)) 269 if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME))
271 return -EBUSY; 270 return -EBUSY;
@@ -397,7 +396,7 @@ found:
397 dev->dev_addr[i] = val; 396 dev->dev_addr[i] = val;
398 } 397 }
399 } 398 }
400 printk("%s", print_mac(mac, dev->dev_addr)); 399 printk("%pM", dev->dev_addr);
401 400
402 /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals, 401 /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
403 rather than 150 ohm shielded twisted pair compensation. 402 rather than 150 ohm shielded twisted pair compensation.
@@ -768,7 +767,6 @@ net_rx(struct net_device *dev)
768 insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1); 767 insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
769 skb->protocol=eth_type_trans(skb, dev); 768 skb->protocol=eth_type_trans(skb, dev);
770 netif_rx(skb); 769 netif_rx(skb);
771 dev->last_rx = jiffies;
772 dev->stats.rx_packets++; 770 dev->stats.rx_packets++;
773 dev->stats.rx_bytes += pkt_len; 771 dev->stats.rx_bytes += pkt_len;
774 } 772 }
@@ -901,15 +899,3 @@ module_init(at1700_module_init);
901module_exit(at1700_module_exit); 899module_exit(at1700_module_exit);
902#endif /* MODULE */ 900#endif /* MODULE */
903MODULE_LICENSE("GPL"); 901MODULE_LICENSE("GPL");
904
905
906/*
907 * Local variables:
908 * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c at1700.c"
909 * alt-compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c at1700.c"
910 * tab-width: 4
911 * c-basic-offset: 4
912 * c-indent-level: 4
913 * End:
914 */
915
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 0860cc280b01..2d81f6afcb58 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -466,7 +466,6 @@ static unsigned long __init lance_probe1( struct net_device *dev,
466 int i; 466 int i;
467 static int did_version; 467 static int did_version;
468 unsigned short save1, save2; 468 unsigned short save1, save2;
469 DECLARE_MAC_BUF(mac);
470 469
471 PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n", 470 PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
472 (long)memaddr, (long)ioaddr )); 471 (long)memaddr, (long)ioaddr ));
@@ -521,7 +520,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
521 return( 0 ); 520 return( 0 );
522 521
523 probe_ok: 522 probe_ok:
524 lp = (struct lance_private *)dev->priv; 523 lp = netdev_priv(dev);
525 MEM = (struct lance_memory *)memaddr; 524 MEM = (struct lance_memory *)memaddr;
526 IO = lp->iobase = (struct lance_ioreg *)ioaddr; 525 IO = lp->iobase = (struct lance_ioreg *)ioaddr;
527 dev->base_addr = (unsigned long)ioaddr; /* informational only */ 526 dev->base_addr = (unsigned long)ioaddr; /* informational only */
@@ -595,7 +594,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
595 i = IO->mem; 594 i = IO->mem;
596 break; 595 break;
597 } 596 }
598 printk("%s\n", print_mac(mac, dev->dev_addr)); 597 printk("%pM\n", dev->dev_addr);
599 if (lp->cardtype == OLD_RIEBL) { 598 if (lp->cardtype == OLD_RIEBL) {
600 printk( "%s: Warning: This is a default ethernet address!\n", 599 printk( "%s: Warning: This is a default ethernet address!\n",
601 dev->name ); 600 dev->name );
@@ -640,8 +639,8 @@ static unsigned long __init lance_probe1( struct net_device *dev,
640 639
641 640
642static int lance_open( struct net_device *dev ) 641static int lance_open( struct net_device *dev )
643 642{
644{ struct lance_private *lp = (struct lance_private *)dev->priv; 643 struct lance_private *lp = netdev_priv(dev);
645 struct lance_ioreg *IO = lp->iobase; 644 struct lance_ioreg *IO = lp->iobase;
646 int i; 645 int i;
647 646
@@ -681,8 +680,8 @@ static int lance_open( struct net_device *dev )
681/* Initialize the LANCE Rx and Tx rings. */ 680/* Initialize the LANCE Rx and Tx rings. */
682 681
683static void lance_init_ring( struct net_device *dev ) 682static void lance_init_ring( struct net_device *dev )
684 683{
685{ struct lance_private *lp = (struct lance_private *)dev->priv; 684 struct lance_private *lp = netdev_priv(dev);
686 int i; 685 int i;
687 unsigned offset; 686 unsigned offset;
688 687
@@ -730,7 +729,7 @@ static void lance_init_ring( struct net_device *dev )
730 729
731static void lance_tx_timeout (struct net_device *dev) 730static void lance_tx_timeout (struct net_device *dev)
732{ 731{
733 struct lance_private *lp = (struct lance_private *) dev->priv; 732 struct lance_private *lp = netdev_priv(dev);
734 struct lance_ioreg *IO = lp->iobase; 733 struct lance_ioreg *IO = lp->iobase;
735 734
736 AREG = CSR0; 735 AREG = CSR0;
@@ -772,14 +771,12 @@ static void lance_tx_timeout (struct net_device *dev)
772/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ 771/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
773 772
774static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) 773static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
775 774{
776{ struct lance_private *lp = (struct lance_private *)dev->priv; 775 struct lance_private *lp = netdev_priv(dev);
777 struct lance_ioreg *IO = lp->iobase; 776 struct lance_ioreg *IO = lp->iobase;
778 int entry, len; 777 int entry, len;
779 struct lance_tx_head *head; 778 struct lance_tx_head *head;
780 unsigned long flags; 779 unsigned long flags;
781 DECLARE_MAC_BUF(mac);
782 DECLARE_MAC_BUF(mac2);
783 780
784 DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n", 781 DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
785 dev->name, DREG )); 782 dev->name, DREG ));
@@ -802,12 +799,10 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
802 799
803 /* Fill in a Tx ring entry */ 800 /* Fill in a Tx ring entry */
804 if (lance_debug >= 3) { 801 if (lance_debug >= 3) {
805 printk( "%s: TX pkt type 0x%04x from " 802 printk( "%s: TX pkt type 0x%04x from %pM to %pM"
806 "%s to %s"
807 " data at 0x%08x len %d\n", 803 " data at 0x%08x len %d\n",
808 dev->name, ((u_short *)skb->data)[6], 804 dev->name, ((u_short *)skb->data)[6],
809 print_mac(mac, &skb->data[6]), 805 &skb->data[6], skb->data,
810 print_mac(mac2, skb->data),
811 (int)skb->data, (int)skb->len ); 806 (int)skb->data, (int)skb->len );
812 } 807 }
813 808
@@ -865,7 +860,7 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id )
865 return IRQ_NONE; 860 return IRQ_NONE;
866 } 861 }
867 862
868 lp = (struct lance_private *)dev->priv; 863 lp = netdev_priv(dev);
869 IO = lp->iobase; 864 IO = lp->iobase;
870 spin_lock (&lp->devlock); 865 spin_lock (&lp->devlock);
871 866
@@ -965,8 +960,8 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id )
965 960
966 961
967static int lance_rx( struct net_device *dev ) 962static int lance_rx( struct net_device *dev )
968 963{
969{ struct lance_private *lp = (struct lance_private *)dev->priv; 964 struct lance_private *lp = netdev_priv(dev);
970 int entry = lp->cur_rx & RX_RING_MOD_MASK; 965 int entry = lp->cur_rx & RX_RING_MOD_MASK;
971 int i; 966 int i;
972 967
@@ -1019,14 +1014,12 @@ static int lance_rx( struct net_device *dev )
1019 1014
1020 if (lance_debug >= 3) { 1015 if (lance_debug >= 3) {
1021 u_char *data = PKTBUF_ADDR(head); 1016 u_char *data = PKTBUF_ADDR(head);
1022 DECLARE_MAC_BUF(mac);
1023 DECLARE_MAC_BUF(mac2);
1024 1017
1025 printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %s to %s " 1018 printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %pM to %pM "
1026 "data %02x %02x %02x %02x %02x %02x %02x %02x " 1019 "data %02x %02x %02x %02x %02x %02x %02x %02x "
1027 "len %d\n", 1020 "len %d\n",
1028 dev->name, ((u_short *)data)[6], 1021 dev->name, ((u_short *)data)[6],
1029 print_mac(mac, &data[6]), print_mac(mac2, data), 1022 &data[6], data,
1030 data[15], data[16], data[17], data[18], 1023 data[15], data[16], data[17], data[18],
1031 data[19], data[20], data[21], data[22], 1024 data[19], data[20], data[21], data[22],
1032 pkt_len); 1025 pkt_len);
@@ -1037,7 +1030,6 @@ static int lance_rx( struct net_device *dev )
1037 lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len ); 1030 lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len );
1038 skb->protocol = eth_type_trans( skb, dev ); 1031 skb->protocol = eth_type_trans( skb, dev );
1039 netif_rx( skb ); 1032 netif_rx( skb );
1040 dev->last_rx = jiffies;
1041 dev->stats.rx_packets++; 1033 dev->stats.rx_packets++;
1042 dev->stats.rx_bytes += pkt_len; 1034 dev->stats.rx_bytes += pkt_len;
1043 } 1035 }
@@ -1057,8 +1049,8 @@ static int lance_rx( struct net_device *dev )
1057 1049
1058 1050
1059static int lance_close( struct net_device *dev ) 1051static int lance_close( struct net_device *dev )
1060 1052{
1061{ struct lance_private *lp = (struct lance_private *)dev->priv; 1053 struct lance_private *lp = netdev_priv(dev);
1062 struct lance_ioreg *IO = lp->iobase; 1054 struct lance_ioreg *IO = lp->iobase;
1063 1055
1064 netif_stop_queue (dev); 1056 netif_stop_queue (dev);
@@ -1084,8 +1076,8 @@ static int lance_close( struct net_device *dev )
1084 */ 1076 */
1085 1077
1086static void set_multicast_list( struct net_device *dev ) 1078static void set_multicast_list( struct net_device *dev )
1087 1079{
1088{ struct lance_private *lp = (struct lance_private *)dev->priv; 1080 struct lance_private *lp = netdev_priv(dev);
1089 struct lance_ioreg *IO = lp->iobase; 1081 struct lance_ioreg *IO = lp->iobase;
1090 1082
1091 if (netif_running(dev)) 1083 if (netif_running(dev))
@@ -1126,8 +1118,8 @@ static void set_multicast_list( struct net_device *dev )
1126/* This is needed for old RieblCards and possible for new RieblCards */ 1118/* This is needed for old RieblCards and possible for new RieblCards */
1127 1119
1128static int lance_set_mac_address( struct net_device *dev, void *addr ) 1120static int lance_set_mac_address( struct net_device *dev, void *addr )
1129 1121{
1130{ struct lance_private *lp = (struct lance_private *)dev->priv; 1122 struct lance_private *lp = netdev_priv(dev);
1131 struct sockaddr *saddr = addr; 1123 struct sockaddr *saddr = addr;
1132 int i; 1124 int i;
1133 1125
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 9b603528143d..bb9094d4cbc9 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1326,9 +1326,9 @@ static irqreturn_t atl1e_intr(int irq, void *data)
1326 AT_WRITE_REG(hw, REG_IMR, 1326 AT_WRITE_REG(hw, REG_IMR,
1327 IMR_NORMAL_MASK & ~ISR_RX_EVENT); 1327 IMR_NORMAL_MASK & ~ISR_RX_EVENT);
1328 AT_WRITE_FLUSH(hw); 1328 AT_WRITE_FLUSH(hw);
1329 if (likely(netif_rx_schedule_prep(netdev, 1329 if (likely(netif_rx_schedule_prep(
1330 &adapter->napi))) 1330 &adapter->napi)))
1331 __netif_rx_schedule(netdev, &adapter->napi); 1331 __netif_rx_schedule(&adapter->napi);
1332 } 1332 }
1333 } while (--max_ints > 0); 1333 } while (--max_ints > 0);
1334 /* re-enable Interrupt*/ 1334 /* re-enable Interrupt*/
@@ -1460,7 +1460,6 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1460 netif_receive_skb(skb); 1460 netif_receive_skb(skb);
1461 } 1461 }
1462 1462
1463 netdev->last_rx = jiffies;
1464skip_pkt: 1463skip_pkt:
1465 /* skip current packet whether it's ok or not. */ 1464 /* skip current packet whether it's ok or not. */
1466 rx_page->read_offset += 1465 rx_page->read_offset +=
@@ -1502,7 +1501,6 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
1502{ 1501{
1503 struct atl1e_adapter *adapter = 1502 struct atl1e_adapter *adapter =
1504 container_of(napi, struct atl1e_adapter, napi); 1503 container_of(napi, struct atl1e_adapter, napi);
1505 struct net_device *netdev = adapter->netdev;
1506 struct pci_dev *pdev = adapter->pdev; 1504 struct pci_dev *pdev = adapter->pdev;
1507 u32 imr_data; 1505 u32 imr_data;
1508 int work_done = 0; 1506 int work_done = 0;
@@ -1516,7 +1514,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
1516 /* If no Tx and not enough Rx work done, exit the polling mode */ 1514 /* If no Tx and not enough Rx work done, exit the polling mode */
1517 if (work_done < budget) { 1515 if (work_done < budget) {
1518quit_polling: 1516quit_polling:
1519 netif_rx_complete(netdev, napi); 1517 netif_rx_complete(napi);
1520 imr_data = AT_READ_REG(&adapter->hw, REG_IMR); 1518 imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
1521 AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); 1519 AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
1522 /* test debug */ 1520 /* test debug */
@@ -2254,26 +2252,33 @@ static void atl1e_shutdown(struct pci_dev *pdev)
2254 atl1e_suspend(pdev, PMSG_SUSPEND); 2252 atl1e_suspend(pdev, PMSG_SUSPEND);
2255} 2253}
2256 2254
2255static const struct net_device_ops atl1e_netdev_ops = {
2256 .ndo_open = atl1e_open,
2257 .ndo_stop = atl1e_close,
2258 .ndo_start_xmit = atl1e_xmit_frame,
2259 .ndo_get_stats = atl1e_get_stats,
2260 .ndo_set_multicast_list = atl1e_set_multi,
2261 .ndo_validate_addr = eth_validate_addr,
2262 .ndo_set_mac_address = atl1e_set_mac_addr,
2263 .ndo_change_mtu = atl1e_change_mtu,
2264 .ndo_do_ioctl = atl1e_ioctl,
2265 .ndo_tx_timeout = atl1e_tx_timeout,
2266 .ndo_vlan_rx_register = atl1e_vlan_rx_register,
2267#ifdef CONFIG_NET_POLL_CONTROLLER
2268 .ndo_poll_controller = atl1e_netpoll,
2269#endif
2270
2271};
2272
2257static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) 2273static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2258{ 2274{
2259 SET_NETDEV_DEV(netdev, &pdev->dev); 2275 SET_NETDEV_DEV(netdev, &pdev->dev);
2260 pci_set_drvdata(pdev, netdev); 2276 pci_set_drvdata(pdev, netdev);
2261 2277
2262 netdev->irq = pdev->irq; 2278 netdev->irq = pdev->irq;
2263 netdev->open = &atl1e_open; 2279 netdev->netdev_ops = &atl1e_netdev_ops;
2264 netdev->stop = &atl1e_close; 2280
2265 netdev->hard_start_xmit = &atl1e_xmit_frame;
2266 netdev->get_stats = &atl1e_get_stats;
2267 netdev->set_multicast_list = &atl1e_set_multi;
2268 netdev->set_mac_address = &atl1e_set_mac_addr;
2269 netdev->change_mtu = &atl1e_change_mtu;
2270 netdev->do_ioctl = &atl1e_ioctl;
2271 netdev->tx_timeout = &atl1e_tx_timeout;
2272 netdev->watchdog_timeo = AT_TX_WATCHDOG; 2281 netdev->watchdog_timeo = AT_TX_WATCHDOG;
2273 netdev->vlan_rx_register = atl1e_vlan_rx_register;
2274#ifdef CONFIG_NET_POLL_CONTROLLER
2275 netdev->poll_controller = atl1e_netpoll;
2276#endif
2277 atl1e_set_ethtool_ops(netdev); 2282 atl1e_set_ethtool_ops(netdev);
2278 2283
2279 netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | 2284 netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM |
@@ -2488,7 +2493,7 @@ static pci_ers_result_t
2488atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 2493atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2489{ 2494{
2490 struct net_device *netdev = pci_get_drvdata(pdev); 2495 struct net_device *netdev = pci_get_drvdata(pdev);
2491 struct atl1e_adapter *adapter = netdev->priv; 2496 struct atl1e_adapter *adapter = netdev_priv(netdev);
2492 2497
2493 netif_device_detach(netdev); 2498 netif_device_detach(netdev);
2494 2499
@@ -2511,7 +2516,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2511static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev) 2516static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
2512{ 2517{
2513 struct net_device *netdev = pci_get_drvdata(pdev); 2518 struct net_device *netdev = pci_get_drvdata(pdev);
2514 struct atl1e_adapter *adapter = netdev->priv; 2519 struct atl1e_adapter *adapter = netdev_priv(netdev);
2515 2520
2516 if (pci_enable_device(pdev)) { 2521 if (pci_enable_device(pdev)) {
2517 dev_err(&pdev->dev, 2522 dev_err(&pdev->dev,
@@ -2539,7 +2544,7 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
2539static void atl1e_io_resume(struct pci_dev *pdev) 2544static void atl1e_io_resume(struct pci_dev *pdev)
2540{ 2545{
2541 struct net_device *netdev = pci_get_drvdata(pdev); 2546 struct net_device *netdev = pci_get_drvdata(pdev);
2542 struct atl1e_adapter *adapter = netdev->priv; 2547 struct atl1e_adapter *adapter = netdev_priv(netdev);
2543 2548
2544 if (netif_running(netdev)) { 2549 if (netif_running(netdev)) {
2545 if (atl1e_up(adapter)) { 2550 if (atl1e_up(adapter)) {
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index aef403d299ee..c0ceee0d7c80 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -195,7 +195,7 @@ static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
195 * value exists, a default value is used. The final value is stored 195 * value exists, a default value is used. The final value is stored
196 * in a variable in the adapter structure. 196 * in a variable in the adapter structure.
197 */ 197 */
198void __devinit atl1_check_options(struct atl1_adapter *adapter) 198static void __devinit atl1_check_options(struct atl1_adapter *adapter)
199{ 199{
200 struct pci_dev *pdev = adapter->pdev; 200 struct pci_dev *pdev = adapter->pdev;
201 int bd = adapter->bd_number; 201 int bd = adapter->bd_number;
@@ -523,7 +523,7 @@ static int atl1_get_permanent_address(struct atl1_hw *hw)
523 * Reads the adapter's MAC address from the EEPROM 523 * Reads the adapter's MAC address from the EEPROM
524 * hw - Struct containing variables accessed by shared code 524 * hw - Struct containing variables accessed by shared code
525 */ 525 */
526s32 atl1_read_mac_addr(struct atl1_hw *hw) 526static s32 atl1_read_mac_addr(struct atl1_hw *hw)
527{ 527{
528 u16 i; 528 u16 i;
529 529
@@ -1390,7 +1390,8 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
1390 /* auto-neg, insert timer to re-config phy */ 1390 /* auto-neg, insert timer to re-config phy */
1391 if (!adapter->phy_timer_pending) { 1391 if (!adapter->phy_timer_pending) {
1392 adapter->phy_timer_pending = true; 1392 adapter->phy_timer_pending = true;
1393 mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ); 1393 mod_timer(&adapter->phy_config_timer,
1394 round_jiffies(jiffies + 3 * HZ));
1394 } 1395 }
1395 1396
1396 return 0; 1397 return 0;
@@ -1662,6 +1663,7 @@ static void atl1_via_workaround(struct atl1_adapter *adapter)
1662 1663
1663static void atl1_inc_smb(struct atl1_adapter *adapter) 1664static void atl1_inc_smb(struct atl1_adapter *adapter)
1664{ 1665{
1666 struct net_device *netdev = adapter->netdev;
1665 struct stats_msg_block *smb = adapter->smb.smb; 1667 struct stats_msg_block *smb = adapter->smb.smb;
1666 1668
1667 /* Fill out the OS statistics structure */ 1669 /* Fill out the OS statistics structure */
@@ -1704,30 +1706,30 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
1704 adapter->soft_stats.tx_trunc += smb->tx_trunc; 1706 adapter->soft_stats.tx_trunc += smb->tx_trunc;
1705 adapter->soft_stats.tx_pause += smb->tx_pause; 1707 adapter->soft_stats.tx_pause += smb->tx_pause;
1706 1708
1707 adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets; 1709 netdev->stats.rx_packets = adapter->soft_stats.rx_packets;
1708 adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets; 1710 netdev->stats.tx_packets = adapter->soft_stats.tx_packets;
1709 adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes; 1711 netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes;
1710 adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes; 1712 netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes;
1711 adapter->net_stats.multicast = adapter->soft_stats.multicast; 1713 netdev->stats.multicast = adapter->soft_stats.multicast;
1712 adapter->net_stats.collisions = adapter->soft_stats.collisions; 1714 netdev->stats.collisions = adapter->soft_stats.collisions;
1713 adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; 1715 netdev->stats.rx_errors = adapter->soft_stats.rx_errors;
1714 adapter->net_stats.rx_over_errors = 1716 netdev->stats.rx_over_errors =
1715 adapter->soft_stats.rx_missed_errors; 1717 adapter->soft_stats.rx_missed_errors;
1716 adapter->net_stats.rx_length_errors = 1718 netdev->stats.rx_length_errors =
1717 adapter->soft_stats.rx_length_errors; 1719 adapter->soft_stats.rx_length_errors;
1718 adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; 1720 netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
1719 adapter->net_stats.rx_frame_errors = 1721 netdev->stats.rx_frame_errors =
1720 adapter->soft_stats.rx_frame_errors; 1722 adapter->soft_stats.rx_frame_errors;
1721 adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; 1723 netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
1722 adapter->net_stats.rx_missed_errors = 1724 netdev->stats.rx_missed_errors =
1723 adapter->soft_stats.rx_missed_errors; 1725 adapter->soft_stats.rx_missed_errors;
1724 adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; 1726 netdev->stats.tx_errors = adapter->soft_stats.tx_errors;
1725 adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; 1727 netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
1726 adapter->net_stats.tx_aborted_errors = 1728 netdev->stats.tx_aborted_errors =
1727 adapter->soft_stats.tx_aborted_errors; 1729 adapter->soft_stats.tx_aborted_errors;
1728 adapter->net_stats.tx_window_errors = 1730 netdev->stats.tx_window_errors =
1729 adapter->soft_stats.tx_window_errors; 1731 adapter->soft_stats.tx_window_errors;
1730 adapter->net_stats.tx_carrier_errors = 1732 netdev->stats.tx_carrier_errors =
1731 adapter->soft_stats.tx_carrier_errors; 1733 adapter->soft_stats.tx_carrier_errors;
1732} 1734}
1733 1735
@@ -1860,7 +1862,7 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
1860 adapter->rx_buffer_len + NET_IP_ALIGN); 1862 adapter->rx_buffer_len + NET_IP_ALIGN);
1861 if (unlikely(!skb)) { 1863 if (unlikely(!skb)) {
1862 /* Better luck next round */ 1864 /* Better luck next round */
1863 adapter->net_stats.rx_dropped++; 1865 adapter->netdev->stats.rx_dropped++;
1864 break; 1866 break;
1865 } 1867 }
1866 1868
@@ -2026,8 +2028,6 @@ rrd_ok:
2026 buffer_info->skb = NULL; 2028 buffer_info->skb = NULL;
2027 buffer_info->alloced = 0; 2029 buffer_info->alloced = 0;
2028 rrd->xsz.valid = 0; 2030 rrd->xsz.valid = 0;
2029
2030 adapter->netdev->last_rx = jiffies;
2031 } 2031 }
2032 2032
2033 atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean); 2033 atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);
@@ -2524,17 +2524,6 @@ static irqreturn_t atl1_intr(int irq, void *data)
2524 return IRQ_HANDLED; 2524 return IRQ_HANDLED;
2525} 2525}
2526 2526
2527/*
2528 * atl1_watchdog - Timer Call-back
2529 * @data: pointer to netdev cast into an unsigned long
2530 */
2531static void atl1_watchdog(unsigned long data)
2532{
2533 struct atl1_adapter *adapter = (struct atl1_adapter *)data;
2534
2535 /* Reset the timer */
2536 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
2537}
2538 2527
2539/* 2528/*
2540 * atl1_phy_config - Timer Call-back 2529 * atl1_phy_config - Timer Call-back
@@ -2607,7 +2596,6 @@ static s32 atl1_up(struct atl1_adapter *adapter)
2607 if (unlikely(err)) 2596 if (unlikely(err))
2608 goto err_up; 2597 goto err_up;
2609 2598
2610 mod_timer(&adapter->watchdog_timer, jiffies);
2611 atlx_irq_enable(adapter); 2599 atlx_irq_enable(adapter);
2612 atl1_check_link(adapter); 2600 atl1_check_link(adapter);
2613 netif_start_queue(netdev); 2601 netif_start_queue(netdev);
@@ -2625,7 +2613,6 @@ static void atl1_down(struct atl1_adapter *adapter)
2625 struct net_device *netdev = adapter->netdev; 2613 struct net_device *netdev = adapter->netdev;
2626 2614
2627 netif_stop_queue(netdev); 2615 netif_stop_queue(netdev);
2628 del_timer_sync(&adapter->watchdog_timer);
2629 del_timer_sync(&adapter->phy_config_timer); 2616 del_timer_sync(&adapter->phy_config_timer);
2630 adapter->phy_timer_pending = false; 2617 adapter->phy_timer_pending = false;
2631 2618
@@ -2893,6 +2880,22 @@ static void atl1_poll_controller(struct net_device *netdev)
2893} 2880}
2894#endif 2881#endif
2895 2882
2883static const struct net_device_ops atl1_netdev_ops = {
2884 .ndo_open = atl1_open,
2885 .ndo_stop = atl1_close,
2886 .ndo_start_xmit = atl1_xmit_frame,
2887 .ndo_set_multicast_list = atlx_set_multi,
2888 .ndo_validate_addr = eth_validate_addr,
2889 .ndo_set_mac_address = atl1_set_mac,
2890 .ndo_change_mtu = atl1_change_mtu,
2891 .ndo_do_ioctl = atlx_ioctl,
2892 .ndo_tx_timeout = atlx_tx_timeout,
2893 .ndo_vlan_rx_register = atlx_vlan_rx_register,
2894#ifdef CONFIG_NET_POLL_CONTROLLER
2895 .ndo_poll_controller = atl1_poll_controller,
2896#endif
2897};
2898
2896/* 2899/*
2897 * atl1_probe - Device Initialization Routine 2900 * atl1_probe - Device Initialization Routine
2898 * @pdev: PCI device information struct 2901 * @pdev: PCI device information struct
@@ -2980,20 +2983,8 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2980 adapter->mii.phy_id_mask = 0x1f; 2983 adapter->mii.phy_id_mask = 0x1f;
2981 adapter->mii.reg_num_mask = 0x1f; 2984 adapter->mii.reg_num_mask = 0x1f;
2982 2985
2983 netdev->open = &atl1_open; 2986 netdev->netdev_ops = &atl1_netdev_ops;
2984 netdev->stop = &atl1_close;
2985 netdev->hard_start_xmit = &atl1_xmit_frame;
2986 netdev->get_stats = &atlx_get_stats;
2987 netdev->set_multicast_list = &atlx_set_multi;
2988 netdev->set_mac_address = &atl1_set_mac;
2989 netdev->change_mtu = &atl1_change_mtu;
2990 netdev->do_ioctl = &atlx_ioctl;
2991 netdev->tx_timeout = &atlx_tx_timeout;
2992 netdev->watchdog_timeo = 5 * HZ; 2987 netdev->watchdog_timeo = 5 * HZ;
2993#ifdef CONFIG_NET_POLL_CONTROLLER
2994 netdev->poll_controller = atl1_poll_controller;
2995#endif
2996 netdev->vlan_rx_register = atlx_vlan_rx_register;
2997 2988
2998 netdev->ethtool_ops = &atl1_ethtool_ops; 2989 netdev->ethtool_ops = &atl1_ethtool_ops;
2999 adapter->bd_number = cards_found; 2990 adapter->bd_number = cards_found;
@@ -3049,13 +3040,8 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
3049 netif_carrier_off(netdev); 3040 netif_carrier_off(netdev);
3050 netif_stop_queue(netdev); 3041 netif_stop_queue(netdev);
3051 3042
3052 init_timer(&adapter->watchdog_timer); 3043 setup_timer(&adapter->phy_config_timer, &atl1_phy_config,
3053 adapter->watchdog_timer.function = &atl1_watchdog; 3044 (unsigned long)adapter);
3054 adapter->watchdog_timer.data = (unsigned long)adapter;
3055
3056 init_timer(&adapter->phy_config_timer);
3057 adapter->phy_config_timer.function = &atl1_phy_config;
3058 adapter->phy_config_timer.data = (unsigned long)adapter;
3059 adapter->phy_timer_pending = false; 3045 adapter->phy_timer_pending = false;
3060 3046
3061 INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); 3047 INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
@@ -3173,8 +3159,6 @@ static struct atl1_stats atl1_gstrings_stats[] = {
3173 {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)}, 3159 {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
3174 {"rx_errors", ATL1_STAT(soft_stats.rx_errors)}, 3160 {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
3175 {"tx_errors", ATL1_STAT(soft_stats.tx_errors)}, 3161 {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
3176 {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
3177 {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
3178 {"multicast", ATL1_STAT(soft_stats.multicast)}, 3162 {"multicast", ATL1_STAT(soft_stats.multicast)},
3179 {"collisions", ATL1_STAT(soft_stats.collisions)}, 3163 {"collisions", ATL1_STAT(soft_stats.collisions)},
3180 {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)}, 3164 {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index ffa73fc8d95e..146372fd6683 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -754,7 +754,7 @@ struct atl1_hw {
754struct atl1_adapter { 754struct atl1_adapter {
755 struct net_device *netdev; 755 struct net_device *netdev;
756 struct pci_dev *pdev; 756 struct pci_dev *pdev;
757 struct net_device_stats net_stats; 757
758 struct atl1_sft_stats soft_stats; 758 struct atl1_sft_stats soft_stats;
759 struct vlan_group *vlgrp; 759 struct vlan_group *vlgrp;
760 u32 rx_buffer_len; 760 u32 rx_buffer_len;
@@ -765,7 +765,7 @@ struct atl1_adapter {
765 struct work_struct tx_timeout_task; 765 struct work_struct tx_timeout_task;
766 struct work_struct link_chg_task; 766 struct work_struct link_chg_task;
767 struct work_struct pcie_dma_to_rst_task; 767 struct work_struct pcie_dma_to_rst_task;
768 struct timer_list watchdog_timer; 768
769 struct timer_list phy_config_timer; 769 struct timer_list phy_config_timer;
770 bool phy_timer_pending; 770 bool phy_timer_pending;
771 771
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 8571e8c0bc67..bc394491b63b 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -418,7 +418,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
418 * Check that some rx space is free. If not, 418 * Check that some rx space is free. If not,
419 * free one and mark stats->rx_dropped++. 419 * free one and mark stats->rx_dropped++.
420 */ 420 */
421 adapter->net_stats.rx_dropped++; 421 netdev->stats.rx_dropped++;
422 break; 422 break;
423 } 423 }
424 skb_reserve(skb, NET_IP_ALIGN); 424 skb_reserve(skb, NET_IP_ALIGN);
@@ -435,20 +435,19 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
435 } else 435 } else
436#endif 436#endif
437 netif_rx(skb); 437 netif_rx(skb);
438 adapter->net_stats.rx_bytes += rx_size; 438 netdev->stats.rx_bytes += rx_size;
439 adapter->net_stats.rx_packets++; 439 netdev->stats.rx_packets++;
440 netdev->last_rx = jiffies;
441 } else { 440 } else {
442 adapter->net_stats.rx_errors++; 441 netdev->stats.rx_errors++;
443 442
444 if (rxd->status.ok && rxd->status.pkt_size <= 60) 443 if (rxd->status.ok && rxd->status.pkt_size <= 60)
445 adapter->net_stats.rx_length_errors++; 444 netdev->stats.rx_length_errors++;
446 if (rxd->status.mcast) 445 if (rxd->status.mcast)
447 adapter->net_stats.multicast++; 446 netdev->stats.multicast++;
448 if (rxd->status.crc) 447 if (rxd->status.crc)
449 adapter->net_stats.rx_crc_errors++; 448 netdev->stats.rx_crc_errors++;
450 if (rxd->status.align) 449 if (rxd->status.align)
451 adapter->net_stats.rx_frame_errors++; 450 netdev->stats.rx_frame_errors++;
452 } 451 }
453 452
454 /* advance write ptr */ 453 /* advance write ptr */
@@ -463,6 +462,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
463 462
464static void atl2_intr_tx(struct atl2_adapter *adapter) 463static void atl2_intr_tx(struct atl2_adapter *adapter)
465{ 464{
465 struct net_device *netdev = adapter->netdev;
466 u32 txd_read_ptr; 466 u32 txd_read_ptr;
467 u32 txs_write_ptr; 467 u32 txs_write_ptr;
468 struct tx_pkt_status *txs; 468 struct tx_pkt_status *txs;
@@ -522,20 +522,20 @@ static void atl2_intr_tx(struct atl2_adapter *adapter)
522 522
523 /* tx statistics: */ 523 /* tx statistics: */
524 if (txs->ok) { 524 if (txs->ok) {
525 adapter->net_stats.tx_bytes += txs->pkt_size; 525 netdev->stats.tx_bytes += txs->pkt_size;
526 adapter->net_stats.tx_packets++; 526 netdev->stats.tx_packets++;
527 } 527 }
528 else 528 else
529 adapter->net_stats.tx_errors++; 529 netdev->stats.tx_errors++;
530 530
531 if (txs->defer) 531 if (txs->defer)
532 adapter->net_stats.collisions++; 532 netdev->stats.collisions++;
533 if (txs->abort_col) 533 if (txs->abort_col)
534 adapter->net_stats.tx_aborted_errors++; 534 netdev->stats.tx_aborted_errors++;
535 if (txs->late_col) 535 if (txs->late_col)
536 adapter->net_stats.tx_window_errors++; 536 netdev->stats.tx_window_errors++;
537 if (txs->underun) 537 if (txs->underun)
538 adapter->net_stats.tx_fifo_errors++; 538 netdev->stats.tx_fifo_errors++;
539 } while (1); 539 } while (1);
540 540
541 if (free_hole) { 541 if (free_hole) {
@@ -621,7 +621,7 @@ static irqreturn_t atl2_intr(int irq, void *data)
621 621
622 /* link event */ 622 /* link event */
623 if (status & (ISR_PHY | ISR_MANUAL)) { 623 if (status & (ISR_PHY | ISR_MANUAL)) {
624 adapter->net_stats.tx_carrier_errors++; 624 adapter->netdev->stats.tx_carrier_errors++;
625 atl2_check_for_link(adapter); 625 atl2_check_for_link(adapter);
626 } 626 }
627 627
@@ -644,7 +644,6 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
644 int flags, err = 0; 644 int flags, err = 0;
645 645
646 flags = IRQF_SHARED; 646 flags = IRQF_SHARED;
647#ifdef CONFIG_PCI_MSI
648 adapter->have_msi = true; 647 adapter->have_msi = true;
649 err = pci_enable_msi(adapter->pdev); 648 err = pci_enable_msi(adapter->pdev);
650 if (err) 649 if (err)
@@ -652,7 +651,6 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
652 651
653 if (adapter->have_msi) 652 if (adapter->have_msi)
654 flags &= ~IRQF_SHARED; 653 flags &= ~IRQF_SHARED;
655#endif
656 654
657 return request_irq(adapter->pdev->irq, &atl2_intr, flags, netdev->name, 655 return request_irq(adapter->pdev->irq, &atl2_intr, flags, netdev->name,
658 netdev); 656 netdev);
@@ -723,7 +721,7 @@ static int atl2_open(struct net_device *netdev)
723 721
724 clear_bit(__ATL2_DOWN, &adapter->flags); 722 clear_bit(__ATL2_DOWN, &adapter->flags);
725 723
726 mod_timer(&adapter->watchdog_timer, jiffies + 4*HZ); 724 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 4*HZ));
727 725
728 val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL); 726 val = ATL2_READ_REG(&adapter->hw, REG_MASTER_CTRL);
729 ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, 727 ATL2_WRITE_REG(&adapter->hw, REG_MASTER_CTRL,
@@ -900,19 +898,6 @@ static int atl2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
900} 898}
901 899
902/* 900/*
903 * atl2_get_stats - Get System Network Statistics
904 * @netdev: network interface device structure
905 *
906 * Returns the address of the device statistics structure.
907 * The statistics are actually updated from the timer callback.
908 */
909static struct net_device_stats *atl2_get_stats(struct net_device *netdev)
910{
911 struct atl2_adapter *adapter = netdev_priv(netdev);
912 return &adapter->net_stats;
913}
914
915/*
916 * atl2_change_mtu - Change the Maximum Transfer Unit 901 * atl2_change_mtu - Change the Maximum Transfer Unit
917 * @netdev: network interface device structure 902 * @netdev: network interface device structure
918 * @new_mtu: new value for maximum frame size 903 * @new_mtu: new value for maximum frame size
@@ -1050,18 +1035,21 @@ static void atl2_tx_timeout(struct net_device *netdev)
1050static void atl2_watchdog(unsigned long data) 1035static void atl2_watchdog(unsigned long data)
1051{ 1036{
1052 struct atl2_adapter *adapter = (struct atl2_adapter *) data; 1037 struct atl2_adapter *adapter = (struct atl2_adapter *) data;
1053 u32 drop_rxd, drop_rxs;
1054 unsigned long flags;
1055 1038
1056 if (!test_bit(__ATL2_DOWN, &adapter->flags)) { 1039 if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
1040 u32 drop_rxd, drop_rxs;
1041 unsigned long flags;
1042
1057 spin_lock_irqsave(&adapter->stats_lock, flags); 1043 spin_lock_irqsave(&adapter->stats_lock, flags);
1058 drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV); 1044 drop_rxd = ATL2_READ_REG(&adapter->hw, REG_STS_RXD_OV);
1059 drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV); 1045 drop_rxs = ATL2_READ_REG(&adapter->hw, REG_STS_RXS_OV);
1060 adapter->net_stats.rx_over_errors += (drop_rxd+drop_rxs);
1061 spin_unlock_irqrestore(&adapter->stats_lock, flags); 1046 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1062 1047
1048 adapter->netdev->stats.rx_over_errors += drop_rxd + drop_rxs;
1049
1063 /* Reset the timer */ 1050 /* Reset the timer */
1064 mod_timer(&adapter->watchdog_timer, jiffies + 4 * HZ); 1051 mod_timer(&adapter->watchdog_timer,
1052 round_jiffies(jiffies + 4 * HZ));
1065 } 1053 }
1066} 1054}
1067 1055
@@ -1265,7 +1253,8 @@ static int atl2_check_link(struct atl2_adapter *adapter)
1265 * (if interval smaller than 5 seconds, something strange) */ 1253 * (if interval smaller than 5 seconds, something strange) */
1266 if (!test_bit(__ATL2_DOWN, &adapter->flags)) { 1254 if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
1267 if (!test_and_set_bit(0, &adapter->cfg_phy)) 1255 if (!test_and_set_bit(0, &adapter->cfg_phy))
1268 mod_timer(&adapter->phy_config_timer, jiffies + 5 * HZ); 1256 mod_timer(&adapter->phy_config_timer,
1257 round_jiffies(jiffies + 5 * HZ));
1269 } 1258 }
1270 1259
1271 return 0; 1260 return 0;
@@ -1320,6 +1309,23 @@ static void atl2_poll_controller(struct net_device *netdev)
1320} 1309}
1321#endif 1310#endif
1322 1311
1312
1313static const struct net_device_ops atl2_netdev_ops = {
1314 .ndo_open = atl2_open,
1315 .ndo_stop = atl2_close,
1316 .ndo_start_xmit = atl2_xmit_frame,
1317 .ndo_set_multicast_list = atl2_set_multi,
1318 .ndo_validate_addr = eth_validate_addr,
1319 .ndo_set_mac_address = atl2_set_mac,
1320 .ndo_change_mtu = atl2_change_mtu,
1321 .ndo_do_ioctl = atl2_ioctl,
1322 .ndo_tx_timeout = atl2_tx_timeout,
1323 .ndo_vlan_rx_register = atl2_vlan_rx_register,
1324#ifdef CONFIG_NET_POLL_CONTROLLER
1325 .ndo_poll_controller = atl2_poll_controller,
1326#endif
1327};
1328
1323/* 1329/*
1324 * atl2_probe - Device Initialization Routine 1330 * atl2_probe - Device Initialization Routine
1325 * @pdev: PCI device information struct 1331 * @pdev: PCI device information struct
@@ -1393,26 +1399,9 @@ static int __devinit atl2_probe(struct pci_dev *pdev,
1393 1399
1394 atl2_setup_pcicmd(pdev); 1400 atl2_setup_pcicmd(pdev);
1395 1401
1396 netdev->open = &atl2_open; 1402 netdev->netdev_ops = &atl2_netdev_ops;
1397 netdev->stop = &atl2_close;
1398 netdev->hard_start_xmit = &atl2_xmit_frame;
1399 netdev->get_stats = &atl2_get_stats;
1400 netdev->set_multicast_list = &atl2_set_multi;
1401 netdev->set_mac_address = &atl2_set_mac;
1402 netdev->change_mtu = &atl2_change_mtu;
1403 netdev->do_ioctl = &atl2_ioctl;
1404 atl2_set_ethtool_ops(netdev); 1403 atl2_set_ethtool_ops(netdev);
1405
1406#ifdef CONFIG_NET_POLL_CONTROLLER
1407 netdev->poll_controller = atl2_poll_controller;
1408#endif
1409#ifdef HAVE_TX_TIMEOUT
1410 netdev->tx_timeout = &atl2_tx_timeout;
1411 netdev->watchdog_timeo = 5 * HZ; 1404 netdev->watchdog_timeo = 5 * HZ;
1412#endif
1413#ifdef NETIF_F_HW_VLAN_TX
1414 netdev->vlan_rx_register = atl2_vlan_rx_register;
1415#endif
1416 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 1405 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1417 1406
1418 netdev->mem_start = mmio_start; 1407 netdev->mem_start = mmio_start;
diff --git a/drivers/net/atlx/atl2.h b/drivers/net/atlx/atl2.h
index 09974df76b18..d918bbe621ea 100644
--- a/drivers/net/atlx/atl2.h
+++ b/drivers/net/atlx/atl2.h
@@ -453,7 +453,6 @@ struct atl2_adapter {
453 /* OS defined structs */ 453 /* OS defined structs */
454 struct net_device *netdev; 454 struct net_device *netdev;
455 struct pci_dev *pdev; 455 struct pci_dev *pdev;
456 struct net_device_stats net_stats;
457#ifdef NETIF_F_HW_VLAN_TX 456#ifdef NETIF_F_HW_VLAN_TX
458 struct vlan_group *vlgrp; 457 struct vlan_group *vlgrp;
459#endif 458#endif
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index 3cc9d1089ca1..3dc014215679 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -182,19 +182,6 @@ static void atlx_clear_phy_int(struct atlx_adapter *adapter)
182} 182}
183 183
184/* 184/*
185 * atlx_get_stats - Get System Network Statistics
186 * @netdev: network interface device structure
187 *
188 * Returns the address of the device statistics structure.
189 * The statistics are actually updated from the timer callback.
190 */
191static struct net_device_stats *atlx_get_stats(struct net_device *netdev)
192{
193 struct atlx_adapter *adapter = netdev_priv(netdev);
194 return &adapter->net_stats;
195}
196
197/*
198 * atlx_tx_timeout - Respond to a Tx Hang 185 * atlx_tx_timeout - Respond to a Tx Hang
199 * @netdev: network interface device structure 186 * @netdev: network interface device structure
200 */ 187 */
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index c10cd8058e23..ea493ce23982 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -248,7 +248,6 @@ static int __init atp_probe1(long ioaddr)
248 struct net_local *lp; 248 struct net_local *lp;
249 int saved_ctrl_reg, status, i; 249 int saved_ctrl_reg, status, i;
250 int res; 250 int res;
251 DECLARE_MAC_BUF(mac);
252 251
253 outb(0xff, ioaddr + PAR_DATA); 252 outb(0xff, ioaddr + PAR_DATA);
254 /* Save the original value of the Control register, in case we guessed 253 /* Save the original value of the Control register, in case we guessed
@@ -324,8 +323,8 @@ static int __init atp_probe1(long ioaddr)
324#endif 323#endif
325 324
326 printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, " 325 printk(KERN_NOTICE "%s: Pocket adapter found at %#3lx, IRQ %d, "
327 "SAPROM %s.\n", 326 "SAPROM %pM.\n",
328 dev->name, dev->base_addr, dev->irq, print_mac(mac, dev->dev_addr)); 327 dev->name, dev->base_addr, dev->irq, dev->dev_addr);
329 328
330 /* Reset the ethernet hardware and activate the printer pass-through. */ 329 /* Reset the ethernet hardware and activate the printer pass-through. */
331 write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX); 330 write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
@@ -421,7 +420,7 @@ static unsigned short __init eeprom_op(long ioaddr, u32 cmd)
421 registers that "should" only need to be set once at boot, so that 420 registers that "should" only need to be set once at boot, so that
422 there is non-reboot way to recover if something goes wrong. 421 there is non-reboot way to recover if something goes wrong.
423 422
424 This is an attachable device: if there is no dev->priv entry then it wasn't 423 This is an attachable device: if there is no private entry then it wasn't
425 probed for at boot-time, and we need to probe for it again. 424 probed for at boot-time, and we need to probe for it again.
426 */ 425 */
427static int net_open(struct net_device *dev) 426static int net_open(struct net_device *dev)
@@ -803,21 +802,22 @@ static void net_rx(struct net_device *dev)
803 802
804static void read_block(long ioaddr, int length, unsigned char *p, int data_mode) 803static void read_block(long ioaddr, int length, unsigned char *p, int data_mode)
805{ 804{
806
807 if (data_mode <= 3) { /* Mode 0 or 1 */ 805 if (data_mode <= 3) { /* Mode 0 or 1 */
808 outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL); 806 outb(Ctrl_LNibRead, ioaddr + PAR_CONTROL);
809 outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR, 807 outb(length == 8 ? RdAddr | HNib | MAR : RdAddr | MAR,
810 ioaddr + PAR_DATA); 808 ioaddr + PAR_DATA);
811 if (data_mode <= 1) { /* Mode 0 or 1 */ 809 if (data_mode <= 1) { /* Mode 0 or 1 */
812 do *p++ = read_byte_mode0(ioaddr); while (--length > 0); 810 do { *p++ = read_byte_mode0(ioaddr); } while (--length > 0);
813 } else /* Mode 2 or 3 */ 811 } else { /* Mode 2 or 3 */
814 do *p++ = read_byte_mode2(ioaddr); while (--length > 0); 812 do { *p++ = read_byte_mode2(ioaddr); } while (--length > 0);
815 } else if (data_mode <= 5) 813 }
816 do *p++ = read_byte_mode4(ioaddr); while (--length > 0); 814 } else if (data_mode <= 5) {
817 else 815 do { *p++ = read_byte_mode4(ioaddr); } while (--length > 0);
818 do *p++ = read_byte_mode6(ioaddr); while (--length > 0); 816 } else {
817 do { *p++ = read_byte_mode6(ioaddr); } while (--length > 0);
818 }
819 819
820 outb(EOC+HNib+MAR, ioaddr + PAR_DATA); 820 outb(EOC+HNib+MAR, ioaddr + PAR_DATA);
821 outb(Ctrl_SelData, ioaddr + PAR_CONTROL); 821 outb(Ctrl_SelData, ioaddr + PAR_CONTROL);
822} 822}
823 823
@@ -913,7 +913,8 @@ static void __exit atp_cleanup_module(void) {
913 struct net_device *next_dev; 913 struct net_device *next_dev;
914 914
915 while (root_atp_dev) { 915 while (root_atp_dev) {
916 next_dev = ((struct net_local *)root_atp_dev->priv)->next_module; 916 struct net_local *atp_local = netdev_priv(root_atp_dev);
917 next_dev = atp_local->next_module;
917 unregister_netdev(root_atp_dev); 918 unregister_netdev(root_atp_dev);
918 /* No need to release_region(), since we never snarf it. */ 919 /* No need to release_region(), since we never snarf it. */
919 free_netdev(root_atp_dev); 920 free_netdev(root_atp_dev);
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 019b13c08ae6..9c875bb3f76c 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -193,7 +193,7 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
193 */ 193 */
194static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg) 194static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
195{ 195{
196 struct au1000_private *aup = (struct au1000_private *) dev->priv; 196 struct au1000_private *aup = netdev_priv(dev);
197 volatile u32 *const mii_control_reg = &aup->mac->mii_control; 197 volatile u32 *const mii_control_reg = &aup->mac->mii_control;
198 volatile u32 *const mii_data_reg = &aup->mac->mii_data; 198 volatile u32 *const mii_data_reg = &aup->mac->mii_data;
199 u32 timedout = 20; 199 u32 timedout = 20;
@@ -228,7 +228,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
228static void au1000_mdio_write(struct net_device *dev, int phy_addr, 228static void au1000_mdio_write(struct net_device *dev, int phy_addr,
229 int reg, u16 value) 229 int reg, u16 value)
230{ 230{
231 struct au1000_private *aup = (struct au1000_private *) dev->priv; 231 struct au1000_private *aup = netdev_priv(dev);
232 volatile u32 *const mii_control_reg = &aup->mac->mii_control; 232 volatile u32 *const mii_control_reg = &aup->mac->mii_control;
233 volatile u32 *const mii_data_reg = &aup->mac->mii_data; 233 volatile u32 *const mii_data_reg = &aup->mac->mii_data;
234 u32 timedout = 20; 234 u32 timedout = 20;
@@ -283,7 +283,7 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
283 283
284static int mii_probe (struct net_device *dev) 284static int mii_probe (struct net_device *dev)
285{ 285{
286 struct au1000_private *const aup = (struct au1000_private *) dev->priv; 286 struct au1000_private *const aup = netdev_priv(dev);
287 struct phy_device *phydev = NULL; 287 struct phy_device *phydev = NULL;
288 288
289#if defined(AU1XXX_PHY_STATIC_CONFIG) 289#if defined(AU1XXX_PHY_STATIC_CONFIG)
@@ -353,7 +353,6 @@ static int mii_probe (struct net_device *dev)
353 } 353 }
354 354
355 /* now we are supposed to have a proper phydev, to attach to... */ 355 /* now we are supposed to have a proper phydev, to attach to... */
356 BUG_ON(!phydev);
357 BUG_ON(phydev->attached_dev); 356 BUG_ON(phydev->attached_dev);
358 357
359 phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0, 358 phydev = phy_connect(dev, phydev->dev.bus_id, &au1000_adjust_link, 0,
@@ -415,7 +414,7 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
415 414
416static void enable_rx_tx(struct net_device *dev) 415static void enable_rx_tx(struct net_device *dev)
417{ 416{
418 struct au1000_private *aup = (struct au1000_private *) dev->priv; 417 struct au1000_private *aup = netdev_priv(dev);
419 418
420 if (au1000_debug > 4) 419 if (au1000_debug > 4)
421 printk(KERN_INFO "%s: enable_rx_tx\n", dev->name); 420 printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
@@ -426,7 +425,7 @@ static void enable_rx_tx(struct net_device *dev)
426 425
427static void hard_stop(struct net_device *dev) 426static void hard_stop(struct net_device *dev)
428{ 427{
429 struct au1000_private *aup = (struct au1000_private *) dev->priv; 428 struct au1000_private *aup = netdev_priv(dev);
430 429
431 if (au1000_debug > 4) 430 if (au1000_debug > 4)
432 printk(KERN_INFO "%s: hard stop\n", dev->name); 431 printk(KERN_INFO "%s: hard stop\n", dev->name);
@@ -438,7 +437,7 @@ static void hard_stop(struct net_device *dev)
438static void enable_mac(struct net_device *dev, int force_reset) 437static void enable_mac(struct net_device *dev, int force_reset)
439{ 438{
440 unsigned long flags; 439 unsigned long flags;
441 struct au1000_private *aup = (struct au1000_private *) dev->priv; 440 struct au1000_private *aup = netdev_priv(dev);
442 441
443 spin_lock_irqsave(&aup->lock, flags); 442 spin_lock_irqsave(&aup->lock, flags);
444 443
@@ -457,7 +456,7 @@ static void enable_mac(struct net_device *dev, int force_reset)
457 456
458static void reset_mac_unlocked(struct net_device *dev) 457static void reset_mac_unlocked(struct net_device *dev)
459{ 458{
460 struct au1000_private *const aup = (struct au1000_private *) dev->priv; 459 struct au1000_private *const aup = netdev_priv(dev);
461 int i; 460 int i;
462 461
463 hard_stop(dev); 462 hard_stop(dev);
@@ -483,7 +482,7 @@ static void reset_mac_unlocked(struct net_device *dev)
483 482
484static void reset_mac(struct net_device *dev) 483static void reset_mac(struct net_device *dev)
485{ 484{
486 struct au1000_private *const aup = (struct au1000_private *) dev->priv; 485 struct au1000_private *const aup = netdev_priv(dev);
487 unsigned long flags; 486 unsigned long flags;
488 487
489 if (au1000_debug > 4) 488 if (au1000_debug > 4)
@@ -572,7 +571,7 @@ static int __init au1000_init_module(void)
572 571
573static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 572static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
574{ 573{
575 struct au1000_private *aup = (struct au1000_private *)dev->priv; 574 struct au1000_private *aup = netdev_priv(dev);
576 575
577 if (aup->phy_dev) 576 if (aup->phy_dev)
578 return phy_ethtool_gset(aup->phy_dev, cmd); 577 return phy_ethtool_gset(aup->phy_dev, cmd);
@@ -582,7 +581,7 @@ static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
582 581
583static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 582static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
584{ 583{
585 struct au1000_private *aup = (struct au1000_private *)dev->priv; 584 struct au1000_private *aup = netdev_priv(dev);
586 585
587 if (!capable(CAP_NET_ADMIN)) 586 if (!capable(CAP_NET_ADMIN))
588 return -EPERM; 587 return -EPERM;
@@ -596,7 +595,7 @@ static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
596static void 595static void
597au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 596au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
598{ 597{
599 struct au1000_private *aup = (struct au1000_private *)dev->priv; 598 struct au1000_private *aup = netdev_priv(dev);
600 599
601 strcpy(info->driver, DRV_NAME); 600 strcpy(info->driver, DRV_NAME);
602 strcpy(info->version, DRV_VERSION); 601 strcpy(info->version, DRV_VERSION);
@@ -652,7 +651,7 @@ static struct net_device * au1000_probe(int port_num)
652 printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n", 651 printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
653 dev->name, base, irq); 652 dev->name, base, irq);
654 653
655 aup = dev->priv; 654 aup = netdev_priv(dev);
656 655
657 spin_lock_init(&aup->lock); 656 spin_lock_init(&aup->lock);
658 657
@@ -817,7 +816,7 @@ err_out:
817 */ 816 */
818static int au1000_init(struct net_device *dev) 817static int au1000_init(struct net_device *dev)
819{ 818{
820 struct au1000_private *aup = (struct au1000_private *) dev->priv; 819 struct au1000_private *aup = netdev_priv(dev);
821 unsigned long flags; 820 unsigned long flags;
822 int i; 821 int i;
823 u32 control; 822 u32 control;
@@ -868,7 +867,7 @@ static int au1000_init(struct net_device *dev)
868static void 867static void
869au1000_adjust_link(struct net_device *dev) 868au1000_adjust_link(struct net_device *dev)
870{ 869{
871 struct au1000_private *aup = (struct au1000_private *) dev->priv; 870 struct au1000_private *aup = netdev_priv(dev);
872 struct phy_device *phydev = aup->phy_dev; 871 struct phy_device *phydev = aup->phy_dev;
873 unsigned long flags; 872 unsigned long flags;
874 873
@@ -947,7 +946,7 @@ au1000_adjust_link(struct net_device *dev)
947static int au1000_open(struct net_device *dev) 946static int au1000_open(struct net_device *dev)
948{ 947{
949 int retval; 948 int retval;
950 struct au1000_private *aup = (struct au1000_private *) dev->priv; 949 struct au1000_private *aup = netdev_priv(dev);
951 950
952 if (au1000_debug > 4) 951 if (au1000_debug > 4)
953 printk("%s: open: dev=%p\n", dev->name, dev); 952 printk("%s: open: dev=%p\n", dev->name, dev);
@@ -982,7 +981,7 @@ static int au1000_open(struct net_device *dev)
982static int au1000_close(struct net_device *dev) 981static int au1000_close(struct net_device *dev)
983{ 982{
984 unsigned long flags; 983 unsigned long flags;
985 struct au1000_private *const aup = (struct au1000_private *) dev->priv; 984 struct au1000_private *const aup = netdev_priv(dev);
986 985
987 if (au1000_debug > 4) 986 if (au1000_debug > 4)
988 printk("%s: close: dev=%p\n", dev->name, dev); 987 printk("%s: close: dev=%p\n", dev->name, dev);
@@ -1013,7 +1012,7 @@ static void __exit au1000_cleanup_module(void)
1013 for (i = 0; i < num_ifs; i++) { 1012 for (i = 0; i < num_ifs; i++) {
1014 dev = iflist[i].dev; 1013 dev = iflist[i].dev;
1015 if (dev) { 1014 if (dev) {
1016 aup = (struct au1000_private *) dev->priv; 1015 aup = netdev_priv(dev);
1017 unregister_netdev(dev); 1016 unregister_netdev(dev);
1018 mdiobus_unregister(aup->mii_bus); 1017 mdiobus_unregister(aup->mii_bus);
1019 mdiobus_free(aup->mii_bus); 1018 mdiobus_free(aup->mii_bus);
@@ -1035,7 +1034,7 @@ static void __exit au1000_cleanup_module(void)
1035 1034
1036static void update_tx_stats(struct net_device *dev, u32 status) 1035static void update_tx_stats(struct net_device *dev, u32 status)
1037{ 1036{
1038 struct au1000_private *aup = (struct au1000_private *) dev->priv; 1037 struct au1000_private *aup = netdev_priv(dev);
1039 struct net_device_stats *ps = &dev->stats; 1038 struct net_device_stats *ps = &dev->stats;
1040 1039
1041 if (status & TX_FRAME_ABORTED) { 1040 if (status & TX_FRAME_ABORTED) {
@@ -1064,7 +1063,7 @@ static void update_tx_stats(struct net_device *dev, u32 status)
1064 */ 1063 */
1065static void au1000_tx_ack(struct net_device *dev) 1064static void au1000_tx_ack(struct net_device *dev)
1066{ 1065{
1067 struct au1000_private *aup = (struct au1000_private *) dev->priv; 1066 struct au1000_private *aup = netdev_priv(dev);
1068 volatile tx_dma_t *ptxd; 1067 volatile tx_dma_t *ptxd;
1069 1068
1070 ptxd = aup->tx_dma_ring[aup->tx_tail]; 1069 ptxd = aup->tx_dma_ring[aup->tx_tail];
@@ -1091,7 +1090,7 @@ static void au1000_tx_ack(struct net_device *dev)
1091 */ 1090 */
1092static int au1000_tx(struct sk_buff *skb, struct net_device *dev) 1091static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1093{ 1092{
1094 struct au1000_private *aup = (struct au1000_private *) dev->priv; 1093 struct au1000_private *aup = netdev_priv(dev);
1095 struct net_device_stats *ps = &dev->stats; 1094 struct net_device_stats *ps = &dev->stats;
1096 volatile tx_dma_t *ptxd; 1095 volatile tx_dma_t *ptxd;
1097 u32 buff_stat; 1096 u32 buff_stat;
@@ -1145,7 +1144,7 @@ static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1145 1144
1146static inline void update_rx_stats(struct net_device *dev, u32 status) 1145static inline void update_rx_stats(struct net_device *dev, u32 status)
1147{ 1146{
1148 struct au1000_private *aup = (struct au1000_private *) dev->priv; 1147 struct au1000_private *aup = netdev_priv(dev);
1149 struct net_device_stats *ps = &dev->stats; 1148 struct net_device_stats *ps = &dev->stats;
1150 1149
1151 ps->rx_packets++; 1150 ps->rx_packets++;
@@ -1173,7 +1172,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
1173 */ 1172 */
1174static int au1000_rx(struct net_device *dev) 1173static int au1000_rx(struct net_device *dev)
1175{ 1174{
1176 struct au1000_private *aup = (struct au1000_private *) dev->priv; 1175 struct au1000_private *aup = netdev_priv(dev);
1177 struct sk_buff *skb; 1176 struct sk_buff *skb;
1178 volatile rx_dma_t *prxd; 1177 volatile rx_dma_t *prxd;
1179 u32 buff_stat, status; 1178 u32 buff_stat, status;
@@ -1240,7 +1239,6 @@ static int au1000_rx(struct net_device *dev)
1240 /* next descriptor */ 1239 /* next descriptor */
1241 prxd = aup->rx_dma_ring[aup->rx_head]; 1240 prxd = aup->rx_dma_ring[aup->rx_head];
1242 buff_stat = prxd->buff_stat; 1241 buff_stat = prxd->buff_stat;
1243 dev->last_rx = jiffies;
1244 } 1242 }
1245 return 0; 1243 return 0;
1246} 1244}
@@ -1276,7 +1274,7 @@ static void au1000_tx_timeout(struct net_device *dev)
1276 1274
1277static void set_rx_mode(struct net_device *dev) 1275static void set_rx_mode(struct net_device *dev)
1278{ 1276{
1279 struct au1000_private *aup = (struct au1000_private *) dev->priv; 1277 struct au1000_private *aup = netdev_priv(dev);
1280 1278
1281 if (au1000_debug > 4) 1279 if (au1000_debug > 4)
1282 printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags); 1280 printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);
@@ -1308,7 +1306,7 @@ static void set_rx_mode(struct net_device *dev)
1308 1306
1309static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1307static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1310{ 1308{
1311 struct au1000_private *aup = (struct au1000_private *)dev->priv; 1309 struct au1000_private *aup = netdev_priv(dev);
1312 1310
1313 if (!netif_running(dev)) return -EINVAL; 1311 if (!netif_running(dev)) return -EINVAL;
1314 1312
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 9a314d88e7b6..337488ec707c 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -758,13 +758,10 @@ static int ax_init_dev(struct net_device *dev, int first_init)
758#endif 758#endif
759 ax_NS8390_init(dev, 0); 759 ax_NS8390_init(dev, 0);
760 760
761 if (first_init) { 761 if (first_init)
762 DECLARE_MAC_BUF(mac); 762 dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %pM\n",
763
764 dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %s\n",
765 ei_status.word16 ? 16:8, dev->irq, dev->base_addr, 763 ei_status.word16 ? 16:8, dev->irq, dev->base_addr,
766 print_mac(mac, dev->dev_addr)); 764 dev->dev_addr);
767 }
768 765
769 ret = register_netdev(dev); 766 ret = register_netdev(dev);
770 if (ret) 767 if (ret)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c3bda5ce67c4..0e7470a201f0 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -829,7 +829,6 @@ static int b44_rx(struct b44 *bp, int budget)
829 skb->ip_summed = CHECKSUM_NONE; 829 skb->ip_summed = CHECKSUM_NONE;
830 skb->protocol = eth_type_trans(skb, bp->dev); 830 skb->protocol = eth_type_trans(skb, bp->dev);
831 netif_receive_skb(skb); 831 netif_receive_skb(skb);
832 bp->dev->last_rx = jiffies;
833 received++; 832 received++;
834 budget--; 833 budget--;
835 next_pkt: 834 next_pkt:
@@ -847,7 +846,6 @@ static int b44_rx(struct b44 *bp, int budget)
847static int b44_poll(struct napi_struct *napi, int budget) 846static int b44_poll(struct napi_struct *napi, int budget)
848{ 847{
849 struct b44 *bp = container_of(napi, struct b44, napi); 848 struct b44 *bp = container_of(napi, struct b44, napi);
850 struct net_device *netdev = bp->dev;
851 int work_done; 849 int work_done;
852 850
853 spin_lock_irq(&bp->lock); 851 spin_lock_irq(&bp->lock);
@@ -876,7 +874,7 @@ static int b44_poll(struct napi_struct *napi, int budget)
876 } 874 }
877 875
878 if (work_done < budget) { 876 if (work_done < budget) {
879 netif_rx_complete(netdev, napi); 877 netif_rx_complete(napi);
880 b44_enable_ints(bp); 878 b44_enable_ints(bp);
881 } 879 }
882 880
@@ -908,13 +906,13 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
908 goto irq_ack; 906 goto irq_ack;
909 } 907 }
910 908
911 if (netif_rx_schedule_prep(dev, &bp->napi)) { 909 if (netif_rx_schedule_prep(&bp->napi)) {
912 /* NOTE: These writes are posted by the readback of 910 /* NOTE: These writes are posted by the readback of
913 * the ISTAT register below. 911 * the ISTAT register below.
914 */ 912 */
915 bp->istat = istat; 913 bp->istat = istat;
916 __b44_disable_ints(bp); 914 __b44_disable_ints(bp);
917 __netif_rx_schedule(dev, &bp->napi); 915 __netif_rx_schedule(&bp->napi);
918 } else { 916 } else {
919 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n", 917 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
920 dev->name); 918 dev->name);
@@ -2117,7 +2115,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2117 struct net_device *dev; 2115 struct net_device *dev;
2118 struct b44 *bp; 2116 struct b44 *bp;
2119 int err; 2117 int err;
2120 DECLARE_MAC_BUF(mac);
2121 2118
2122 instance++; 2119 instance++;
2123 2120
@@ -2213,8 +2210,8 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
2213 */ 2210 */
2214 b44_chip_reset(bp, B44_CHIP_RESET_FULL); 2211 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2215 2212
2216 printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %s\n", 2213 printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
2217 dev->name, print_mac(mac, dev->dev_addr)); 2214 dev->name, dev->dev_addr);
2218 2215
2219 return 0; 2216 return 0;
2220 2217
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index b458d607a9c6..78e31aa861e0 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -741,7 +741,6 @@ static void bfin_mac_rx(struct net_device *dev)
741 blackfin_dcache_invalidate_range((unsigned long)skb->head, 741 blackfin_dcache_invalidate_range((unsigned long)skb->head,
742 (unsigned long)skb->tail); 742 (unsigned long)skb->tail);
743 743
744 dev->last_rx = jiffies;
745 skb->protocol = eth_type_trans(skb, dev); 744 skb->protocol = eth_type_trans(skb, dev);
746#if defined(BFIN_MAC_CSUM_OFFLOAD) 745#if defined(BFIN_MAC_CSUM_OFFLOAD)
747 skb->csum = current_rx_ptr->status.ip_payload_csum; 746 skb->csum = current_rx_ptr->status.ip_payload_csum;
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index a42bd19646d3..8a546a33d581 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -716,13 +716,11 @@ static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
716 skb_put(skb, nb); 716 skb_put(skb, nb);
717 skb->protocol = eth_type_trans(skb, dev); 717 skb->protocol = eth_type_trans(skb, dev);
718 netif_rx(skb); 718 netif_rx(skb);
719 dev->last_rx = jiffies;
720 ++dev->stats.rx_packets; 719 ++dev->stats.rx_packets;
721 dev->stats.rx_bytes += nb; 720 dev->stats.rx_bytes += nb;
722 } else { 721 } else {
723 ++dev->stats.rx_dropped; 722 ++dev->stats.rx_dropped;
724 } 723 }
725 dev->last_rx = jiffies;
726 if ((skb = bp->rx_bufs[i]) == NULL) { 724 if ((skb = bp->rx_bufs[i]) == NULL) {
727 bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2); 725 bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
728 if (skb != NULL) 726 if (skb != NULL)
@@ -1258,7 +1256,6 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
1258 unsigned char addr[6]; 1256 unsigned char addr[6];
1259 struct net_device *dev; 1257 struct net_device *dev;
1260 int is_bmac_plus = ((int)match->data) != 0; 1258 int is_bmac_plus = ((int)match->data) != 0;
1261 DECLARE_MAC_BUF(mac);
1262 1259
1263 if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { 1260 if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
1264 printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n"); 1261 printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
@@ -1368,8 +1365,8 @@ static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_i
1368 goto err_out_irq2; 1365 goto err_out_irq2;
1369 } 1366 }
1370 1367
1371 printk(KERN_INFO "%s: BMAC%s at %s", 1368 printk(KERN_INFO "%s: BMAC%s at %pM",
1372 dev->name, (is_bmac_plus ? "+" : ""), print_mac(mac, dev->dev_addr)); 1369 dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
1373 XXDEBUG((", base_addr=%#0lx", dev->base_addr)); 1370 XXDEBUG((", base_addr=%#0lx", dev->base_addr));
1374 printk("\n"); 1371 printk("\n");
1375 1372
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 9e8222f9e90e..d4a3dac21dcf 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -57,8 +57,8 @@
57 57
58#define DRV_MODULE_NAME "bnx2" 58#define DRV_MODULE_NAME "bnx2"
59#define PFX DRV_MODULE_NAME ": " 59#define PFX DRV_MODULE_NAME ": "
60#define DRV_MODULE_VERSION "1.8.1" 60#define DRV_MODULE_VERSION "1.9.0"
61#define DRV_MODULE_RELDATE "Oct 7, 2008" 61#define DRV_MODULE_RELDATE "Dec 16, 2008"
62 62
63#define RUN_AT(x) (jiffies + (x)) 63#define RUN_AT(x) (jiffies + (x))
64 64
@@ -89,6 +89,7 @@ typedef enum {
89 BCM5709, 89 BCM5709,
90 BCM5709S, 90 BCM5709S,
91 BCM5716, 91 BCM5716,
92 BCM5716S,
92} board_t; 93} board_t;
93 94
94/* indexed by board_t, above */ 95/* indexed by board_t, above */
@@ -105,6 +106,7 @@ static struct {
105 { "Broadcom NetXtreme II BCM5709 1000Base-T" }, 106 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
106 { "Broadcom NetXtreme II BCM5709 1000Base-SX" }, 107 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
107 { "Broadcom NetXtreme II BCM5716 1000Base-T" }, 108 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
109 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
108 }; 110 };
109 111
110static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = { 112static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
@@ -128,6 +130,8 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S }, 130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
129 { PCI_VENDOR_ID_BROADCOM, 0x163b, 131 { PCI_VENDOR_ID_BROADCOM, 0x163b,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 }, 132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
133 { PCI_VENDOR_ID_BROADCOM, 0x163c,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
131 { 0, } 135 { 0, }
132}; 136};
133 137
@@ -1652,7 +1656,7 @@ bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1652 * exchanging base pages plus 3 next pages and 1656 * exchanging base pages plus 3 next pages and
1653 * normally completes in about 120 msec. 1657 * normally completes in about 120 msec.
1654 */ 1658 */
1655 bp->current_interval = SERDES_AN_TIMEOUT; 1659 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1656 bp->serdes_an_pending = 1; 1660 bp->serdes_an_pending = 1;
1657 mod_timer(&bp->timer, jiffies + bp->current_interval); 1661 mod_timer(&bp->timer, jiffies + bp->current_interval);
1658 } else { 1662 } else {
@@ -2274,7 +2278,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2274 return 0; 2278 return 0;
2275 2279
2276 /* wait for an acknowledgement. */ 2280 /* wait for an acknowledgement. */
2277 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) { 2281 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2278 msleep(10); 2282 msleep(10);
2279 2283
2280 val = bnx2_shmem_rd(bp, BNX2_FW_MB); 2284 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
@@ -3000,7 +3004,6 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3000#endif 3004#endif
3001 netif_receive_skb(skb); 3005 netif_receive_skb(skb);
3002 3006
3003 bp->dev->last_rx = jiffies;
3004 rx_pkt++; 3007 rx_pkt++;
3005 3008
3006next_rx: 3009next_rx:
@@ -3040,7 +3043,6 @@ bnx2_msi(int irq, void *dev_instance)
3040{ 3043{
3041 struct bnx2_napi *bnapi = dev_instance; 3044 struct bnx2_napi *bnapi = dev_instance;
3042 struct bnx2 *bp = bnapi->bp; 3045 struct bnx2 *bp = bnapi->bp;
3043 struct net_device *dev = bp->dev;
3044 3046
3045 prefetch(bnapi->status_blk.msi); 3047 prefetch(bnapi->status_blk.msi);
3046 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 3048 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
@@ -3051,7 +3053,7 @@ bnx2_msi(int irq, void *dev_instance)
3051 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 3053 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3052 return IRQ_HANDLED; 3054 return IRQ_HANDLED;
3053 3055
3054 netif_rx_schedule(dev, &bnapi->napi); 3056 netif_rx_schedule(&bnapi->napi);
3055 3057
3056 return IRQ_HANDLED; 3058 return IRQ_HANDLED;
3057} 3059}
@@ -3061,7 +3063,6 @@ bnx2_msi_1shot(int irq, void *dev_instance)
3061{ 3063{
3062 struct bnx2_napi *bnapi = dev_instance; 3064 struct bnx2_napi *bnapi = dev_instance;
3063 struct bnx2 *bp = bnapi->bp; 3065 struct bnx2 *bp = bnapi->bp;
3064 struct net_device *dev = bp->dev;
3065 3066
3066 prefetch(bnapi->status_blk.msi); 3067 prefetch(bnapi->status_blk.msi);
3067 3068
@@ -3069,7 +3070,7 @@ bnx2_msi_1shot(int irq, void *dev_instance)
3069 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 3070 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3070 return IRQ_HANDLED; 3071 return IRQ_HANDLED;
3071 3072
3072 netif_rx_schedule(dev, &bnapi->napi); 3073 netif_rx_schedule(&bnapi->napi);
3073 3074
3074 return IRQ_HANDLED; 3075 return IRQ_HANDLED;
3075} 3076}
@@ -3079,7 +3080,6 @@ bnx2_interrupt(int irq, void *dev_instance)
3079{ 3080{
3080 struct bnx2_napi *bnapi = dev_instance; 3081 struct bnx2_napi *bnapi = dev_instance;
3081 struct bnx2 *bp = bnapi->bp; 3082 struct bnx2 *bp = bnapi->bp;
3082 struct net_device *dev = bp->dev;
3083 struct status_block *sblk = bnapi->status_blk.msi; 3083 struct status_block *sblk = bnapi->status_blk.msi;
3084 3084
3085 /* When using INTx, it is possible for the interrupt to arrive 3085 /* When using INTx, it is possible for the interrupt to arrive
@@ -3106,9 +3106,9 @@ bnx2_interrupt(int irq, void *dev_instance)
3106 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 3106 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3107 return IRQ_HANDLED; 3107 return IRQ_HANDLED;
3108 3108
3109 if (netif_rx_schedule_prep(dev, &bnapi->napi)) { 3109 if (netif_rx_schedule_prep(&bnapi->napi)) {
3110 bnapi->last_status_idx = sblk->status_idx; 3110 bnapi->last_status_idx = sblk->status_idx;
3111 __netif_rx_schedule(dev, &bnapi->napi); 3111 __netif_rx_schedule(&bnapi->napi);
3112 } 3112 }
3113 3113
3114 return IRQ_HANDLED; 3114 return IRQ_HANDLED;
@@ -3218,7 +3218,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3218 rmb(); 3218 rmb();
3219 if (likely(!bnx2_has_fast_work(bnapi))) { 3219 if (likely(!bnx2_has_fast_work(bnapi))) {
3220 3220
3221 netif_rx_complete(bp->dev, napi); 3221 netif_rx_complete(napi);
3222 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | 3222 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3223 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 3223 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3224 bnapi->last_status_idx); 3224 bnapi->last_status_idx);
@@ -3251,7 +3251,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
3251 3251
3252 rmb(); 3252 rmb();
3253 if (likely(!bnx2_has_work(bnapi))) { 3253 if (likely(!bnx2_has_work(bnapi))) {
3254 netif_rx_complete(bp->dev, napi); 3254 netif_rx_complete(napi);
3255 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { 3255 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3256 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 3256 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3257 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 3257 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
@@ -4493,7 +4493,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4493static int 4493static int
4494bnx2_init_chip(struct bnx2 *bp) 4494bnx2_init_chip(struct bnx2 *bp)
4495{ 4495{
4496 u32 val; 4496 u32 val, mtu;
4497 int rc, i; 4497 int rc, i;
4498 4498
4499 /* Make sure the interrupt is not active. */ 4499 /* Make sure the interrupt is not active. */
@@ -4585,11 +4585,19 @@ bnx2_init_chip(struct bnx2 *bp)
4585 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val); 4585 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4586 4586
4587 /* Program the MTU. Also include 4 bytes for CRC32. */ 4587 /* Program the MTU. Also include 4 bytes for CRC32. */
4588 val = bp->dev->mtu + ETH_HLEN + 4; 4588 mtu = bp->dev->mtu;
4589 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4589 if (val > (MAX_ETHERNET_PACKET_SIZE + 4)) 4590 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4590 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA; 4591 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4591 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val); 4592 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4592 4593
4594 if (mtu < 1500)
4595 mtu = 1500;
4596
4597 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4598 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4599 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4600
4593 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) 4601 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4594 bp->bnx2_napi[i].last_status_idx = 0; 4602 bp->bnx2_napi[i].last_status_idx = 0;
4595 4603
@@ -5719,7 +5727,7 @@ bnx2_5708_serdes_timer(struct bnx2 *bp)
5719 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 5727 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5720 if (bmcr & BMCR_ANENABLE) { 5728 if (bmcr & BMCR_ANENABLE) {
5721 bnx2_enable_forced_2g5(bp); 5729 bnx2_enable_forced_2g5(bp);
5722 bp->current_interval = SERDES_FORCED_TIMEOUT; 5730 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
5723 } else { 5731 } else {
5724 bnx2_disable_forced_2g5(bp); 5732 bnx2_disable_forced_2g5(bp);
5725 bp->serdes_an_pending = 2; 5733 bp->serdes_an_pending = 2;
@@ -5816,6 +5824,8 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5816{ 5824{
5817 int i, rc; 5825 int i, rc;
5818 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC]; 5826 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
5827 struct net_device *dev = bp->dev;
5828 const int len = sizeof(bp->irq_tbl[0].name);
5819 5829
5820 bnx2_setup_msix_tbl(bp); 5830 bnx2_setup_msix_tbl(bp);
5821 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1); 5831 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
@@ -5826,7 +5836,7 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
5826 msix_ent[i].entry = i; 5836 msix_ent[i].entry = i;
5827 msix_ent[i].vector = 0; 5837 msix_ent[i].vector = 0;
5828 5838
5829 strcpy(bp->irq_tbl[i].name, bp->dev->name); 5839 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
5830 bp->irq_tbl[i].handler = bnx2_msi_1shot; 5840 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5831 } 5841 }
5832 5842
@@ -6173,7 +6183,7 @@ bnx2_get_stats(struct net_device *dev)
6173{ 6183{
6174 struct bnx2 *bp = netdev_priv(dev); 6184 struct bnx2 *bp = netdev_priv(dev);
6175 struct statistics_block *stats_blk = bp->stats_blk; 6185 struct statistics_block *stats_blk = bp->stats_blk;
6176 struct net_device_stats *net_stats = &bp->net_stats; 6186 struct net_device_stats *net_stats = &dev->stats;
6177 6187
6178 if (bp->stats_blk == NULL) { 6188 if (bp->stats_blk == NULL) {
6179 return net_stats; 6189 return net_stats;
@@ -6540,7 +6550,7 @@ bnx2_nway_reset(struct net_device *dev)
6540 6550
6541 spin_lock_bh(&bp->phy_lock); 6551 spin_lock_bh(&bp->phy_lock);
6542 6552
6543 bp->current_interval = SERDES_AN_TIMEOUT; 6553 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6544 bp->serdes_an_pending = 1; 6554 bp->serdes_an_pending = 1;
6545 mod_timer(&bp->timer, jiffies + bp->current_interval); 6555 mod_timer(&bp->timer, jiffies + bp->current_interval);
6546 } 6556 }
@@ -7615,7 +7625,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7615 7625
7616 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || 7626 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7617 (CHIP_ID(bp) == CHIP_ID_5708_B0) || 7627 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7618 (CHIP_ID(bp) == CHIP_ID_5708_B1)) { 7628 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7629 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7619 bp->flags |= BNX2_FLAG_NO_WOL; 7630 bp->flags |= BNX2_FLAG_NO_WOL;
7620 bp->wol = 0; 7631 bp->wol = 0;
7621 } 7632 }
@@ -7724,6 +7735,25 @@ bnx2_init_napi(struct bnx2 *bp)
7724 } 7735 }
7725} 7736}
7726 7737
7738static const struct net_device_ops bnx2_netdev_ops = {
7739 .ndo_open = bnx2_open,
7740 .ndo_start_xmit = bnx2_start_xmit,
7741 .ndo_stop = bnx2_close,
7742 .ndo_get_stats = bnx2_get_stats,
7743 .ndo_set_rx_mode = bnx2_set_rx_mode,
7744 .ndo_do_ioctl = bnx2_ioctl,
7745 .ndo_validate_addr = eth_validate_addr,
7746 .ndo_set_mac_address = bnx2_change_mac_addr,
7747 .ndo_change_mtu = bnx2_change_mtu,
7748 .ndo_tx_timeout = bnx2_tx_timeout,
7749#ifdef BCM_VLAN
7750 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
7751#endif
7752#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7753 .ndo_poll_controller = poll_bnx2,
7754#endif
7755};
7756
7727static int __devinit 7757static int __devinit
7728bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 7758bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7729{ 7759{
@@ -7732,7 +7762,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7732 struct bnx2 *bp; 7762 struct bnx2 *bp;
7733 int rc; 7763 int rc;
7734 char str[40]; 7764 char str[40];
7735 DECLARE_MAC_BUF(mac);
7736 7765
7737 if (version_printed++ == 0) 7766 if (version_printed++ == 0)
7738 printk(KERN_INFO "%s", version); 7767 printk(KERN_INFO "%s", version);
@@ -7749,28 +7778,13 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7749 return rc; 7778 return rc;
7750 } 7779 }
7751 7780
7752 dev->open = bnx2_open; 7781 dev->netdev_ops = &bnx2_netdev_ops;
7753 dev->hard_start_xmit = bnx2_start_xmit;
7754 dev->stop = bnx2_close;
7755 dev->get_stats = bnx2_get_stats;
7756 dev->set_rx_mode = bnx2_set_rx_mode;
7757 dev->do_ioctl = bnx2_ioctl;
7758 dev->set_mac_address = bnx2_change_mac_addr;
7759 dev->change_mtu = bnx2_change_mtu;
7760 dev->tx_timeout = bnx2_tx_timeout;
7761 dev->watchdog_timeo = TX_TIMEOUT; 7782 dev->watchdog_timeo = TX_TIMEOUT;
7762#ifdef BCM_VLAN
7763 dev->vlan_rx_register = bnx2_vlan_rx_register;
7764#endif
7765 dev->ethtool_ops = &bnx2_ethtool_ops; 7783 dev->ethtool_ops = &bnx2_ethtool_ops;
7766 7784
7767 bp = netdev_priv(dev); 7785 bp = netdev_priv(dev);
7768 bnx2_init_napi(bp); 7786 bnx2_init_napi(bp);
7769 7787
7770#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7771 dev->poll_controller = poll_bnx2;
7772#endif
7773
7774 pci_set_drvdata(pdev, dev); 7788 pci_set_drvdata(pdev, dev);
7775 7789
7776 memcpy(dev->dev_addr, bp->mac_addr, 6); 7790 memcpy(dev->dev_addr, bp->mac_addr, 6);
@@ -7799,14 +7813,14 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7799 } 7813 }
7800 7814
7801 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, " 7815 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
7802 "IRQ %d, node addr %s\n", 7816 "IRQ %d, node addr %pM\n",
7803 dev->name, 7817 dev->name,
7804 board_info[ent->driver_data].name, 7818 board_info[ent->driver_data].name,
7805 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', 7819 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7806 ((CHIP_ID(bp) & 0x0ff0) >> 4), 7820 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7807 bnx2_bus_string(bp, str), 7821 bnx2_bus_string(bp, str),
7808 dev->base_addr, 7822 dev->base_addr,
7809 bp->pdev->irq, print_mac(mac, dev->dev_addr)); 7823 bp->pdev->irq, dev->dev_addr);
7810 7824
7811 return 0; 7825 return 0;
7812} 7826}
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 0b032c3c7b61..900641ac63e0 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -4202,7 +4202,14 @@ struct l2_fhdr {
4202 4202
4203#define BNX2_RBUF_CONFIG 0x0020000c 4203#define BNX2_RBUF_CONFIG 0x0020000c
4204#define BNX2_RBUF_CONFIG_XOFF_TRIP (0x3ffL<<0) 4204#define BNX2_RBUF_CONFIG_XOFF_TRIP (0x3ffL<<0)
4205#define BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu) \
4206 ((((mtu) - 1500) * 31 / 1000) + 54)
4205#define BNX2_RBUF_CONFIG_XON_TRIP (0x3ffL<<16) 4207#define BNX2_RBUF_CONFIG_XON_TRIP (0x3ffL<<16)
4208#define BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu) \
4209 ((((mtu) - 1500) * 39 / 1000) + 66)
4210#define BNX2_RBUF_CONFIG_VAL(mtu) \
4211 (BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu) | \
4212 (BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu) << 16))
4206 4213
4207#define BNX2_RBUF_FW_BUF_ALLOC 0x00200010 4214#define BNX2_RBUF_FW_BUF_ALLOC 0x00200010
4208#define BNX2_RBUF_FW_BUF_ALLOC_VALUE (0x1ffL<<7) 4215#define BNX2_RBUF_FW_BUF_ALLOC_VALUE (0x1ffL<<7)
@@ -4224,11 +4231,25 @@ struct l2_fhdr {
4224 4231
4225#define BNX2_RBUF_CONFIG2 0x0020001c 4232#define BNX2_RBUF_CONFIG2 0x0020001c
4226#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP (0x3ffL<<0) 4233#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP (0x3ffL<<0)
4234#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu) \
4235 ((((mtu) - 1500) * 4 / 1000) + 5)
4227#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP (0x3ffL<<16) 4236#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP (0x3ffL<<16)
4237#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu) \
4238 ((((mtu) - 1500) * 2 / 100) + 30)
4239#define BNX2_RBUF_CONFIG2_VAL(mtu) \
4240 (BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu) | \
4241 (BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu) << 16))
4228 4242
4229#define BNX2_RBUF_CONFIG3 0x00200020 4243#define BNX2_RBUF_CONFIG3 0x00200020
4230#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP (0x3ffL<<0) 4244#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP (0x3ffL<<0)
4245#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu) \
4246 ((((mtu) - 1500) * 12 / 1000) + 18)
4231#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP (0x3ffL<<16) 4247#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP (0x3ffL<<16)
4248#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu) \
4249 ((((mtu) - 1500) * 2 / 100) + 30)
4250#define BNX2_RBUF_CONFIG3_VAL(mtu) \
4251 (BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu) | \
4252 (BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu) << 16))
4232 4253
4233#define BNX2_RBUF_PKT_DATA 0x00208000 4254#define BNX2_RBUF_PKT_DATA 0x00208000
4234#define BNX2_RBUF_CLIST_DATA 0x00210000 4255#define BNX2_RBUF_CLIST_DATA 0x00210000
@@ -6606,7 +6627,7 @@ struct bnx2_irq {
6606 irq_handler_t handler; 6627 irq_handler_t handler;
6607 unsigned int vector; 6628 unsigned int vector;
6608 u8 requested; 6629 u8 requested;
6609 char name[16]; 6630 char name[IFNAMSIZ + 2];
6610}; 6631};
6611 6632
6612struct bnx2_tx_ring_info { 6633struct bnx2_tx_ring_info {
@@ -6661,8 +6682,6 @@ struct bnx2_napi {
6661 struct bnx2_tx_ring_info tx_ring; 6682 struct bnx2_tx_ring_info tx_ring;
6662}; 6683};
6663 6684
6664#define BNX2_TIMER_INTERVAL HZ
6665
6666struct bnx2 { 6685struct bnx2 {
6667 /* Fields used in the tx and intr/napi performance paths are grouped */ 6686 /* Fields used in the tx and intr/napi performance paths are grouped */
6668 /* together in the beginning of the structure. */ 6687 /* together in the beginning of the structure. */
@@ -6710,7 +6729,11 @@ struct bnx2 {
6710 6729
6711 /* End of fields used in the performance code paths. */ 6730 /* End of fields used in the performance code paths. */
6712 6731
6713 int current_interval; 6732 unsigned int current_interval;
6733#define BNX2_TIMER_INTERVAL HZ
6734#define BNX2_SERDES_AN_TIMEOUT (HZ / 3)
6735#define BNX2_SERDES_FORCED_TIMEOUT (HZ / 10)
6736
6714 struct timer_list timer; 6737 struct timer_list timer;
6715 struct work_struct reset_task; 6738 struct work_struct reset_task;
6716 6739
@@ -6825,9 +6848,6 @@ struct bnx2 {
6825 u8 flow_ctrl; /* actual flow ctrl settings */ 6848 u8 flow_ctrl; /* actual flow ctrl settings */
6826 /* may be different from */ 6849 /* may be different from */
6827 /* req_flow_ctrl if autoneg */ 6850 /* req_flow_ctrl if autoneg */
6828#define FLOW_CTRL_TX 1
6829#define FLOW_CTRL_RX 2
6830
6831 u32 advertising; 6851 u32 advertising;
6832 6852
6833 u8 req_flow_ctrl; /* flow ctrl advertisement */ 6853 u8 req_flow_ctrl; /* flow ctrl advertisement */
@@ -6842,8 +6862,6 @@ struct bnx2 {
6842#define PHY_LOOPBACK 2 6862#define PHY_LOOPBACK 2
6843 6863
6844 u8 serdes_an_pending; 6864 u8 serdes_an_pending;
6845#define SERDES_AN_TIMEOUT (HZ / 3)
6846#define SERDES_FORCED_TIMEOUT (HZ / 10)
6847 6865
6848 u8 mac_addr[8]; 6866 u8 mac_addr[8];
6849 6867
@@ -6854,8 +6872,6 @@ struct bnx2 {
6854 int pm_cap; 6872 int pm_cap;
6855 int pcix_cap; 6873 int pcix_cap;
6856 6874
6857 struct net_device_stats net_stats;
6858
6859 struct flash_spec *flash_info; 6875 struct flash_spec *flash_info;
6860 u32 flash_size; 6876 u32 flash_size;
6861 6877
@@ -6944,14 +6960,14 @@ struct fw_info {
6944/* This value (in milliseconds) determines the frequency of the driver 6960/* This value (in milliseconds) determines the frequency of the driver
6945 * issuing the PULSE message code. The firmware monitors this periodic 6961 * issuing the PULSE message code. The firmware monitors this periodic
6946 * pulse to determine when to switch to an OS-absent mode. */ 6962 * pulse to determine when to switch to an OS-absent mode. */
6947#define DRV_PULSE_PERIOD_MS 250 6963#define BNX2_DRV_PULSE_PERIOD_MS 250
6948 6964
6949/* This value (in milliseconds) determines how long the driver should 6965/* This value (in milliseconds) determines how long the driver should
6950 * wait for an acknowledgement from the firmware before timing out. Once 6966 * wait for an acknowledgement from the firmware before timing out. Once
6951 * the firmware has timed out, the driver will assume there is no firmware 6967 * the firmware has timed out, the driver will assume there is no firmware
6952 * running and there won't be any firmware-driver synchronization during a 6968 * running and there won't be any firmware-driver synchronization during a
6953 * driver reset. */ 6969 * driver reset. */
6954#define FW_ACK_TIME_OUT_MS 1000 6970#define BNX2_FW_ACK_TIME_OUT_MS 1000
6955 6971
6956 6972
6957#define BNX2_DRV_RESET_SIGNATURE 0x00000000 6973#define BNX2_DRV_RESET_SIGNATURE 0x00000000
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index 4ce7fe9c5251..67de94f1f30e 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -289,7 +289,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
289 /* pause enable/disable */ 289 /* pause enable/disable */
290 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, 290 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
291 EMAC_RX_MODE_FLOW_EN); 291 EMAC_RX_MODE_FLOW_EN);
292 if (vars->flow_ctrl & FLOW_CTRL_RX) 292 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
293 bnx2x_bits_en(bp, emac_base + 293 bnx2x_bits_en(bp, emac_base +
294 EMAC_REG_EMAC_RX_MODE, 294 EMAC_REG_EMAC_RX_MODE,
295 EMAC_RX_MODE_FLOW_EN); 295 EMAC_RX_MODE_FLOW_EN);
@@ -297,7 +297,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
297 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 297 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
298 (EMAC_TX_MODE_EXT_PAUSE_EN | 298 (EMAC_TX_MODE_EXT_PAUSE_EN |
299 EMAC_TX_MODE_FLOW_EN)); 299 EMAC_TX_MODE_FLOW_EN));
300 if (vars->flow_ctrl & FLOW_CTRL_TX) 300 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
301 bnx2x_bits_en(bp, emac_base + 301 bnx2x_bits_en(bp, emac_base +
302 EMAC_REG_EMAC_TX_MODE, 302 EMAC_REG_EMAC_TX_MODE,
303 (EMAC_TX_MODE_EXT_PAUSE_EN | 303 (EMAC_TX_MODE_EXT_PAUSE_EN |
@@ -333,7 +333,7 @@ static u8 bnx2x_emac_enable(struct link_params *params,
333 /* enable the NIG in/out to the emac */ 333 /* enable the NIG in/out to the emac */
334 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); 334 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
335 val = 0; 335 val = 0;
336 if (vars->flow_ctrl & FLOW_CTRL_TX) 336 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
337 val = 1; 337 val = 1;
338 338
339 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); 339 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
@@ -396,7 +396,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
396 396
397 /* tx control */ 397 /* tx control */
398 val = 0xc0; 398 val = 0xc0;
399 if (vars->flow_ctrl & FLOW_CTRL_TX) 399 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
400 val |= 0x800000; 400 val |= 0x800000;
401 wb_data[0] = val; 401 wb_data[0] = val;
402 wb_data[1] = 0; 402 wb_data[1] = 0;
@@ -423,7 +423,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
423 423
424 /* rx control set to don't strip crc */ 424 /* rx control set to don't strip crc */
425 val = 0x14; 425 val = 0x14;
426 if (vars->flow_ctrl & FLOW_CTRL_RX) 426 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
427 val |= 0x20; 427 val |= 0x20;
428 wb_data[0] = val; 428 wb_data[0] = val;
429 wb_data[1] = 0; 429 wb_data[1] = 0;
@@ -460,7 +460,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars,
460 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0); 460 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
461 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0); 461 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
462 val = 0; 462 val = 0;
463 if (vars->flow_ctrl & FLOW_CTRL_TX) 463 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
464 val = 1; 464 val = 1;
465 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val); 465 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
466 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0); 466 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
@@ -580,14 +580,14 @@ void bnx2x_link_status_update(struct link_params *params,
580 } 580 }
581 581
582 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED) 582 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
583 vars->flow_ctrl |= FLOW_CTRL_TX; 583 vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
584 else 584 else
585 vars->flow_ctrl &= ~FLOW_CTRL_TX; 585 vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_TX;
586 586
587 if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED) 587 if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
588 vars->flow_ctrl |= FLOW_CTRL_RX; 588 vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
589 else 589 else
590 vars->flow_ctrl &= ~FLOW_CTRL_RX; 590 vars->flow_ctrl &= ~BNX2X_FLOW_CTRL_RX;
591 591
592 if (vars->phy_flags & PHY_XGXS_FLAG) { 592 if (vars->phy_flags & PHY_XGXS_FLAG) {
593 if (vars->line_speed && 593 if (vars->line_speed &&
@@ -618,7 +618,7 @@ void bnx2x_link_status_update(struct link_params *params,
618 618
619 vars->line_speed = 0; 619 vars->line_speed = 0;
620 vars->duplex = DUPLEX_FULL; 620 vars->duplex = DUPLEX_FULL;
621 vars->flow_ctrl = FLOW_CTRL_NONE; 621 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
622 622
623 /* indicate no mac active */ 623 /* indicate no mac active */
624 vars->mac_type = MAC_TYPE_NONE; 624 vars->mac_type = MAC_TYPE_NONE;
@@ -691,7 +691,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
691 return -EINVAL; 691 return -EINVAL;
692 } 692 }
693 693
694 if (flow_ctrl & FLOW_CTRL_RX || 694 if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
695 line_speed == SPEED_10 || 695 line_speed == SPEED_10 ||
696 line_speed == SPEED_100 || 696 line_speed == SPEED_100 ||
697 line_speed == SPEED_1000 || 697 line_speed == SPEED_1000 ||
@@ -1300,8 +1300,8 @@ static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u32 *ieee_fc)
1300 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 1300 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
1301 1301
1302 switch (params->req_flow_ctrl) { 1302 switch (params->req_flow_ctrl) {
1303 case FLOW_CTRL_AUTO: 1303 case BNX2X_FLOW_CTRL_AUTO:
1304 if (params->req_fc_auto_adv == FLOW_CTRL_BOTH) { 1304 if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
1305 *ieee_fc |= 1305 *ieee_fc |=
1306 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 1306 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1307 } else { 1307 } else {
@@ -1309,17 +1309,17 @@ static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u32 *ieee_fc)
1309 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 1309 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1310 } 1310 }
1311 break; 1311 break;
1312 case FLOW_CTRL_TX: 1312 case BNX2X_FLOW_CTRL_TX:
1313 *ieee_fc |= 1313 *ieee_fc |=
1314 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; 1314 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
1315 break; 1315 break;
1316 1316
1317 case FLOW_CTRL_RX: 1317 case BNX2X_FLOW_CTRL_RX:
1318 case FLOW_CTRL_BOTH: 1318 case BNX2X_FLOW_CTRL_BOTH:
1319 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 1319 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
1320 break; 1320 break;
1321 1321
1322 case FLOW_CTRL_NONE: 1322 case BNX2X_FLOW_CTRL_NONE:
1323 default: 1323 default:
1324 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; 1324 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
1325 break; 1325 break;
@@ -1463,18 +1463,18 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
1463{ /* LD LP */ 1463{ /* LD LP */
1464 switch (pause_result) { /* ASYM P ASYM P */ 1464 switch (pause_result) { /* ASYM P ASYM P */
1465 case 0xb: /* 1 0 1 1 */ 1465 case 0xb: /* 1 0 1 1 */
1466 vars->flow_ctrl = FLOW_CTRL_TX; 1466 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
1467 break; 1467 break;
1468 1468
1469 case 0xe: /* 1 1 1 0 */ 1469 case 0xe: /* 1 1 1 0 */
1470 vars->flow_ctrl = FLOW_CTRL_RX; 1470 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
1471 break; 1471 break;
1472 1472
1473 case 0x5: /* 0 1 0 1 */ 1473 case 0x5: /* 0 1 0 1 */
1474 case 0x7: /* 0 1 1 1 */ 1474 case 0x7: /* 0 1 1 1 */
1475 case 0xd: /* 1 1 0 1 */ 1475 case 0xd: /* 1 1 0 1 */
1476 case 0xf: /* 1 1 1 1 */ 1476 case 0xf: /* 1 1 1 1 */
1477 vars->flow_ctrl = FLOW_CTRL_BOTH; 1477 vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
1478 break; 1478 break;
1479 1479
1480 default: 1480 default:
@@ -1531,7 +1531,7 @@ static u8 bnx2x_ext_phy_resove_fc(struct link_params *params,
1531 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n", 1531 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n",
1532 pause_result); 1532 pause_result);
1533 bnx2x_pause_resolve(vars, pause_result); 1533 bnx2x_pause_resolve(vars, pause_result);
1534 if (vars->flow_ctrl == FLOW_CTRL_NONE && 1534 if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
1535 ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) { 1535 ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073) {
1536 bnx2x_cl45_read(bp, port, 1536 bnx2x_cl45_read(bp, port,
1537 ext_phy_type, 1537 ext_phy_type,
@@ -1567,10 +1567,10 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1567 u16 lp_pause; /* link partner */ 1567 u16 lp_pause; /* link partner */
1568 u16 pause_result; 1568 u16 pause_result;
1569 1569
1570 vars->flow_ctrl = FLOW_CTRL_NONE; 1570 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1571 1571
1572 /* resolve from gp_status in case of AN complete and not sgmii */ 1572 /* resolve from gp_status in case of AN complete and not sgmii */
1573 if ((params->req_flow_ctrl == FLOW_CTRL_AUTO) && 1573 if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
1574 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) && 1574 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1575 (!(vars->phy_flags & PHY_SGMII_FLAG)) && 1575 (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
1576 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1576 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
@@ -1591,11 +1591,11 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1591 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 1591 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1592 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result); 1592 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
1593 bnx2x_pause_resolve(vars, pause_result); 1593 bnx2x_pause_resolve(vars, pause_result);
1594 } else if ((params->req_flow_ctrl == FLOW_CTRL_AUTO) && 1594 } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
1595 (bnx2x_ext_phy_resove_fc(params, vars))) { 1595 (bnx2x_ext_phy_resove_fc(params, vars))) {
1596 return; 1596 return;
1597 } else { 1597 } else {
1598 if (params->req_flow_ctrl == FLOW_CTRL_AUTO) 1598 if (params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
1599 vars->flow_ctrl = params->req_fc_auto_adv; 1599 vars->flow_ctrl = params->req_fc_auto_adv;
1600 else 1600 else
1601 vars->flow_ctrl = params->req_flow_ctrl; 1601 vars->flow_ctrl = params->req_flow_ctrl;
@@ -1728,11 +1728,11 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1728 LINK_STATUS_PARALLEL_DETECTION_USED; 1728 LINK_STATUS_PARALLEL_DETECTION_USED;
1729 1729
1730 } 1730 }
1731 if (vars->flow_ctrl & FLOW_CTRL_TX) 1731 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1732 vars->link_status |= 1732 vars->link_status |=
1733 LINK_STATUS_TX_FLOW_CONTROL_ENABLED; 1733 LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1734 1734
1735 if (vars->flow_ctrl & FLOW_CTRL_RX) 1735 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1736 vars->link_status |= 1736 vars->link_status |=
1737 LINK_STATUS_RX_FLOW_CONTROL_ENABLED; 1737 LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1738 1738
@@ -1742,7 +1742,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1742 vars->phy_link_up = 0; 1742 vars->phy_link_up = 0;
1743 1743
1744 vars->duplex = DUPLEX_FULL; 1744 vars->duplex = DUPLEX_FULL;
1745 vars->flow_ctrl = FLOW_CTRL_NONE; 1745 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
1746 vars->autoneg = AUTO_NEG_DISABLED; 1746 vars->autoneg = AUTO_NEG_DISABLED;
1747 vars->mac_type = MAC_TYPE_NONE; 1747 vars->mac_type = MAC_TYPE_NONE;
1748 } 1748 }
@@ -3924,7 +3924,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
3924 vars->link_up = 0; 3924 vars->link_up = 0;
3925 vars->line_speed = 0; 3925 vars->line_speed = 0;
3926 vars->duplex = DUPLEX_FULL; 3926 vars->duplex = DUPLEX_FULL;
3927 vars->flow_ctrl = FLOW_CTRL_NONE; 3927 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
3928 vars->mac_type = MAC_TYPE_NONE; 3928 vars->mac_type = MAC_TYPE_NONE;
3929 3929
3930 if (params->switch_cfg == SWITCH_CFG_1G) 3930 if (params->switch_cfg == SWITCH_CFG_1G)
@@ -3946,12 +3946,12 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
3946 vars->link_up = 1; 3946 vars->link_up = 1;
3947 vars->line_speed = SPEED_10000; 3947 vars->line_speed = SPEED_10000;
3948 vars->duplex = DUPLEX_FULL; 3948 vars->duplex = DUPLEX_FULL;
3949 vars->flow_ctrl = FLOW_CTRL_NONE; 3949 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
3950 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD); 3950 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
3951 /* enable on E1.5 FPGA */ 3951 /* enable on E1.5 FPGA */
3952 if (CHIP_IS_E1H(bp)) { 3952 if (CHIP_IS_E1H(bp)) {
3953 vars->flow_ctrl |= 3953 vars->flow_ctrl |=
3954 (FLOW_CTRL_TX | FLOW_CTRL_RX); 3954 (BNX2X_FLOW_CTRL_TX | BNX2X_FLOW_CTRL_RX);
3955 vars->link_status |= 3955 vars->link_status |=
3956 (LINK_STATUS_TX_FLOW_CONTROL_ENABLED | 3956 (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
3957 LINK_STATUS_RX_FLOW_CONTROL_ENABLED); 3957 LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
@@ -3974,7 +3974,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
3974 vars->link_up = 1; 3974 vars->link_up = 1;
3975 vars->line_speed = SPEED_10000; 3975 vars->line_speed = SPEED_10000;
3976 vars->duplex = DUPLEX_FULL; 3976 vars->duplex = DUPLEX_FULL;
3977 vars->flow_ctrl = FLOW_CTRL_NONE; 3977 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
3978 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD); 3978 vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
3979 3979
3980 bnx2x_bmac_enable(params, vars, 0); 3980 bnx2x_bmac_enable(params, vars, 0);
@@ -3994,7 +3994,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
3994 vars->link_up = 1; 3994 vars->link_up = 1;
3995 vars->line_speed = SPEED_10000; 3995 vars->line_speed = SPEED_10000;
3996 vars->duplex = DUPLEX_FULL; 3996 vars->duplex = DUPLEX_FULL;
3997 vars->flow_ctrl = FLOW_CTRL_NONE; 3997 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
3998 vars->mac_type = MAC_TYPE_BMAC; 3998 vars->mac_type = MAC_TYPE_BMAC;
3999 3999
4000 vars->phy_flags = PHY_XGXS_FLAG; 4000 vars->phy_flags = PHY_XGXS_FLAG;
@@ -4009,7 +4009,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
4009 vars->link_up = 1; 4009 vars->link_up = 1;
4010 vars->line_speed = SPEED_1000; 4010 vars->line_speed = SPEED_1000;
4011 vars->duplex = DUPLEX_FULL; 4011 vars->duplex = DUPLEX_FULL;
4012 vars->flow_ctrl = FLOW_CTRL_NONE; 4012 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
4013 vars->mac_type = MAC_TYPE_EMAC; 4013 vars->mac_type = MAC_TYPE_EMAC;
4014 4014
4015 vars->phy_flags = PHY_XGXS_FLAG; 4015 vars->phy_flags = PHY_XGXS_FLAG;
@@ -4026,7 +4026,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
4026 vars->link_up = 1; 4026 vars->link_up = 1;
4027 vars->line_speed = SPEED_10000; 4027 vars->line_speed = SPEED_10000;
4028 vars->duplex = DUPLEX_FULL; 4028 vars->duplex = DUPLEX_FULL;
4029 vars->flow_ctrl = FLOW_CTRL_NONE; 4029 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
4030 4030
4031 vars->phy_flags = PHY_XGXS_FLAG; 4031 vars->phy_flags = PHY_XGXS_FLAG;
4032 4032
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
index 86d54a17b411..47cb585f4278 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x_link.h
@@ -26,11 +26,11 @@
26 26
27 27
28 28
29#define FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO 29#define BNX2X_FLOW_CTRL_AUTO PORT_FEATURE_FLOW_CONTROL_AUTO
30#define FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX 30#define BNX2X_FLOW_CTRL_TX PORT_FEATURE_FLOW_CONTROL_TX
31#define FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX 31#define BNX2X_FLOW_CTRL_RX PORT_FEATURE_FLOW_CONTROL_RX
32#define FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH 32#define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH
33#define FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE 33#define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE
34 34
35#define SPEED_AUTO_NEG 0 35#define SPEED_AUTO_NEG 0
36#define SPEED_12000 12000 36#define SPEED_12000 12000
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 600210d7eff9..ef8103b3523e 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1328,7 +1328,6 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1328 dev_kfree_skb(skb); 1328 dev_kfree_skb(skb);
1329 } 1329 }
1330 1330
1331 bp->dev->last_rx = jiffies;
1332 1331
1333 /* put new skb in bin */ 1332 /* put new skb in bin */
1334 fp->tpa_pool[queue].skb = new_skb; 1333 fp->tpa_pool[queue].skb = new_skb;
@@ -1557,7 +1556,6 @@ reuse_rx:
1557#endif 1556#endif
1558 netif_receive_skb(skb); 1557 netif_receive_skb(skb);
1559 1558
1560 bp->dev->last_rx = jiffies;
1561 1559
1562next_rx: 1560next_rx:
1563 rx_buf->skb = NULL; 1561 rx_buf->skb = NULL;
@@ -1594,7 +1592,6 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1594{ 1592{
1595 struct bnx2x_fastpath *fp = fp_cookie; 1593 struct bnx2x_fastpath *fp = fp_cookie;
1596 struct bnx2x *bp = fp->bp; 1594 struct bnx2x *bp = fp->bp;
1597 struct net_device *dev = bp->dev;
1598 int index = FP_IDX(fp); 1595 int index = FP_IDX(fp);
1599 1596
1600 /* Return here if interrupt is disabled */ 1597 /* Return here if interrupt is disabled */
@@ -1617,7 +1614,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1617 prefetch(&fp->status_blk->c_status_block.status_block_index); 1614 prefetch(&fp->status_blk->c_status_block.status_block_index);
1618 prefetch(&fp->status_blk->u_status_block.status_block_index); 1615 prefetch(&fp->status_blk->u_status_block.status_block_index);
1619 1616
1620 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi)); 1617 netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1621 1618
1622 return IRQ_HANDLED; 1619 return IRQ_HANDLED;
1623} 1620}
@@ -1656,7 +1653,7 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1656 prefetch(&fp->status_blk->c_status_block.status_block_index); 1653 prefetch(&fp->status_blk->c_status_block.status_block_index);
1657 prefetch(&fp->status_blk->u_status_block.status_block_index); 1654 prefetch(&fp->status_blk->u_status_block.status_block_index);
1658 1655
1659 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi)); 1656 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1660 1657
1661 status &= ~mask; 1658 status &= ~mask;
1662 } 1659 }
@@ -1923,10 +1920,10 @@ static void bnx2x_link_report(struct bnx2x *bp)
1923 else 1920 else
1924 printk("half duplex"); 1921 printk("half duplex");
1925 1922
1926 if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) { 1923 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1927 if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) { 1924 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
1928 printk(", receive "); 1925 printk(", receive ");
1929 if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX) 1926 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1930 printk("& transmit "); 1927 printk("& transmit ");
1931 } else { 1928 } else {
1932 printk(", transmit "); 1929 printk(", transmit ");
@@ -1950,11 +1947,11 @@ static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1950 /* It is recommended to turn off RX FC for jumbo frames 1947 /* It is recommended to turn off RX FC for jumbo frames
1951 for better performance */ 1948 for better performance */
1952 if (IS_E1HMF(bp)) 1949 if (IS_E1HMF(bp))
1953 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH; 1950 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1954 else if (bp->dev->mtu > 5000) 1951 else if (bp->dev->mtu > 5000)
1955 bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX; 1952 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
1956 else 1953 else
1957 bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH; 1954 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
1958 1955
1959 bnx2x_acquire_phy_lock(bp); 1956 bnx2x_acquire_phy_lock(bp);
1960 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); 1957 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
@@ -7364,9 +7361,9 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7364 7361
7365 bp->link_params.req_flow_ctrl = (bp->port.link_config & 7362 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7366 PORT_FEATURE_FLOW_CONTROL_MASK); 7363 PORT_FEATURE_FLOW_CONTROL_MASK);
7367 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) && 7364 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7368 !(bp->port.supported & SUPPORTED_Autoneg)) 7365 !(bp->port.supported & SUPPORTED_Autoneg))
7369 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; 7366 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7370 7367
7371 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x" 7368 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7372 " advertising 0x%x\n", 7369 " advertising 0x%x\n",
@@ -8355,13 +8352,13 @@ static void bnx2x_get_pauseparam(struct net_device *dev,
8355{ 8352{
8356 struct bnx2x *bp = netdev_priv(dev); 8353 struct bnx2x *bp = netdev_priv(dev);
8357 8354
8358 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) && 8355 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8359 (bp->link_params.req_line_speed == SPEED_AUTO_NEG); 8356 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8360 8357
8361 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) == 8358 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8362 FLOW_CTRL_RX); 8359 BNX2X_FLOW_CTRL_RX);
8363 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) == 8360 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8364 FLOW_CTRL_TX); 8361 BNX2X_FLOW_CTRL_TX);
8365 8362
8366 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n" 8363 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8367 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", 8364 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
@@ -8380,16 +8377,16 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
8380 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n", 8377 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8381 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause); 8378 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8382 8379
8383 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO; 8380 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8384 8381
8385 if (epause->rx_pause) 8382 if (epause->rx_pause)
8386 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX; 8383 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8387 8384
8388 if (epause->tx_pause) 8385 if (epause->tx_pause)
8389 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX; 8386 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8390 8387
8391 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) 8388 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8392 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE; 8389 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8393 8390
8394 if (epause->autoneg) { 8391 if (epause->autoneg) {
8395 if (!(bp->port.supported & SUPPORTED_Autoneg)) { 8392 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
@@ -8398,7 +8395,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
8398 } 8395 }
8399 8396
8400 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG) 8397 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8401 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO; 8398 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8402 } 8399 }
8403 8400
8404 DP(NETIF_MSG_LINK, 8401 DP(NETIF_MSG_LINK,
@@ -8769,7 +8766,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8769 rc = 0; 8766 rc = 0;
8770 8767
8771test_loopback_rx_exit: 8768test_loopback_rx_exit:
8772 bp->dev->last_rx = jiffies;
8773 8769
8774 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons); 8770 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8775 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod); 8771 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
@@ -9287,7 +9283,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
9287#ifdef BNX2X_STOP_ON_ERROR 9283#ifdef BNX2X_STOP_ON_ERROR
9288poll_panic: 9284poll_panic:
9289#endif 9285#endif
9290 netif_rx_complete(bp->dev, napi); 9286 netif_rx_complete(napi);
9291 9287
9292 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 9288 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9293 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); 9289 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
@@ -9853,11 +9849,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
9853 mclist && (i < dev->mc_count); 9849 mclist && (i < dev->mc_count);
9854 i++, mclist = mclist->next) { 9850 i++, mclist = mclist->next) {
9855 9851
9856 DP(NETIF_MSG_IFUP, "Adding mcast MAC: " 9852 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9857 "%02x:%02x:%02x:%02x:%02x:%02x\n", 9853 mclist->dmi_addr);
9858 mclist->dmi_addr[0], mclist->dmi_addr[1],
9859 mclist->dmi_addr[2], mclist->dmi_addr[3],
9860 mclist->dmi_addr[4], mclist->dmi_addr[5]);
9861 9854
9862 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN); 9855 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9863 bit = (crc >> 24) & 0xff; 9856 bit = (crc >> 24) & 0xff;
@@ -10008,6 +10001,25 @@ static void poll_bnx2x(struct net_device *dev)
10008} 10001}
10009#endif 10002#endif
10010 10003
10004static const struct net_device_ops bnx2x_netdev_ops = {
10005 .ndo_open = bnx2x_open,
10006 .ndo_stop = bnx2x_close,
10007 .ndo_start_xmit = bnx2x_start_xmit,
10008 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10009 .ndo_set_mac_address = bnx2x_change_mac_addr,
10010 .ndo_validate_addr = eth_validate_addr,
10011 .ndo_do_ioctl = bnx2x_ioctl,
10012 .ndo_change_mtu = bnx2x_change_mtu,
10013 .ndo_tx_timeout = bnx2x_tx_timeout,
10014#ifdef BCM_VLAN
10015 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10016#endif
10017#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10018 .ndo_poll_controller = poll_bnx2x,
10019#endif
10020};
10021
10022
10011static int __devinit bnx2x_init_dev(struct pci_dev *pdev, 10023static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10012 struct net_device *dev) 10024 struct net_device *dev)
10013{ 10025{
@@ -10092,8 +10104,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10092 10104
10093 dev->irq = pdev->irq; 10105 dev->irq = pdev->irq;
10094 10106
10095 bp->regview = ioremap_nocache(dev->base_addr, 10107 bp->regview = pci_ioremap_bar(pdev, 0);
10096 pci_resource_len(pdev, 0));
10097 if (!bp->regview) { 10108 if (!bp->regview) {
10098 printk(KERN_ERR PFX "Cannot map register space, aborting\n"); 10109 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10099 rc = -ENOMEM; 10110 rc = -ENOMEM;
@@ -10119,23 +10130,10 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10119 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); 10130 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10120 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); 10131 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10121 10132
10122 dev->hard_start_xmit = bnx2x_start_xmit;
10123 dev->watchdog_timeo = TX_TIMEOUT; 10133 dev->watchdog_timeo = TX_TIMEOUT;
10124 10134
10135 dev->netdev_ops = &bnx2x_netdev_ops;
10125 dev->ethtool_ops = &bnx2x_ethtool_ops; 10136 dev->ethtool_ops = &bnx2x_ethtool_ops;
10126 dev->open = bnx2x_open;
10127 dev->stop = bnx2x_close;
10128 dev->set_multicast_list = bnx2x_set_rx_mode;
10129 dev->set_mac_address = bnx2x_change_mac_addr;
10130 dev->do_ioctl = bnx2x_ioctl;
10131 dev->change_mtu = bnx2x_change_mtu;
10132 dev->tx_timeout = bnx2x_tx_timeout;
10133#ifdef BCM_VLAN
10134 dev->vlan_rx_register = bnx2x_vlan_rx_register;
10135#endif
10136#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10137 dev->poll_controller = poll_bnx2x;
10138#endif
10139 dev->features |= NETIF_F_SG; 10137 dev->features |= NETIF_F_SG;
10140 dev->features |= NETIF_F_HW_CSUM; 10138 dev->features |= NETIF_F_HW_CSUM;
10141 if (bp->flags & USING_DAC_FLAG) 10139 if (bp->flags & USING_DAC_FLAG)
@@ -10194,7 +10192,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10194 struct net_device *dev = NULL; 10192 struct net_device *dev = NULL;
10195 struct bnx2x *bp; 10193 struct bnx2x *bp;
10196 int rc; 10194 int rc;
10197 DECLARE_MAC_BUF(mac);
10198 10195
10199 if (version_printed++ == 0) 10196 if (version_printed++ == 0)
10200 printk(KERN_INFO "%s", version); 10197 printk(KERN_INFO "%s", version);
@@ -10238,7 +10235,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10238 bnx2x_get_pcie_width(bp), 10235 bnx2x_get_pcie_width(bp),
10239 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz", 10236 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10240 dev->base_addr, bp->pdev->irq); 10237 dev->base_addr, bp->pdev->irq);
10241 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr)); 10238 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10242 return 0; 10239 return 0;
10243 10240
10244init_one_exit: 10241init_one_exit:
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 5cdae2bc055a..6f9c6faef24c 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -6,3 +6,6 @@ obj-$(CONFIG_BONDING) += bonding.o
6 6
7bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o 7bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o
8 8
9ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
10bonding-objs += $(ipv6-y)
11
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 6106660a4a44..8c2e5ab51f08 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -20,13 +20,12 @@
20 * 20 *
21 */ 21 */
22 22
23//#define BONDING_DEBUG 1
24
25#include <linux/skbuff.h> 23#include <linux/skbuff.h>
26#include <linux/if_ether.h> 24#include <linux/if_ether.h>
27#include <linux/netdevice.h> 25#include <linux/netdevice.h>
28#include <linux/spinlock.h> 26#include <linux/spinlock.h>
29#include <linux/ethtool.h> 27#include <linux/ethtool.h>
28#include <linux/etherdevice.h>
30#include <linux/if_bonding.h> 29#include <linux/if_bonding.h>
31#include <linux/pkt_sched.h> 30#include <linux/pkt_sched.h>
32#include <net/net_namespace.h> 31#include <net/net_namespace.h>
@@ -96,33 +95,7 @@ static struct mac_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
96static u16 ad_ticks_per_sec; 95static u16 ad_ticks_per_sec;
97static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000; 96static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
98 97
99// ================= 3AD api to bonding and kernel code ================== 98static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
100static u16 __get_link_speed(struct port *port);
101static u8 __get_duplex(struct port *port);
102static inline void __initialize_port_locks(struct port *port);
103//conversions
104static u16 __ad_timer_to_ticks(u16 timer_type, u16 Par);
105
106
107// ================= ad code helper functions ==================
108//needed by ad_rx_machine(...)
109static void __record_pdu(struct lacpdu *lacpdu, struct port *port);
110static void __record_default(struct port *port);
111static void __update_selected(struct lacpdu *lacpdu, struct port *port);
112static void __update_default_selected(struct port *port);
113static void __choose_matched(struct lacpdu *lacpdu, struct port *port);
114static void __update_ntt(struct lacpdu *lacpdu, struct port *port);
115
116//needed for ad_mux_machine(..)
117static void __attach_bond_to_agg(struct port *port);
118static void __detach_bond_from_agg(struct port *port);
119static int __agg_ports_are_ready(struct aggregator *aggregator);
120static void __set_agg_ports_ready(struct aggregator *aggregator, int val);
121
122//needed for ad_agg_selection_logic(...)
123static u32 __get_agg_bandwidth(struct aggregator *aggregator);
124static struct aggregator *__get_active_agg(struct aggregator *aggregator);
125
126 99
127// ================= main 802.3ad protocol functions ================== 100// ================= main 802.3ad protocol functions ==================
128static int ad_lacpdu_send(struct port *port); 101static int ad_lacpdu_send(struct port *port);
@@ -136,7 +109,6 @@ static void ad_agg_selection_logic(struct aggregator *aggregator);
136static void ad_clear_agg(struct aggregator *aggregator); 109static void ad_clear_agg(struct aggregator *aggregator);
137static void ad_initialize_agg(struct aggregator *aggregator); 110static void ad_initialize_agg(struct aggregator *aggregator);
138static void ad_initialize_port(struct port *port, int lacp_fast); 111static void ad_initialize_port(struct port *port, int lacp_fast);
139static void ad_initialize_lacpdu(struct lacpdu *Lacpdu);
140static void ad_enable_collecting_distributing(struct port *port); 112static void ad_enable_collecting_distributing(struct port *port);
141static void ad_disable_collecting_distributing(struct port *port); 113static void ad_disable_collecting_distributing(struct port *port);
142static void ad_marker_info_received(struct bond_marker *marker_info, struct port *port); 114static void ad_marker_info_received(struct bond_marker *marker_info, struct port *port);
@@ -236,6 +208,17 @@ static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
236 return &(SLAVE_AD_INFO(slave->next).aggregator); 208 return &(SLAVE_AD_INFO(slave->next).aggregator);
237} 209}
238 210
211/*
212 * __agg_has_partner
213 *
214 * Return nonzero if aggregator has a partner (denoted by a non-zero ether
215 * address for the partner). Return 0 if not.
216 */
217static inline int __agg_has_partner(struct aggregator *agg)
218{
219 return !is_zero_ether_addr(agg->partner_system.mac_addr_value);
220}
221
239/** 222/**
240 * __disable_port - disable the port's slave 223 * __disable_port - disable the port's slave
241 * @port: the port we're looking at 224 * @port: the port we're looking at
@@ -274,14 +257,14 @@ static inline int __port_is_enabled(struct port *port)
274 * __get_agg_selection_mode - get the aggregator selection mode 257 * __get_agg_selection_mode - get the aggregator selection mode
275 * @port: the port we're looking at 258 * @port: the port we're looking at
276 * 259 *
277 * Get the aggregator selection mode. Can be %BANDWIDTH or %COUNT. 260 * Get the aggregator selection mode. Can be %STABLE, %BANDWIDTH or %COUNT.
278 */ 261 */
279static inline u32 __get_agg_selection_mode(struct port *port) 262static inline u32 __get_agg_selection_mode(struct port *port)
280{ 263{
281 struct bonding *bond = __get_bond_by_port(port); 264 struct bonding *bond = __get_bond_by_port(port);
282 265
283 if (bond == NULL) { 266 if (bond == NULL) {
284 return AD_BANDWIDTH; 267 return BOND_AD_STABLE;
285 } 268 }
286 269
287 return BOND_AD_INFO(bond).agg_select_mode; 270 return BOND_AD_INFO(bond).agg_select_mode;
@@ -369,7 +352,7 @@ static u16 __get_link_speed(struct port *port)
369 } 352 }
370 } 353 }
371 354
372 dprintk("Port %d Received link speed %d update from adapter\n", port->actor_port_number, speed); 355 pr_debug("Port %d Received link speed %d update from adapter\n", port->actor_port_number, speed);
373 return speed; 356 return speed;
374} 357}
375 358
@@ -395,12 +378,12 @@ static u8 __get_duplex(struct port *port)
395 switch (slave->duplex) { 378 switch (slave->duplex) {
396 case DUPLEX_FULL: 379 case DUPLEX_FULL:
397 retval=0x1; 380 retval=0x1;
398 dprintk("Port %d Received status full duplex update from adapter\n", port->actor_port_number); 381 pr_debug("Port %d Received status full duplex update from adapter\n", port->actor_port_number);
399 break; 382 break;
400 case DUPLEX_HALF: 383 case DUPLEX_HALF:
401 default: 384 default:
402 retval=0x0; 385 retval=0x0;
403 dprintk("Port %d Received status NOT full duplex update from adapter\n", port->actor_port_number); 386 pr_debug("Port %d Received status NOT full duplex update from adapter\n", port->actor_port_number);
404 break; 387 break;
405 } 388 }
406 } 389 }
@@ -473,33 +456,25 @@ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
473 */ 456 */
474static void __record_pdu(struct lacpdu *lacpdu, struct port *port) 457static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
475{ 458{
476 // validate lacpdu and port
477 if (lacpdu && port) { 459 if (lacpdu && port) {
460 struct port_params *partner = &port->partner_oper;
461
478 // record the new parameter values for the partner operational 462 // record the new parameter values for the partner operational
479 port->partner_oper_port_number = ntohs(lacpdu->actor_port); 463 partner->port_number = ntohs(lacpdu->actor_port);
480 port->partner_oper_port_priority = ntohs(lacpdu->actor_port_priority); 464 partner->port_priority = ntohs(lacpdu->actor_port_priority);
481 port->partner_oper_system = lacpdu->actor_system; 465 partner->system = lacpdu->actor_system;
482 port->partner_oper_system_priority = ntohs(lacpdu->actor_system_priority); 466 partner->system_priority = ntohs(lacpdu->actor_system_priority);
483 port->partner_oper_key = ntohs(lacpdu->actor_key); 467 partner->key = ntohs(lacpdu->actor_key);
484 // zero partener's lase states 468 partner->port_state = lacpdu->actor_state;
485 port->partner_oper_port_state = 0;
486 port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_LACP_ACTIVITY);
487 port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_LACP_TIMEOUT);
488 port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_AGGREGATION);
489 port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION);
490 port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_COLLECTING);
491 port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_DISTRIBUTING);
492 port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_DEFAULTED);
493 port->partner_oper_port_state |= (lacpdu->actor_state & AD_STATE_EXPIRED);
494 469
495 // set actor_oper_port_state.defaulted to FALSE 470 // set actor_oper_port_state.defaulted to FALSE
496 port->actor_oper_port_state &= ~AD_STATE_DEFAULTED; 471 port->actor_oper_port_state &= ~AD_STATE_DEFAULTED;
497 472
498 // set the partner sync. to on if the partner is sync. and the port is matched 473 // set the partner sync. to on if the partner is sync. and the port is matched
499 if ((port->sm_vars & AD_PORT_MATCHED) && (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) { 474 if ((port->sm_vars & AD_PORT_MATCHED) && (lacpdu->actor_state & AD_STATE_SYNCHRONIZATION)) {
500 port->partner_oper_port_state |= AD_STATE_SYNCHRONIZATION; 475 partner->port_state |= AD_STATE_SYNCHRONIZATION;
501 } else { 476 } else {
502 port->partner_oper_port_state &= ~AD_STATE_SYNCHRONIZATION; 477 partner->port_state &= ~AD_STATE_SYNCHRONIZATION;
503 } 478 }
504 } 479 }
505} 480}
@@ -514,15 +489,10 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
514 */ 489 */
515static void __record_default(struct port *port) 490static void __record_default(struct port *port)
516{ 491{
517 // validate the port
518 if (port) { 492 if (port) {
519 // record the partner admin parameters 493 // record the partner admin parameters
520 port->partner_oper_port_number = port->partner_admin_port_number; 494 memcpy(&port->partner_oper, &port->partner_admin,
521 port->partner_oper_port_priority = port->partner_admin_port_priority; 495 sizeof(struct port_params));
522 port->partner_oper_system = port->partner_admin_system;
523 port->partner_oper_system_priority = port->partner_admin_system_priority;
524 port->partner_oper_key = port->partner_admin_key;
525 port->partner_oper_port_state = port->partner_admin_port_state;
526 496
527 // set actor_oper_port_state.defaulted to true 497 // set actor_oper_port_state.defaulted to true
528 port->actor_oper_port_state |= AD_STATE_DEFAULTED; 498 port->actor_oper_port_state |= AD_STATE_DEFAULTED;
@@ -544,16 +514,16 @@ static void __record_default(struct port *port)
544 */ 514 */
545static void __update_selected(struct lacpdu *lacpdu, struct port *port) 515static void __update_selected(struct lacpdu *lacpdu, struct port *port)
546{ 516{
547 // validate lacpdu and port
548 if (lacpdu && port) { 517 if (lacpdu && port) {
518 const struct port_params *partner = &port->partner_oper;
519
549 // check if any parameter is different 520 // check if any parameter is different
550 if ((ntohs(lacpdu->actor_port) != port->partner_oper_port_number) || 521 if (ntohs(lacpdu->actor_port) != partner->port_number
551 (ntohs(lacpdu->actor_port_priority) != port->partner_oper_port_priority) || 522 || ntohs(lacpdu->actor_port_priority) != partner->port_priority
552 MAC_ADDRESS_COMPARE(&(lacpdu->actor_system), &(port->partner_oper_system)) || 523 || MAC_ADDRESS_COMPARE(&lacpdu->actor_system, &partner->system)
553 (ntohs(lacpdu->actor_system_priority) != port->partner_oper_system_priority) || 524 || ntohs(lacpdu->actor_system_priority) != partner->system_priority
554 (ntohs(lacpdu->actor_key) != port->partner_oper_key) || 525 || ntohs(lacpdu->actor_key) != partner->key
555 ((lacpdu->actor_state & AD_STATE_AGGREGATION) != (port->partner_oper_port_state & AD_STATE_AGGREGATION)) 526 || (lacpdu->actor_state & AD_STATE_AGGREGATION) != (partner->port_state & AD_STATE_AGGREGATION)) {
556 ) {
557 // update the state machine Selected variable 527 // update the state machine Selected variable
558 port->sm_vars &= ~AD_PORT_SELECTED; 528 port->sm_vars &= ~AD_PORT_SELECTED;
559 } 529 }
@@ -574,16 +544,18 @@ static void __update_selected(struct lacpdu *lacpdu, struct port *port)
574 */ 544 */
575static void __update_default_selected(struct port *port) 545static void __update_default_selected(struct port *port)
576{ 546{
577 // validate the port
578 if (port) { 547 if (port) {
548 const struct port_params *admin = &port->partner_admin;
549 const struct port_params *oper = &port->partner_oper;
550
579 // check if any parameter is different 551 // check if any parameter is different
580 if ((port->partner_admin_port_number != port->partner_oper_port_number) || 552 if (admin->port_number != oper->port_number
581 (port->partner_admin_port_priority != port->partner_oper_port_priority) || 553 || admin->port_priority != oper->port_priority
582 MAC_ADDRESS_COMPARE(&(port->partner_admin_system), &(port->partner_oper_system)) || 554 || MAC_ADDRESS_COMPARE(&admin->system, &oper->system)
583 (port->partner_admin_system_priority != port->partner_oper_system_priority) || 555 || admin->system_priority != oper->system_priority
584 (port->partner_admin_key != port->partner_oper_key) || 556 || admin->key != oper->key
585 ((port->partner_admin_port_state & AD_STATE_AGGREGATION) != (port->partner_oper_port_state & AD_STATE_AGGREGATION)) 557 || (admin->port_state & AD_STATE_AGGREGATION)
586 ) { 558 != (oper->port_state & AD_STATE_AGGREGATION)) {
587 // update the state machine Selected variable 559 // update the state machine Selected variable
588 port->sm_vars &= ~AD_PORT_SELECTED; 560 port->sm_vars &= ~AD_PORT_SELECTED;
589 } 561 }
@@ -658,8 +630,8 @@ static void __update_ntt(struct lacpdu *lacpdu, struct port *port)
658 ((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) || 630 ((lacpdu->partner_state & AD_STATE_SYNCHRONIZATION) != (port->actor_oper_port_state & AD_STATE_SYNCHRONIZATION)) ||
659 ((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION)) 631 ((lacpdu->partner_state & AD_STATE_AGGREGATION) != (port->actor_oper_port_state & AD_STATE_AGGREGATION))
660 ) { 632 ) {
661 // set ntt to be TRUE 633
662 port->ntt = 1; 634 port->ntt = true;
663 } 635 }
664 } 636 }
665} 637}
@@ -798,6 +770,7 @@ static struct aggregator *__get_active_agg(struct aggregator *aggregator)
798static inline void __update_lacpdu_from_port(struct port *port) 770static inline void __update_lacpdu_from_port(struct port *port)
799{ 771{
800 struct lacpdu *lacpdu = &port->lacpdu; 772 struct lacpdu *lacpdu = &port->lacpdu;
773 const struct port_params *partner = &port->partner_oper;
801 774
802 /* update current actual Actor parameters */ 775 /* update current actual Actor parameters */
803 /* lacpdu->subtype initialized 776 /* lacpdu->subtype initialized
@@ -818,12 +791,12 @@ static inline void __update_lacpdu_from_port(struct port *port)
818 * lacpdu->partner_information_length initialized 791 * lacpdu->partner_information_length initialized
819 */ 792 */
820 793
821 lacpdu->partner_system_priority = htons(port->partner_oper_system_priority); 794 lacpdu->partner_system_priority = htons(partner->system_priority);
822 lacpdu->partner_system = port->partner_oper_system; 795 lacpdu->partner_system = partner->system;
823 lacpdu->partner_key = htons(port->partner_oper_key); 796 lacpdu->partner_key = htons(partner->key);
824 lacpdu->partner_port_priority = htons(port->partner_oper_port_priority); 797 lacpdu->partner_port_priority = htons(partner->port_priority);
825 lacpdu->partner_port = htons(port->partner_oper_port_number); 798 lacpdu->partner_port = htons(partner->port_number);
826 lacpdu->partner_state = port->partner_oper_port_state; 799 lacpdu->partner_state = partner->port_state;
827 800
828 /* lacpdu->reserved_3_2 initialized 801 /* lacpdu->reserved_3_2 initialized
829 * lacpdu->tlv_type_collector_info initialized 802 * lacpdu->tlv_type_collector_info initialized
@@ -853,7 +826,6 @@ static int ad_lacpdu_send(struct port *port)
853 struct sk_buff *skb; 826 struct sk_buff *skb;
854 struct lacpdu_header *lacpdu_header; 827 struct lacpdu_header *lacpdu_header;
855 int length = sizeof(struct lacpdu_header); 828 int length = sizeof(struct lacpdu_header);
856 struct mac_addr lacpdu_multicast_address = AD_MULTICAST_LACPDU_ADDR;
857 829
858 skb = dev_alloc_skb(length); 830 skb = dev_alloc_skb(length);
859 if (!skb) { 831 if (!skb) {
@@ -868,11 +840,11 @@ static int ad_lacpdu_send(struct port *port)
868 840
869 lacpdu_header = (struct lacpdu_header *)skb_put(skb, length); 841 lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
870 842
871 lacpdu_header->ad_header.destination_address = lacpdu_multicast_address; 843 memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
872 /* Note: source addres is set to be the member's PERMANENT address, because we use it 844 /* Note: source addres is set to be the member's PERMANENT address,
873 to identify loopback lacpdus in receive. */ 845 because we use it to identify loopback lacpdus in receive. */
874 lacpdu_header->ad_header.source_address = *((struct mac_addr *)(slave->perm_hwaddr)); 846 memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
875 lacpdu_header->ad_header.length_type = PKT_TYPE_LACPDU; 847 lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
876 848
877 lacpdu_header->lacpdu = port->lacpdu; // struct copy 849 lacpdu_header->lacpdu = port->lacpdu; // struct copy
878 850
@@ -895,7 +867,6 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
895 struct sk_buff *skb; 867 struct sk_buff *skb;
896 struct bond_marker_header *marker_header; 868 struct bond_marker_header *marker_header;
897 int length = sizeof(struct bond_marker_header); 869 int length = sizeof(struct bond_marker_header);
898 struct mac_addr lacpdu_multicast_address = AD_MULTICAST_LACPDU_ADDR;
899 870
900 skb = dev_alloc_skb(length + 16); 871 skb = dev_alloc_skb(length + 16);
901 if (!skb) { 872 if (!skb) {
@@ -911,11 +882,11 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
911 882
912 marker_header = (struct bond_marker_header *)skb_put(skb, length); 883 marker_header = (struct bond_marker_header *)skb_put(skb, length);
913 884
914 marker_header->ad_header.destination_address = lacpdu_multicast_address; 885 memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
915 /* Note: source addres is set to be the member's PERMANENT address, because we use it 886 /* Note: source addres is set to be the member's PERMANENT address,
916 to identify loopback MARKERs in receive. */ 887 because we use it to identify loopback MARKERs in receive. */
917 marker_header->ad_header.source_address = *((struct mac_addr *)(slave->perm_hwaddr)); 888 memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
918 marker_header->ad_header.length_type = PKT_TYPE_LACPDU; 889 marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
919 890
920 marker_header->marker = *marker; // struct copy 891 marker_header->marker = *marker; // struct copy
921 892
@@ -972,7 +943,7 @@ static void ad_mux_machine(struct port *port)
972 break; 943 break;
973 case AD_MUX_ATTACHED: 944 case AD_MUX_ATTACHED:
974 // check also if agg_select_timer expired(so the edable port will take place only after this timer) 945 // check also if agg_select_timer expired(so the edable port will take place only after this timer)
975 if ((port->sm_vars & AD_PORT_SELECTED) && (port->partner_oper_port_state & AD_STATE_SYNCHRONIZATION) && !__check_agg_selection_timer(port)) { 946 if ((port->sm_vars & AD_PORT_SELECTED) && (port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION) && !__check_agg_selection_timer(port)) {
976 port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;// next state 947 port->sm_mux_state = AD_MUX_COLLECTING_DISTRIBUTING;// next state
977 } else if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) { // if UNSELECTED or STANDBY 948 } else if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY)) { // if UNSELECTED or STANDBY
978 port->sm_vars &= ~AD_PORT_READY_N; 949 port->sm_vars &= ~AD_PORT_READY_N;
@@ -984,7 +955,7 @@ static void ad_mux_machine(struct port *port)
984 break; 955 break;
985 case AD_MUX_COLLECTING_DISTRIBUTING: 956 case AD_MUX_COLLECTING_DISTRIBUTING:
986 if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY) || 957 if (!(port->sm_vars & AD_PORT_SELECTED) || (port->sm_vars & AD_PORT_STANDBY) ||
987 !(port->partner_oper_port_state & AD_STATE_SYNCHRONIZATION) 958 !(port->partner_oper.port_state & AD_STATE_SYNCHRONIZATION)
988 ) { 959 ) {
989 port->sm_mux_state = AD_MUX_ATTACHED;// next state 960 port->sm_mux_state = AD_MUX_ATTACHED;// next state
990 961
@@ -1007,7 +978,7 @@ static void ad_mux_machine(struct port *port)
1007 978
1008 // check if the state machine was changed 979 // check if the state machine was changed
1009 if (port->sm_mux_state != last_state) { 980 if (port->sm_mux_state != last_state) {
1010 dprintk("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_mux_state); 981 pr_debug("Mux Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_mux_state);
1011 switch (port->sm_mux_state) { 982 switch (port->sm_mux_state) {
1012 case AD_MUX_DETACHED: 983 case AD_MUX_DETACHED:
1013 __detach_bond_from_agg(port); 984 __detach_bond_from_agg(port);
@@ -1015,7 +986,7 @@ static void ad_mux_machine(struct port *port)
1015 ad_disable_collecting_distributing(port); 986 ad_disable_collecting_distributing(port);
1016 port->actor_oper_port_state &= ~AD_STATE_COLLECTING; 987 port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
1017 port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING; 988 port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
1018 port->ntt = 1; 989 port->ntt = true;
1019 break; 990 break;
1020 case AD_MUX_WAITING: 991 case AD_MUX_WAITING:
1021 port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0); 992 port->sm_mux_timer_counter = __ad_timer_to_ticks(AD_WAIT_WHILE_TIMER, 0);
@@ -1026,13 +997,13 @@ static void ad_mux_machine(struct port *port)
1026 port->actor_oper_port_state &= ~AD_STATE_COLLECTING; 997 port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
1027 port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING; 998 port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
1028 ad_disable_collecting_distributing(port); 999 ad_disable_collecting_distributing(port);
1029 port->ntt = 1; 1000 port->ntt = true;
1030 break; 1001 break;
1031 case AD_MUX_COLLECTING_DISTRIBUTING: 1002 case AD_MUX_COLLECTING_DISTRIBUTING:
1032 port->actor_oper_port_state |= AD_STATE_COLLECTING; 1003 port->actor_oper_port_state |= AD_STATE_COLLECTING;
1033 port->actor_oper_port_state |= AD_STATE_DISTRIBUTING; 1004 port->actor_oper_port_state |= AD_STATE_DISTRIBUTING;
1034 ad_enable_collecting_distributing(port); 1005 ad_enable_collecting_distributing(port);
1035 port->ntt = 1; 1006 port->ntt = true;
1036 break; 1007 break;
1037 default: //to silence the compiler 1008 default: //to silence the compiler
1038 break; 1009 break;
@@ -1106,7 +1077,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1106 1077
1107 // check if the State machine was changed or new lacpdu arrived 1078 // check if the State machine was changed or new lacpdu arrived
1108 if ((port->sm_rx_state != last_state) || (lacpdu)) { 1079 if ((port->sm_rx_state != last_state) || (lacpdu)) {
1109 dprintk("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_rx_state); 1080 pr_debug("Rx Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_rx_state);
1110 switch (port->sm_rx_state) { 1081 switch (port->sm_rx_state) {
1111 case AD_RX_INITIALIZE: 1082 case AD_RX_INITIALIZE:
1112 if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) { 1083 if (!(port->actor_oper_port_key & AD_DUPLEX_KEY_BITS)) {
@@ -1128,7 +1099,7 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1128 case AD_RX_LACP_DISABLED: 1099 case AD_RX_LACP_DISABLED:
1129 port->sm_vars &= ~AD_PORT_SELECTED; 1100 port->sm_vars &= ~AD_PORT_SELECTED;
1130 __record_default(port); 1101 __record_default(port);
1131 port->partner_oper_port_state &= ~AD_STATE_AGGREGATION; 1102 port->partner_oper.port_state &= ~AD_STATE_AGGREGATION;
1132 port->sm_vars |= AD_PORT_MATCHED; 1103 port->sm_vars |= AD_PORT_MATCHED;
1133 port->actor_oper_port_state &= ~AD_STATE_EXPIRED; 1104 port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
1134 break; 1105 break;
@@ -1136,9 +1107,9 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1136 //Reset of the Synchronization flag. (Standard 43.4.12) 1107 //Reset of the Synchronization flag. (Standard 43.4.12)
1137 //This reset cause to disable this port in the COLLECTING_DISTRIBUTING state of the 1108 //This reset cause to disable this port in the COLLECTING_DISTRIBUTING state of the
1138 //mux machine in case of EXPIRED even if LINK_DOWN didn't arrive for the port. 1109 //mux machine in case of EXPIRED even if LINK_DOWN didn't arrive for the port.
1139 port->partner_oper_port_state &= ~AD_STATE_SYNCHRONIZATION; 1110 port->partner_oper.port_state &= ~AD_STATE_SYNCHRONIZATION;
1140 port->sm_vars &= ~AD_PORT_MATCHED; 1111 port->sm_vars &= ~AD_PORT_MATCHED;
1141 port->partner_oper_port_state |= AD_SHORT_TIMEOUT; 1112 port->partner_oper.port_state |= AD_SHORT_TIMEOUT;
1142 port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT)); 1113 port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
1143 port->actor_oper_port_state |= AD_STATE_EXPIRED; 1114 port->actor_oper_port_state |= AD_STATE_EXPIRED;
1144 break; 1115 break;
@@ -1191,11 +1162,13 @@ static void ad_tx_machine(struct port *port)
1191 // check if there is something to send 1162 // check if there is something to send
1192 if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) { 1163 if (port->ntt && (port->sm_vars & AD_PORT_LACP_ENABLED)) {
1193 __update_lacpdu_from_port(port); 1164 __update_lacpdu_from_port(port);
1194 // send the lacpdu 1165
1195 if (ad_lacpdu_send(port) >= 0) { 1166 if (ad_lacpdu_send(port) >= 0) {
1196 dprintk("Sent LACPDU on port %d\n", port->actor_port_number); 1167 pr_debug("Sent LACPDU on port %d\n", port->actor_port_number);
1197 // mark ntt as false, so it will not be sent again until demanded 1168
1198 port->ntt = 0; 1169 /* mark ntt as false, so it will not be sent again until
1170 demanded */
1171 port->ntt = false;
1199 } 1172 }
1200 } 1173 }
1201 // restart tx timer(to verify that we will not exceed AD_MAX_TX_IN_SECOND 1174 // restart tx timer(to verify that we will not exceed AD_MAX_TX_IN_SECOND
@@ -1218,7 +1191,7 @@ static void ad_periodic_machine(struct port *port)
1218 1191
1219 // check if port was reinitialized 1192 // check if port was reinitialized
1220 if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) || 1193 if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
1221 (!(port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY) && !(port->partner_oper_port_state & AD_STATE_LACP_ACTIVITY)) 1194 (!(port->actor_oper_port_state & AD_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & AD_STATE_LACP_ACTIVITY))
1222 ) { 1195 ) {
1223 port->sm_periodic_state = AD_NO_PERIODIC; // next state 1196 port->sm_periodic_state = AD_NO_PERIODIC; // next state
1224 } 1197 }
@@ -1232,12 +1205,12 @@ static void ad_periodic_machine(struct port *port)
1232 // If not expired, check if there is some new timeout parameter from the partner state 1205 // If not expired, check if there is some new timeout parameter from the partner state
1233 switch (port->sm_periodic_state) { 1206 switch (port->sm_periodic_state) {
1234 case AD_FAST_PERIODIC: 1207 case AD_FAST_PERIODIC:
1235 if (!(port->partner_oper_port_state & AD_STATE_LACP_TIMEOUT)) { 1208 if (!(port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) {
1236 port->sm_periodic_state = AD_SLOW_PERIODIC; // next state 1209 port->sm_periodic_state = AD_SLOW_PERIODIC; // next state
1237 } 1210 }
1238 break; 1211 break;
1239 case AD_SLOW_PERIODIC: 1212 case AD_SLOW_PERIODIC:
1240 if ((port->partner_oper_port_state & AD_STATE_LACP_TIMEOUT)) { 1213 if ((port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) {
1241 // stop current timer 1214 // stop current timer
1242 port->sm_periodic_timer_counter = 0; 1215 port->sm_periodic_timer_counter = 0;
1243 port->sm_periodic_state = AD_PERIODIC_TX; // next state 1216 port->sm_periodic_state = AD_PERIODIC_TX; // next state
@@ -1253,7 +1226,7 @@ static void ad_periodic_machine(struct port *port)
1253 port->sm_periodic_state = AD_FAST_PERIODIC; // next state 1226 port->sm_periodic_state = AD_FAST_PERIODIC; // next state
1254 break; 1227 break;
1255 case AD_PERIODIC_TX: 1228 case AD_PERIODIC_TX:
1256 if (!(port->partner_oper_port_state & AD_STATE_LACP_TIMEOUT)) { 1229 if (!(port->partner_oper.port_state & AD_STATE_LACP_TIMEOUT)) {
1257 port->sm_periodic_state = AD_SLOW_PERIODIC; // next state 1230 port->sm_periodic_state = AD_SLOW_PERIODIC; // next state
1258 } else { 1231 } else {
1259 port->sm_periodic_state = AD_FAST_PERIODIC; // next state 1232 port->sm_periodic_state = AD_FAST_PERIODIC; // next state
@@ -1266,7 +1239,7 @@ static void ad_periodic_machine(struct port *port)
1266 1239
1267 // check if the state machine was changed 1240 // check if the state machine was changed
1268 if (port->sm_periodic_state != last_state) { 1241 if (port->sm_periodic_state != last_state) {
1269 dprintk("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_periodic_state); 1242 pr_debug("Periodic Machine: Port=%d, Last State=%d, Curr State=%d\n", port->actor_port_number, last_state, port->sm_periodic_state);
1270 switch (port->sm_periodic_state) { 1243 switch (port->sm_periodic_state) {
1271 case AD_NO_PERIODIC: 1244 case AD_NO_PERIODIC:
1272 port->sm_periodic_timer_counter = 0; // zero timer 1245 port->sm_periodic_timer_counter = 0; // zero timer
@@ -1278,7 +1251,7 @@ static void ad_periodic_machine(struct port *port)
1278 port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_SLOW_PERIODIC_TIME))-1; // decrement 1 tick we lost in the PERIODIC_TX cycle 1251 port->sm_periodic_timer_counter = __ad_timer_to_ticks(AD_PERIODIC_TIMER, (u16)(AD_SLOW_PERIODIC_TIME))-1; // decrement 1 tick we lost in the PERIODIC_TX cycle
1279 break; 1252 break;
1280 case AD_PERIODIC_TX: 1253 case AD_PERIODIC_TX:
1281 port->ntt = 1; 1254 port->ntt = true;
1282 break; 1255 break;
1283 default: //to silence the compiler 1256 default: //to silence the compiler
1284 break; 1257 break;
@@ -1323,7 +1296,7 @@ static void ad_port_selection_logic(struct port *port)
1323 port->next_port_in_aggregator=NULL; 1296 port->next_port_in_aggregator=NULL;
1324 port->actor_port_aggregator_identifier=0; 1297 port->actor_port_aggregator_identifier=0;
1325 1298
1326 dprintk("Port %d left LAG %d\n", port->actor_port_number, temp_aggregator->aggregator_identifier); 1299 pr_debug("Port %d left LAG %d\n", port->actor_port_number, temp_aggregator->aggregator_identifier);
1327 // if the aggregator is empty, clear its parameters, and set it ready to be attached 1300 // if the aggregator is empty, clear its parameters, and set it ready to be attached
1328 if (!temp_aggregator->lag_ports) { 1301 if (!temp_aggregator->lag_ports) {
1329 ad_clear_agg(temp_aggregator); 1302 ad_clear_agg(temp_aggregator);
@@ -1352,11 +1325,11 @@ static void ad_port_selection_logic(struct port *port)
1352 } 1325 }
1353 // check if current aggregator suits us 1326 // check if current aggregator suits us
1354 if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && // if all parameters match AND 1327 if (((aggregator->actor_oper_aggregator_key == port->actor_oper_port_key) && // if all parameters match AND
1355 !MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(port->partner_oper_system)) && 1328 !MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(port->partner_oper.system)) &&
1356 (aggregator->partner_system_priority == port->partner_oper_system_priority) && 1329 (aggregator->partner_system_priority == port->partner_oper.system_priority) &&
1357 (aggregator->partner_oper_aggregator_key == port->partner_oper_key) 1330 (aggregator->partner_oper_aggregator_key == port->partner_oper.key)
1358 ) && 1331 ) &&
1359 ((MAC_ADDRESS_COMPARE(&(port->partner_oper_system), &(null_mac_addr)) && // partner answers 1332 ((MAC_ADDRESS_COMPARE(&(port->partner_oper.system), &(null_mac_addr)) && // partner answers
1360 !aggregator->is_individual) // but is not individual OR 1333 !aggregator->is_individual) // but is not individual OR
1361 ) 1334 )
1362 ) { 1335 ) {
@@ -1366,7 +1339,7 @@ static void ad_port_selection_logic(struct port *port)
1366 port->next_port_in_aggregator=aggregator->lag_ports; 1339 port->next_port_in_aggregator=aggregator->lag_ports;
1367 port->aggregator->num_of_ports++; 1340 port->aggregator->num_of_ports++;
1368 aggregator->lag_ports=port; 1341 aggregator->lag_ports=port;
1369 dprintk("Port %d joined LAG %d(existing LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier); 1342 pr_debug("Port %d joined LAG %d(existing LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
1370 1343
1371 // mark this port as selected 1344 // mark this port as selected
1372 port->sm_vars |= AD_PORT_SELECTED; 1345 port->sm_vars |= AD_PORT_SELECTED;
@@ -1385,16 +1358,16 @@ static void ad_port_selection_logic(struct port *port)
1385 // update the new aggregator's parameters 1358 // update the new aggregator's parameters
1386 // if port was responsed from the end-user 1359 // if port was responsed from the end-user
1387 if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS) {// if port is full duplex 1360 if (port->actor_oper_port_key & AD_DUPLEX_KEY_BITS) {// if port is full duplex
1388 port->aggregator->is_individual = 0; 1361 port->aggregator->is_individual = false;
1389 } else { 1362 } else {
1390 port->aggregator->is_individual = 1; 1363 port->aggregator->is_individual = true;
1391 } 1364 }
1392 1365
1393 port->aggregator->actor_admin_aggregator_key = port->actor_admin_port_key; 1366 port->aggregator->actor_admin_aggregator_key = port->actor_admin_port_key;
1394 port->aggregator->actor_oper_aggregator_key = port->actor_oper_port_key; 1367 port->aggregator->actor_oper_aggregator_key = port->actor_oper_port_key;
1395 port->aggregator->partner_system=port->partner_oper_system; 1368 port->aggregator->partner_system=port->partner_oper.system;
1396 port->aggregator->partner_system_priority = port->partner_oper_system_priority; 1369 port->aggregator->partner_system_priority = port->partner_oper.system_priority;
1397 port->aggregator->partner_oper_aggregator_key = port->partner_oper_key; 1370 port->aggregator->partner_oper_aggregator_key = port->partner_oper.key;
1398 port->aggregator->receive_state = 1; 1371 port->aggregator->receive_state = 1;
1399 port->aggregator->transmit_state = 1; 1372 port->aggregator->transmit_state = 1;
1400 port->aggregator->lag_ports = port; 1373 port->aggregator->lag_ports = port;
@@ -1403,7 +1376,7 @@ static void ad_port_selection_logic(struct port *port)
1403 // mark this port as selected 1376 // mark this port as selected
1404 port->sm_vars |= AD_PORT_SELECTED; 1377 port->sm_vars |= AD_PORT_SELECTED;
1405 1378
1406 dprintk("Port %d joined LAG %d(new LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier); 1379 pr_debug("Port %d joined LAG %d(new LAG)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
1407 } else { 1380 } else {
1408 printk(KERN_ERR DRV_NAME ": %s: Port %d (on %s) did not find a suitable aggregator\n", 1381 printk(KERN_ERR DRV_NAME ": %s: Port %d (on %s) did not find a suitable aggregator\n",
1409 port->slave->dev->master->name, 1382 port->slave->dev->master->name,
@@ -1414,9 +1387,82 @@ static void ad_port_selection_logic(struct port *port)
1414 // else set ready=FALSE in all aggregator's ports 1387 // else set ready=FALSE in all aggregator's ports
1415 __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator)); 1388 __set_agg_ports_ready(port->aggregator, __agg_ports_are_ready(port->aggregator));
1416 1389
1417 if (!__check_agg_selection_timer(port) && (aggregator = __get_first_agg(port))) { 1390 aggregator = __get_first_agg(port);
1418 ad_agg_selection_logic(aggregator); 1391 ad_agg_selection_logic(aggregator);
1392}
1393
1394/*
1395 * Decide if "agg" is a better choice for the new active aggregator that
1396 * the current best, according to the ad_select policy.
1397 */
1398static struct aggregator *ad_agg_selection_test(struct aggregator *best,
1399 struct aggregator *curr)
1400{
1401 /*
1402 * 0. If no best, select current.
1403 *
1404 * 1. If the current agg is not individual, and the best is
1405 * individual, select current.
1406 *
1407 * 2. If current agg is individual and the best is not, keep best.
1408 *
1409 * 3. Therefore, current and best are both individual or both not
1410 * individual, so:
1411 *
1412 * 3a. If current agg partner replied, and best agg partner did not,
1413 * select current.
1414 *
1415 * 3b. If current agg partner did not reply and best agg partner
1416 * did reply, keep best.
1417 *
1418 * 4. Therefore, current and best both have partner replies or
1419 * both do not, so perform selection policy:
1420 *
1421 * BOND_AD_COUNT: Select by count of ports. If count is equal,
1422 * select by bandwidth.
1423 *
1424 * BOND_AD_STABLE, BOND_AD_BANDWIDTH: Select by bandwidth.
1425 */
1426 if (!best)
1427 return curr;
1428
1429 if (!curr->is_individual && best->is_individual)
1430 return curr;
1431
1432 if (curr->is_individual && !best->is_individual)
1433 return best;
1434
1435 if (__agg_has_partner(curr) && !__agg_has_partner(best))
1436 return curr;
1437
1438 if (!__agg_has_partner(curr) && __agg_has_partner(best))
1439 return best;
1440
1441 switch (__get_agg_selection_mode(curr->lag_ports)) {
1442 case BOND_AD_COUNT:
1443 if (curr->num_of_ports > best->num_of_ports)
1444 return curr;
1445
1446 if (curr->num_of_ports < best->num_of_ports)
1447 return best;
1448
1449 /*FALLTHROUGH*/
1450 case BOND_AD_STABLE:
1451 case BOND_AD_BANDWIDTH:
1452 if (__get_agg_bandwidth(curr) > __get_agg_bandwidth(best))
1453 return curr;
1454
1455 break;
1456
1457 default:
1458 printk(KERN_WARNING DRV_NAME
1459 ": %s: Impossible agg select mode %d\n",
1460 curr->slave->dev->master->name,
1461 __get_agg_selection_mode(curr->lag_ports));
1462 break;
1419 } 1463 }
1464
1465 return best;
1420} 1466}
1421 1467
1422/** 1468/**
@@ -1424,156 +1470,138 @@ static void ad_port_selection_logic(struct port *port)
1424 * @aggregator: the aggregator we're looking at 1470 * @aggregator: the aggregator we're looking at
1425 * 1471 *
1426 * It is assumed that only one aggregator may be selected for a team. 1472 * It is assumed that only one aggregator may be selected for a team.
1427 * The logic of this function is to select (at first time) the aggregator with 1473 *
1428 * the most ports attached to it, and to reselect the active aggregator only if 1474 * The logic of this function is to select the aggregator according to
1429 * the previous aggregator has no more ports related to it. 1475 * the ad_select policy:
1476 *
1477 * BOND_AD_STABLE: select the aggregator with the most ports attached to
1478 * it, and to reselect the active aggregator only if the previous
1479 * aggregator has no more ports related to it.
1480 *
1481 * BOND_AD_BANDWIDTH: select the aggregator with the highest total
1482 * bandwidth, and reselect whenever a link state change takes place or the
1483 * set of slaves in the bond changes.
1484 *
1485 * BOND_AD_COUNT: select the aggregator with largest number of ports
1486 * (slaves), and reselect whenever a link state change takes place or the
1487 * set of slaves in the bond changes.
1430 * 1488 *
1431 * FIXME: this function MUST be called with the first agg in the bond, or 1489 * FIXME: this function MUST be called with the first agg in the bond, or
1432 * __get_active_agg() won't work correctly. This function should be better 1490 * __get_active_agg() won't work correctly. This function should be better
1433 * called with the bond itself, and retrieve the first agg from it. 1491 * called with the bond itself, and retrieve the first agg from it.
1434 */ 1492 */
1435static void ad_agg_selection_logic(struct aggregator *aggregator) 1493static void ad_agg_selection_logic(struct aggregator *agg)
1436{ 1494{
1437 struct aggregator *best_aggregator = NULL, *active_aggregator = NULL; 1495 struct aggregator *best, *active, *origin;
1438 struct aggregator *last_active_aggregator = NULL, *origin_aggregator;
1439 struct port *port; 1496 struct port *port;
1440 u16 num_of_aggs=0;
1441 1497
1442 origin_aggregator = aggregator; 1498 origin = agg;
1443 1499
1444 //get current active aggregator 1500 active = __get_active_agg(agg);
1445 last_active_aggregator = __get_active_agg(aggregator); 1501 best = active;
1446 1502
1447 // search for the aggregator with the most ports attached to it.
1448 do { 1503 do {
1449 // count how many candidate lag's we have 1504 agg->is_active = 0;
1450 if (aggregator->lag_ports) { 1505
1451 num_of_aggs++; 1506 if (agg->num_of_ports)
1452 } 1507 best = ad_agg_selection_test(best, agg);
1453 if (aggregator->is_active && !aggregator->is_individual && // if current aggregator is the active aggregator 1508
1454 MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(null_mac_addr))) { // and partner answers to 802.3ad PDUs 1509 } while ((agg = __get_next_agg(agg)));
1455 if (aggregator->num_of_ports) { // if any ports attached to the current aggregator 1510
1456 best_aggregator=NULL; // disregard the best aggregator that was chosen by now 1511 if (best &&
1457 break; // stop the selection of other aggregator if there are any ports attached to this active aggregator 1512 __get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
1458 } else { // no ports attached to this active aggregator 1513 /*
1459 aggregator->is_active = 0; // mark this aggregator as not active anymore 1514 * For the STABLE policy, don't replace the old active
1460 } 1515 * aggregator if it's still active (it has an answering
1461 } 1516 * partner) or if both the best and active don't have an
1462 if (aggregator->num_of_ports) { // if any ports attached 1517 * answering partner.
1463 if (best_aggregator) { // if there is a candidte aggregator 1518 */
1464 //The reasons for choosing new best aggregator: 1519 if (active && active->lag_ports &&
1465 // 1. if current agg is NOT individual and the best agg chosen so far is individual OR 1520 active->lag_ports->is_enabled &&
1466 // current and best aggs are both individual or both not individual, AND 1521 (__agg_has_partner(active) ||
1467 // 2a. current agg partner reply but best agg partner do not reply OR 1522 (!__agg_has_partner(active) && !__agg_has_partner(best)))) {
1468 // 2b. current agg partner reply OR current agg partner do not reply AND best agg partner also do not reply AND 1523 if (!(!active->actor_oper_aggregator_key &&
1469 // current has more ports/bandwidth, or same amount of ports but current has faster ports, THEN 1524 best->actor_oper_aggregator_key)) {
1470 // current agg become best agg so far 1525 best = NULL;
1471 1526 active->is_active = 1;
1472 //if current agg is NOT individual and the best agg chosen so far is individual change best_aggregator
1473 if (!aggregator->is_individual && best_aggregator->is_individual) {
1474 best_aggregator=aggregator;
1475 }
1476 // current and best aggs are both individual or both not individual
1477 else if ((aggregator->is_individual && best_aggregator->is_individual) ||
1478 (!aggregator->is_individual && !best_aggregator->is_individual)) {
1479 // current and best aggs are both individual or both not individual AND
1480 // current agg partner reply but best agg partner do not reply
1481 if ((MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(null_mac_addr)) &&
1482 !MAC_ADDRESS_COMPARE(&(best_aggregator->partner_system), &(null_mac_addr)))) {
1483 best_aggregator=aggregator;
1484 }
1485 // current agg partner reply OR current agg partner do not reply AND best agg partner also do not reply
1486 else if (! (!MAC_ADDRESS_COMPARE(&(aggregator->partner_system), &(null_mac_addr)) &&
1487 MAC_ADDRESS_COMPARE(&(best_aggregator->partner_system), &(null_mac_addr)))) {
1488 if ((__get_agg_selection_mode(aggregator->lag_ports) == AD_BANDWIDTH)&&
1489 (__get_agg_bandwidth(aggregator) > __get_agg_bandwidth(best_aggregator))) {
1490 best_aggregator=aggregator;
1491 } else if (__get_agg_selection_mode(aggregator->lag_ports) == AD_COUNT) {
1492 if (((aggregator->num_of_ports > best_aggregator->num_of_ports) &&
1493 (aggregator->actor_oper_aggregator_key & AD_SPEED_KEY_BITS))||
1494 ((aggregator->num_of_ports == best_aggregator->num_of_ports) &&
1495 ((u16)(aggregator->actor_oper_aggregator_key & AD_SPEED_KEY_BITS) >
1496 (u16)(best_aggregator->actor_oper_aggregator_key & AD_SPEED_KEY_BITS)))) {
1497 best_aggregator=aggregator;
1498 }
1499 }
1500 }
1501 }
1502 } else {
1503 best_aggregator=aggregator;
1504 } 1527 }
1505 } 1528 }
1506 aggregator->is_active = 0; // mark all aggregators as not active anymore 1529 }
1507 } while ((aggregator = __get_next_agg(aggregator)));
1508
1509 // if we have new aggregator selected, don't replace the old aggregator if it has an answering partner,
1510 // or if both old aggregator and new aggregator don't have answering partner
1511 if (best_aggregator) {
1512 if (last_active_aggregator && last_active_aggregator->lag_ports && last_active_aggregator->lag_ports->is_enabled &&
1513 (MAC_ADDRESS_COMPARE(&(last_active_aggregator->partner_system), &(null_mac_addr)) || // partner answers OR
1514 (!MAC_ADDRESS_COMPARE(&(last_active_aggregator->partner_system), &(null_mac_addr)) && // both old and new
1515 !MAC_ADDRESS_COMPARE(&(best_aggregator->partner_system), &(null_mac_addr)))) // partner do not answer
1516 ) {
1517 // if new aggregator has link, and old aggregator does not, replace old aggregator.(do nothing)
1518 // -> don't replace otherwise.
1519 if (!(!last_active_aggregator->actor_oper_aggregator_key && best_aggregator->actor_oper_aggregator_key)) {
1520 best_aggregator=NULL;
1521 last_active_aggregator->is_active = 1; // don't replace good old aggregator
1522 1530
1523 } 1531 if (best && (best == active)) {
1524 } 1532 best = NULL;
1533 active->is_active = 1;
1525 } 1534 }
1526 1535
1527 // if there is new best aggregator, activate it 1536 // if there is new best aggregator, activate it
1528 if (best_aggregator) { 1537 if (best) {
1529 for (aggregator = __get_first_agg(best_aggregator->lag_ports); 1538 pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
1530 aggregator; 1539 best->aggregator_identifier, best->num_of_ports,
1531 aggregator = __get_next_agg(aggregator)) { 1540 best->actor_oper_aggregator_key,
1532 1541 best->partner_oper_aggregator_key,
1533 dprintk("Agg=%d; Ports=%d; a key=%d; p key=%d; Indiv=%d; Active=%d\n", 1542 best->is_individual, best->is_active);
1534 aggregator->aggregator_identifier, aggregator->num_of_ports, 1543 pr_debug("best ports %p slave %p %s\n",
1535 aggregator->actor_oper_aggregator_key, aggregator->partner_oper_aggregator_key, 1544 best->lag_ports, best->slave,
1536 aggregator->is_individual, aggregator->is_active); 1545 best->slave ? best->slave->dev->name : "NULL");
1546
1547 for (agg = __get_first_agg(best->lag_ports); agg;
1548 agg = __get_next_agg(agg)) {
1549
1550 pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
1551 agg->aggregator_identifier, agg->num_of_ports,
1552 agg->actor_oper_aggregator_key,
1553 agg->partner_oper_aggregator_key,
1554 agg->is_individual, agg->is_active);
1537 } 1555 }
1538 1556
1539 // check if any partner replys 1557 // check if any partner replys
1540 if (best_aggregator->is_individual) { 1558 if (best->is_individual) {
1541 printk(KERN_WARNING DRV_NAME ": %s: Warning: No 802.3ad response from " 1559 printk(KERN_WARNING DRV_NAME ": %s: Warning: No 802.3ad"
1542 "the link partner for any adapters in the bond\n", 1560 " response from the link partner for any"
1543 best_aggregator->slave->dev->master->name); 1561 " adapters in the bond\n",
1562 best->slave->dev->master->name);
1544 } 1563 }
1545 1564
1546 // check if there are more than one aggregator 1565 best->is_active = 1;
1547 if (num_of_aggs > 1) { 1566 pr_debug("LAG %d chosen as the active LAG\n",
1548 dprintk("Warning: More than one Link Aggregation Group was " 1567 best->aggregator_identifier);
1549 "found in the bond. Only one group will function in the bond\n"); 1568 pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
1550 } 1569 best->aggregator_identifier, best->num_of_ports,
1551 1570 best->actor_oper_aggregator_key,
1552 best_aggregator->is_active = 1; 1571 best->partner_oper_aggregator_key,
1553 dprintk("LAG %d choosed as the active LAG\n", best_aggregator->aggregator_identifier); 1572 best->is_individual, best->is_active);
1554 dprintk("Agg=%d; Ports=%d; a key=%d; p key=%d; Indiv=%d; Active=%d\n",
1555 best_aggregator->aggregator_identifier, best_aggregator->num_of_ports,
1556 best_aggregator->actor_oper_aggregator_key, best_aggregator->partner_oper_aggregator_key,
1557 best_aggregator->is_individual, best_aggregator->is_active);
1558 1573
1559 // disable the ports that were related to the former active_aggregator 1574 // disable the ports that were related to the former active_aggregator
1560 if (last_active_aggregator) { 1575 if (active) {
1561 for (port=last_active_aggregator->lag_ports; port; port=port->next_port_in_aggregator) { 1576 for (port = active->lag_ports; port;
1577 port = port->next_port_in_aggregator) {
1562 __disable_port(port); 1578 __disable_port(port);
1563 } 1579 }
1564 } 1580 }
1565 } 1581 }
1566 1582
1567 // if the selected aggregator is of join individuals(partner_system is NULL), enable their ports 1583 /*
1568 active_aggregator = __get_active_agg(origin_aggregator); 1584 * if the selected aggregator is of join individuals
1585 * (partner_system is NULL), enable their ports
1586 */
1587 active = __get_active_agg(origin);
1569 1588
1570 if (active_aggregator) { 1589 if (active) {
1571 if (!MAC_ADDRESS_COMPARE(&(active_aggregator->partner_system), &(null_mac_addr))) { 1590 if (!__agg_has_partner(active)) {
1572 for (port=active_aggregator->lag_ports; port; port=port->next_port_in_aggregator) { 1591 for (port = active->lag_ports; port;
1592 port = port->next_port_in_aggregator) {
1573 __enable_port(port); 1593 __enable_port(port);
1574 } 1594 }
1575 } 1595 }
1576 } 1596 }
1597
1598 if (origin->slave) {
1599 struct bonding *bond;
1600
1601 bond = bond_get_bond_by_slave(origin->slave);
1602 if (bond)
1603 bond_3ad_set_carrier(bond);
1604 }
1577} 1605}
1578 1606
1579/** 1607/**
@@ -1584,7 +1612,7 @@ static void ad_agg_selection_logic(struct aggregator *aggregator)
1584static void ad_clear_agg(struct aggregator *aggregator) 1612static void ad_clear_agg(struct aggregator *aggregator)
1585{ 1613{
1586 if (aggregator) { 1614 if (aggregator) {
1587 aggregator->is_individual = 0; 1615 aggregator->is_individual = false;
1588 aggregator->actor_admin_aggregator_key = 0; 1616 aggregator->actor_admin_aggregator_key = 0;
1589 aggregator->actor_oper_aggregator_key = 0; 1617 aggregator->actor_oper_aggregator_key = 0;
1590 aggregator->partner_system = null_mac_addr; 1618 aggregator->partner_system = null_mac_addr;
@@ -1595,7 +1623,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
1595 aggregator->lag_ports = NULL; 1623 aggregator->lag_ports = NULL;
1596 aggregator->is_active = 0; 1624 aggregator->is_active = 0;
1597 aggregator->num_of_ports = 0; 1625 aggregator->num_of_ports = 0;
1598 dprintk("LAG %d was cleared\n", aggregator->aggregator_identifier); 1626 pr_debug("LAG %d was cleared\n", aggregator->aggregator_identifier);
1599 } 1627 }
1600} 1628}
1601 1629
@@ -1623,13 +1651,32 @@ static void ad_initialize_agg(struct aggregator *aggregator)
1623 */ 1651 */
1624static void ad_initialize_port(struct port *port, int lacp_fast) 1652static void ad_initialize_port(struct port *port, int lacp_fast)
1625{ 1653{
1654 static const struct port_params tmpl = {
1655 .system_priority = 0xffff,
1656 .key = 1,
1657 .port_number = 1,
1658 .port_priority = 0xff,
1659 .port_state = 1,
1660 };
1661 static const struct lacpdu lacpdu = {
1662 .subtype = 0x01,
1663 .version_number = 0x01,
1664 .tlv_type_actor_info = 0x01,
1665 .actor_information_length = 0x14,
1666 .tlv_type_partner_info = 0x02,
1667 .partner_information_length = 0x14,
1668 .tlv_type_collector_info = 0x03,
1669 .collector_information_length = 0x10,
1670 .collector_max_delay = htons(AD_COLLECTOR_MAX_DELAY),
1671 };
1672
1626 if (port) { 1673 if (port) {
1627 port->actor_port_number = 1; 1674 port->actor_port_number = 1;
1628 port->actor_port_priority = 0xff; 1675 port->actor_port_priority = 0xff;
1629 port->actor_system = null_mac_addr; 1676 port->actor_system = null_mac_addr;
1630 port->actor_system_priority = 0xffff; 1677 port->actor_system_priority = 0xffff;
1631 port->actor_port_aggregator_identifier = 0; 1678 port->actor_port_aggregator_identifier = 0;
1632 port->ntt = 0; 1679 port->ntt = false;
1633 port->actor_admin_port_key = 1; 1680 port->actor_admin_port_key = 1;
1634 port->actor_oper_port_key = 1; 1681 port->actor_oper_port_key = 1;
1635 port->actor_admin_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY; 1682 port->actor_admin_port_state = AD_STATE_AGGREGATION | AD_STATE_LACP_ACTIVITY;
@@ -1639,19 +1686,10 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
1639 port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT; 1686 port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
1640 } 1687 }
1641 1688
1642 port->partner_admin_system = null_mac_addr; 1689 memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
1643 port->partner_oper_system = null_mac_addr; 1690 memcpy(&port->partner_oper, &tmpl, sizeof(tmpl));
1644 port->partner_admin_system_priority = 0xffff; 1691
1645 port->partner_oper_system_priority = 0xffff; 1692 port->is_enabled = true;
1646 port->partner_admin_key = 1;
1647 port->partner_oper_key = 1;
1648 port->partner_admin_port_number = 1;
1649 port->partner_oper_port_number = 1;
1650 port->partner_admin_port_priority = 0xff;
1651 port->partner_oper_port_priority = 0xff;
1652 port->partner_admin_port_state = 1;
1653 port->partner_oper_port_state = 1;
1654 port->is_enabled = 1;
1655 // ****** private parameters ****** 1693 // ****** private parameters ******
1656 port->sm_vars = 0x3; 1694 port->sm_vars = 0x3;
1657 port->sm_rx_state = 0; 1695 port->sm_rx_state = 0;
@@ -1667,7 +1705,7 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
1667 port->next_port_in_aggregator = NULL; 1705 port->next_port_in_aggregator = NULL;
1668 port->transaction_id = 0; 1706 port->transaction_id = 0;
1669 1707
1670 ad_initialize_lacpdu(&(port->lacpdu)); 1708 memcpy(&port->lacpdu, &lacpdu, sizeof(lacpdu));
1671 } 1709 }
1672} 1710}
1673 1711
@@ -1680,7 +1718,7 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
1680static void ad_enable_collecting_distributing(struct port *port) 1718static void ad_enable_collecting_distributing(struct port *port)
1681{ 1719{
1682 if (port->aggregator->is_active) { 1720 if (port->aggregator->is_active) {
1683 dprintk("Enabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier); 1721 pr_debug("Enabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
1684 __enable_port(port); 1722 __enable_port(port);
1685 } 1723 }
1686} 1724}
@@ -1693,7 +1731,7 @@ static void ad_enable_collecting_distributing(struct port *port)
1693static void ad_disable_collecting_distributing(struct port *port) 1731static void ad_disable_collecting_distributing(struct port *port)
1694{ 1732{
1695 if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) { 1733 if (port->aggregator && MAC_ADDRESS_COMPARE(&(port->aggregator->partner_system), &(null_mac_addr))) {
1696 dprintk("Disabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier); 1734 pr_debug("Disabling port %d(LAG %d)\n", port->actor_port_number, port->aggregator->aggregator_identifier);
1697 __disable_port(port); 1735 __disable_port(port);
1698 } 1736 }
1699} 1737}
@@ -1731,7 +1769,7 @@ static void ad_marker_info_send(struct port *port)
1731 1769
1732 // send the marker information 1770 // send the marker information
1733 if (ad_marker_send(port, &marker) >= 0) { 1771 if (ad_marker_send(port, &marker) >= 0) {
1734 dprintk("Sent Marker Information on port %d\n", port->actor_port_number); 1772 pr_debug("Sent Marker Information on port %d\n", port->actor_port_number);
1735 } 1773 }
1736} 1774}
1737#endif 1775#endif
@@ -1755,7 +1793,7 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
1755 // send the marker response 1793 // send the marker response
1756 1794
1757 if (ad_marker_send(port, &marker) >= 0) { 1795 if (ad_marker_send(port, &marker) >= 0) {
1758 dprintk("Sent Marker Response on port %d\n", port->actor_port_number); 1796 pr_debug("Sent Marker Response on port %d\n", port->actor_port_number);
1759 } 1797 }
1760} 1798}
1761 1799
@@ -1776,53 +1814,6 @@ static void ad_marker_response_received(struct bond_marker *marker,
1776 // DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW 1814 // DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW
1777} 1815}
1778 1816
1779/**
1780 * ad_initialize_lacpdu - initialize a given lacpdu structure
1781 * @lacpdu: lacpdu structure to initialize
1782 *
1783 */
1784static void ad_initialize_lacpdu(struct lacpdu *lacpdu)
1785{
1786 u16 index;
1787
1788 // initialize lacpdu data
1789 lacpdu->subtype = 0x01;
1790 lacpdu->version_number = 0x01;
1791 lacpdu->tlv_type_actor_info = 0x01;
1792 lacpdu->actor_information_length = 0x14;
1793 // lacpdu->actor_system_priority updated on send
1794 // lacpdu->actor_system updated on send
1795 // lacpdu->actor_key updated on send
1796 // lacpdu->actor_port_priority updated on send
1797 // lacpdu->actor_port updated on send
1798 // lacpdu->actor_state updated on send
1799 lacpdu->tlv_type_partner_info = 0x02;
1800 lacpdu->partner_information_length = 0x14;
1801 for (index=0; index<=2; index++) {
1802 lacpdu->reserved_3_1[index]=0;
1803 }
1804 // lacpdu->partner_system_priority updated on send
1805 // lacpdu->partner_system updated on send
1806 // lacpdu->partner_key updated on send
1807 // lacpdu->partner_port_priority updated on send
1808 // lacpdu->partner_port updated on send
1809 // lacpdu->partner_state updated on send
1810 for (index=0; index<=2; index++) {
1811 lacpdu->reserved_3_2[index]=0;
1812 }
1813 lacpdu->tlv_type_collector_info = 0x03;
1814 lacpdu->collector_information_length= 0x10;
1815 lacpdu->collector_max_delay = htons(AD_COLLECTOR_MAX_DELAY);
1816 for (index=0; index<=11; index++) {
1817 lacpdu->reserved_12[index]=0;
1818 }
1819 lacpdu->tlv_type_terminator = 0x00;
1820 lacpdu->terminator_length = 0;
1821 for (index=0; index<=49; index++) {
1822 lacpdu->reserved_50[index]=0;
1823 }
1824}
1825
1826////////////////////////////////////////////////////////////////////////////////////// 1817//////////////////////////////////////////////////////////////////////////////////////
1827// ================= AD exported functions to the main bonding code ================== 1818// ================= AD exported functions to the main bonding code ==================
1828////////////////////////////////////////////////////////////////////////////////////// 1819//////////////////////////////////////////////////////////////////////////////////////
@@ -1830,6 +1821,19 @@ static void ad_initialize_lacpdu(struct lacpdu *lacpdu)
1830// Check aggregators status in team every T seconds 1821// Check aggregators status in team every T seconds
1831#define AD_AGGREGATOR_SELECTION_TIMER 8 1822#define AD_AGGREGATOR_SELECTION_TIMER 8
1832 1823
1824/*
1825 * bond_3ad_initiate_agg_selection(struct bonding *bond)
1826 *
1827 * Set the aggregation selection timer, to initiate an agg selection in
1828 * the very near future. Called during first initialization, and during
1829 * any down to up transitions of the bond.
1830 */
1831void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
1832{
1833 BOND_AD_INFO(bond).agg_select_timer = timeout;
1834 BOND_AD_INFO(bond).agg_select_mode = bond->params.ad_select;
1835}
1836
1833static u16 aggregator_identifier; 1837static u16 aggregator_identifier;
1834 1838
1835/** 1839/**
@@ -1854,9 +1858,9 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fas
1854 // initialize how many times this module is called in one second(should be about every 100ms) 1858 // initialize how many times this module is called in one second(should be about every 100ms)
1855 ad_ticks_per_sec = tick_resolution; 1859 ad_ticks_per_sec = tick_resolution;
1856 1860
1857 // initialize the aggregator selection timer(to activate an aggregation selection after initialize) 1861 bond_3ad_initiate_agg_selection(bond,
1858 BOND_AD_INFO(bond).agg_select_timer = (AD_AGGREGATOR_SELECTION_TIMER * ad_ticks_per_sec); 1862 AD_AGGREGATOR_SELECTION_TIMER *
1859 BOND_AD_INFO(bond).agg_select_mode = AD_BANDWIDTH; 1863 ad_ticks_per_sec);
1860 } 1864 }
1861} 1865}
1862 1866
@@ -1956,7 +1960,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
1956 return; 1960 return;
1957 } 1961 }
1958 1962
1959 dprintk("Unbinding Link Aggregation Group %d\n", aggregator->aggregator_identifier); 1963 pr_debug("Unbinding Link Aggregation Group %d\n", aggregator->aggregator_identifier);
1960 1964
1961 /* Tell the partner that this port is not suitable for aggregation */ 1965 /* Tell the partner that this port is not suitable for aggregation */
1962 port->actor_oper_port_state &= ~AD_STATE_AGGREGATION; 1966 port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
@@ -1980,7 +1984,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
1980 // if new aggregator found, copy the aggregator's parameters 1984 // if new aggregator found, copy the aggregator's parameters
1981 // and connect the related lag_ports to the new aggregator 1985 // and connect the related lag_ports to the new aggregator
1982 if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) { 1986 if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
1983 dprintk("Some port(s) related to LAG %d - replaceing with LAG %d\n", aggregator->aggregator_identifier, new_aggregator->aggregator_identifier); 1987 pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n", aggregator->aggregator_identifier, new_aggregator->aggregator_identifier);
1984 1988
1985 if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) { 1989 if ((new_aggregator->lag_ports == port) && new_aggregator->is_active) {
1986 printk(KERN_INFO DRV_NAME ": %s: Removing an active aggregator\n", 1990 printk(KERN_INFO DRV_NAME ": %s: Removing an active aggregator\n",
@@ -2031,7 +2035,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
2031 } 2035 }
2032 } 2036 }
2033 2037
2034 dprintk("Unbinding port %d\n", port->actor_port_number); 2038 pr_debug("Unbinding port %d\n", port->actor_port_number);
2035 // find the aggregator that this port is connected to 2039 // find the aggregator that this port is connected to
2036 temp_aggregator = __get_first_agg(port); 2040 temp_aggregator = __get_first_agg(port);
2037 for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) { 2041 for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) {
@@ -2162,7 +2166,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2162 2166
2163 switch (lacpdu->subtype) { 2167 switch (lacpdu->subtype) {
2164 case AD_TYPE_LACPDU: 2168 case AD_TYPE_LACPDU:
2165 dprintk("Received LACPDU on port %d\n", port->actor_port_number); 2169 pr_debug("Received LACPDU on port %d\n", port->actor_port_number);
2166 ad_rx_machine(lacpdu, port); 2170 ad_rx_machine(lacpdu, port);
2167 break; 2171 break;
2168 2172
@@ -2171,17 +2175,17 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2171 2175
2172 switch (((struct bond_marker *)lacpdu)->tlv_type) { 2176 switch (((struct bond_marker *)lacpdu)->tlv_type) {
2173 case AD_MARKER_INFORMATION_SUBTYPE: 2177 case AD_MARKER_INFORMATION_SUBTYPE:
2174 dprintk("Received Marker Information on port %d\n", port->actor_port_number); 2178 pr_debug("Received Marker Information on port %d\n", port->actor_port_number);
2175 ad_marker_info_received((struct bond_marker *)lacpdu, port); 2179 ad_marker_info_received((struct bond_marker *)lacpdu, port);
2176 break; 2180 break;
2177 2181
2178 case AD_MARKER_RESPONSE_SUBTYPE: 2182 case AD_MARKER_RESPONSE_SUBTYPE:
2179 dprintk("Received Marker Response on port %d\n", port->actor_port_number); 2183 pr_debug("Received Marker Response on port %d\n", port->actor_port_number);
2180 ad_marker_response_received((struct bond_marker *)lacpdu, port); 2184 ad_marker_response_received((struct bond_marker *)lacpdu, port);
2181 break; 2185 break;
2182 2186
2183 default: 2187 default:
2184 dprintk("Received an unknown Marker subtype on slot %d\n", port->actor_port_number); 2188 pr_debug("Received an unknown Marker subtype on slot %d\n", port->actor_port_number);
2185 } 2189 }
2186 } 2190 }
2187 } 2191 }
@@ -2209,7 +2213,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
2209 2213
2210 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; 2214 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
2211 port->actor_oper_port_key=port->actor_admin_port_key |= (__get_link_speed(port) << 1); 2215 port->actor_oper_port_key=port->actor_admin_port_key |= (__get_link_speed(port) << 1);
2212 dprintk("Port %d changed speed\n", port->actor_port_number); 2216 pr_debug("Port %d changed speed\n", port->actor_port_number);
2213 // there is no need to reselect a new aggregator, just signal the 2217 // there is no need to reselect a new aggregator, just signal the
2214 // state machines to reinitialize 2218 // state machines to reinitialize
2215 port->sm_vars |= AD_PORT_BEGIN; 2219 port->sm_vars |= AD_PORT_BEGIN;
@@ -2237,7 +2241,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
2237 2241
2238 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; 2242 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
2239 port->actor_oper_port_key=port->actor_admin_port_key |= __get_duplex(port); 2243 port->actor_oper_port_key=port->actor_admin_port_key |= __get_duplex(port);
2240 dprintk("Port %d changed duplex\n", port->actor_port_number); 2244 pr_debug("Port %d changed duplex\n", port->actor_port_number);
2241 // there is no need to reselect a new aggregator, just signal the 2245 // there is no need to reselect a new aggregator, just signal the
2242 // state machines to reinitialize 2246 // state machines to reinitialize
2243 port->sm_vars |= AD_PORT_BEGIN; 2247 port->sm_vars |= AD_PORT_BEGIN;
@@ -2267,14 +2271,14 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2267 // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed) 2271 // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
2268 // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report 2272 // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
2269 if (link == BOND_LINK_UP) { 2273 if (link == BOND_LINK_UP) {
2270 port->is_enabled = 1; 2274 port->is_enabled = true;
2271 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; 2275 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
2272 port->actor_oper_port_key=port->actor_admin_port_key |= __get_duplex(port); 2276 port->actor_oper_port_key=port->actor_admin_port_key |= __get_duplex(port);
2273 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS; 2277 port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
2274 port->actor_oper_port_key=port->actor_admin_port_key |= (__get_link_speed(port) << 1); 2278 port->actor_oper_port_key=port->actor_admin_port_key |= (__get_link_speed(port) << 1);
2275 } else { 2279 } else {
2276 /* link has failed */ 2280 /* link has failed */
2277 port->is_enabled = 0; 2281 port->is_enabled = false;
2278 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS; 2282 port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
2279 port->actor_oper_port_key= (port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS); 2283 port->actor_oper_port_key= (port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS);
2280 } 2284 }
@@ -2346,7 +2350,7 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
2346int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) 2350int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2347{ 2351{
2348 struct slave *slave, *start_at; 2352 struct slave *slave, *start_at;
2349 struct bonding *bond = dev->priv; 2353 struct bonding *bond = netdev_priv(dev);
2350 int slave_agg_no; 2354 int slave_agg_no;
2351 int slaves_in_agg; 2355 int slaves_in_agg;
2352 int agg_id; 2356 int agg_id;
@@ -2426,7 +2430,7 @@ out:
2426 2430
2427int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev) 2431int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev)
2428{ 2432{
2429 struct bonding *bond = dev->priv; 2433 struct bonding *bond = netdev_priv(dev);
2430 struct slave *slave = NULL; 2434 struct slave *slave = NULL;
2431 int ret = NET_RX_DROP; 2435 int ret = NET_RX_DROP;
2432 2436
@@ -2437,7 +2441,8 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2437 goto out; 2441 goto out;
2438 2442
2439 read_lock(&bond->lock); 2443 read_lock(&bond->lock);
2440 slave = bond_get_slave_by_dev((struct bonding *)dev->priv, orig_dev); 2444 slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
2445 orig_dev);
2441 if (!slave) 2446 if (!slave)
2442 goto out_unlock; 2447 goto out_unlock;
2443 2448
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index b5ee45f6d55a..8a83eb283c21 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -33,7 +33,6 @@
33#define AD_TIMER_INTERVAL 100 /*msec*/ 33#define AD_TIMER_INTERVAL 100 /*msec*/
34 34
35#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02} 35#define MULTICAST_LACPDU_ADDR {0x01, 0x80, 0xC2, 0x00, 0x00, 0x02}
36#define AD_MULTICAST_LACPDU_ADDR {MULTICAST_LACPDU_ADDR}
37 36
38#define AD_LACP_SLOW 0 37#define AD_LACP_SLOW 0
39#define AD_LACP_FAST 1 38#define AD_LACP_FAST 1
@@ -42,10 +41,11 @@ typedef struct mac_addr {
42 u8 mac_addr_value[ETH_ALEN]; 41 u8 mac_addr_value[ETH_ALEN];
43} mac_addr_t; 42} mac_addr_t;
44 43
45typedef enum { 44enum {
46 AD_BANDWIDTH = 0, 45 BOND_AD_STABLE = 0,
47 AD_COUNT 46 BOND_AD_BANDWIDTH = 1,
48} agg_selection_t; 47 BOND_AD_COUNT = 2,
48};
49 49
50// rx machine states(43.4.11 in the 802.3ad standard) 50// rx machine states(43.4.11 in the 802.3ad standard)
51typedef enum { 51typedef enum {
@@ -105,12 +105,6 @@ typedef enum {
105 105
106#pragma pack(1) 106#pragma pack(1)
107 107
108typedef struct ad_header {
109 struct mac_addr destination_address;
110 struct mac_addr source_address;
111 __be16 length_type;
112} ad_header_t;
113
114// Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) 108// Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard)
115typedef struct lacpdu { 109typedef struct lacpdu {
116 u8 subtype; // = LACP(= 0x01) 110 u8 subtype; // = LACP(= 0x01)
@@ -143,7 +137,7 @@ typedef struct lacpdu {
143} lacpdu_t; 137} lacpdu_t;
144 138
145typedef struct lacpdu_header { 139typedef struct lacpdu_header {
146 struct ad_header ad_header; 140 struct ethhdr hdr;
147 struct lacpdu lacpdu; 141 struct lacpdu lacpdu;
148} lacpdu_header_t; 142} lacpdu_header_t;
149 143
@@ -164,7 +158,7 @@ typedef struct bond_marker {
164} bond_marker_t; 158} bond_marker_t;
165 159
166typedef struct bond_marker_header { 160typedef struct bond_marker_header {
167 struct ad_header ad_header; 161 struct ethhdr hdr;
168 struct bond_marker marker; 162 struct bond_marker marker;
169} bond_marker_header_t; 163} bond_marker_header_t;
170 164
@@ -183,7 +177,7 @@ struct port;
183typedef struct aggregator { 177typedef struct aggregator {
184 struct mac_addr aggregator_mac_address; 178 struct mac_addr aggregator_mac_address;
185 u16 aggregator_identifier; 179 u16 aggregator_identifier;
186 u16 is_individual; // BOOLEAN 180 bool is_individual;
187 u16 actor_admin_aggregator_key; 181 u16 actor_admin_aggregator_key;
188 u16 actor_oper_aggregator_key; 182 u16 actor_oper_aggregator_key;
189 struct mac_addr partner_system; 183 struct mac_addr partner_system;
@@ -198,6 +192,15 @@ typedef struct aggregator {
198 u16 num_of_ports; 192 u16 num_of_ports;
199} aggregator_t; 193} aggregator_t;
200 194
195struct port_params {
196 struct mac_addr system;
197 u16 system_priority;
198 u16 key;
199 u16 port_number;
200 u16 port_priority;
201 u16 port_state;
202};
203
201// port structure(43.4.6 in the 802.3ad standard) 204// port structure(43.4.6 in the 802.3ad standard)
202typedef struct port { 205typedef struct port {
203 u16 actor_port_number; 206 u16 actor_port_number;
@@ -205,24 +208,17 @@ typedef struct port {
205 struct mac_addr actor_system; // This parameter is added here although it is not specified in the standard, just for simplification 208 struct mac_addr actor_system; // This parameter is added here although it is not specified in the standard, just for simplification
206 u16 actor_system_priority; // This parameter is added here although it is not specified in the standard, just for simplification 209 u16 actor_system_priority; // This parameter is added here although it is not specified in the standard, just for simplification
207 u16 actor_port_aggregator_identifier; 210 u16 actor_port_aggregator_identifier;
208 u16 ntt; // BOOLEAN 211 bool ntt;
209 u16 actor_admin_port_key; 212 u16 actor_admin_port_key;
210 u16 actor_oper_port_key; 213 u16 actor_oper_port_key;
211 u8 actor_admin_port_state; 214 u8 actor_admin_port_state;
212 u8 actor_oper_port_state; 215 u8 actor_oper_port_state;
213 struct mac_addr partner_admin_system; 216
214 struct mac_addr partner_oper_system; 217 struct port_params partner_admin;
215 u16 partner_admin_system_priority; 218 struct port_params partner_oper;
216 u16 partner_oper_system_priority; 219
217 u16 partner_admin_key; 220 bool is_enabled;
218 u16 partner_oper_key; 221
219 u16 partner_admin_port_number;
220 u16 partner_oper_port_number;
221 u16 partner_admin_port_priority;
222 u16 partner_oper_port_priority;
223 u8 partner_admin_port_state;
224 u8 partner_oper_port_state;
225 u16 is_enabled; // BOOLEAN
226 // ****** PRIVATE PARAMETERS ****** 222 // ****** PRIVATE PARAMETERS ******
227 u16 sm_vars; // all state machines variables for this port 223 u16 sm_vars; // all state machines variables for this port
228 rx_states_t sm_rx_state; // state machine rx state 224 rx_states_t sm_rx_state; // state machine rx state
@@ -241,10 +237,10 @@ typedef struct port {
241} port_t; 237} port_t;
242 238
243// system structure 239// system structure
244typedef struct ad_system { 240struct ad_system {
245 u16 sys_priority; 241 u16 sys_priority;
246 struct mac_addr sys_mac_addr; 242 struct mac_addr sys_mac_addr;
247} ad_system_t; 243};
248 244
249#ifdef __ia64__ 245#ifdef __ia64__
250#pragma pack() 246#pragma pack()
@@ -255,7 +251,7 @@ typedef struct ad_system {
255#define SLAVE_AD_INFO(slave) ((slave)->ad_info) 251#define SLAVE_AD_INFO(slave) ((slave)->ad_info)
256 252
257struct ad_bond_info { 253struct ad_bond_info {
258 ad_system_t system; // 802.3ad system structure 254 struct ad_system system; /* 802.3ad system structure */
259 u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes 255 u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
260 u32 agg_select_mode; // Mode of selection of active aggregator(bandwidth/count) 256 u32 agg_select_mode; // Mode of selection of active aggregator(bandwidth/count)
261 int lacp_fast; /* whether fast periodic tx should be 257 int lacp_fast; /* whether fast periodic tx should be
@@ -277,6 +273,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution, int lacp_fas
277int bond_3ad_bind_slave(struct slave *slave); 273int bond_3ad_bind_slave(struct slave *slave);
278void bond_3ad_unbind_slave(struct slave *slave); 274void bond_3ad_unbind_slave(struct slave *slave);
279void bond_3ad_state_machine_handler(struct work_struct *); 275void bond_3ad_state_machine_handler(struct work_struct *);
276void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
280void bond_3ad_adapter_speed_changed(struct slave *slave); 277void bond_3ad_adapter_speed_changed(struct slave *slave);
281void bond_3ad_adapter_duplex_changed(struct slave *slave); 278void bond_3ad_adapter_duplex_changed(struct slave *slave);
282void bond_3ad_handle_link_change(struct slave *slave, char link); 279void bond_3ad_handle_link_change(struct slave *slave, char link);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 87437c788476..27fb7f5c21cf 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -20,8 +20,6 @@
20 * 20 *
21 */ 21 */
22 22
23//#define BONDING_DEBUG 1
24
25#include <linux/skbuff.h> 23#include <linux/skbuff.h>
26#include <linux/netdevice.h> 24#include <linux/netdevice.h>
27#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
@@ -346,30 +344,37 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
346 344
347static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype, struct net_device *orig_dev) 345static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype, struct net_device *orig_dev)
348{ 346{
349 struct bonding *bond = bond_dev->priv; 347 struct bonding *bond;
350 struct arp_pkt *arp = (struct arp_pkt *)skb->data; 348 struct arp_pkt *arp = (struct arp_pkt *)skb->data;
351 int res = NET_RX_DROP; 349 int res = NET_RX_DROP;
352 350
353 if (dev_net(bond_dev) != &init_net) 351 if (dev_net(bond_dev) != &init_net)
354 goto out; 352 goto out;
355 353
356 if (!(bond_dev->flags & IFF_MASTER)) 354 while (bond_dev->priv_flags & IFF_802_1Q_VLAN)
355 bond_dev = vlan_dev_real_dev(bond_dev);
356
357 if (!(bond_dev->priv_flags & IFF_BONDING) ||
358 !(bond_dev->flags & IFF_MASTER))
357 goto out; 359 goto out;
358 360
359 if (!arp) { 361 if (!arp) {
360 dprintk("Packet has no ARP data\n"); 362 pr_debug("Packet has no ARP data\n");
361 goto out; 363 goto out;
362 } 364 }
363 365
364 if (skb->len < sizeof(struct arp_pkt)) { 366 if (skb->len < sizeof(struct arp_pkt)) {
365 dprintk("Packet is too small to be an ARP\n"); 367 pr_debug("Packet is too small to be an ARP\n");
366 goto out; 368 goto out;
367 } 369 }
368 370
369 if (arp->op_code == htons(ARPOP_REPLY)) { 371 if (arp->op_code == htons(ARPOP_REPLY)) {
370 /* update rx hash table for this ARP */ 372 /* update rx hash table for this ARP */
373 printk("rar: update orig %s bond_dev %s\n", orig_dev->name,
374 bond_dev->name);
375 bond = netdev_priv(bond_dev);
371 rlb_update_entry_from_arp(bond, arp); 376 rlb_update_entry_from_arp(bond, arp);
372 dprintk("Server received an ARP Reply from client\n"); 377 pr_debug("Server received an ARP Reply from client\n");
373 } 378 }
374 379
375 res = NET_RX_SUCCESS; 380 res = NET_RX_SUCCESS;
@@ -723,7 +728,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
723 if (tx_slave) { 728 if (tx_slave) {
724 memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN); 729 memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN);
725 } 730 }
726 dprintk("Server sent ARP Reply packet\n"); 731 pr_debug("Server sent ARP Reply packet\n");
727 } else if (arp->op_code == htons(ARPOP_REQUEST)) { 732 } else if (arp->op_code == htons(ARPOP_REQUEST)) {
728 /* Create an entry in the rx_hashtbl for this client as a 733 /* Create an entry in the rx_hashtbl for this client as a
729 * place holder. 734 * place holder.
@@ -743,7 +748,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
743 * updated with their assigned mac. 748 * updated with their assigned mac.
744 */ 749 */
745 rlb_req_update_subnet_clients(bond, arp->ip_src); 750 rlb_req_update_subnet_clients(bond, arp->ip_src);
746 dprintk("Server sent ARP Request packet\n"); 751 pr_debug("Server sent ARP Request packet\n");
747 } 752 }
748 753
749 return tx_slave; 754 return tx_slave;
@@ -818,7 +823,7 @@ static int rlb_initialize(struct bonding *bond)
818 823
819 /*initialize packet type*/ 824 /*initialize packet type*/
820 pk_type->type = __constant_htons(ETH_P_ARP); 825 pk_type->type = __constant_htons(ETH_P_ARP);
821 pk_type->dev = bond->dev; 826 pk_type->dev = NULL;
822 pk_type->func = rlb_arp_recv; 827 pk_type->func = rlb_arp_recv;
823 828
824 /* register to receive ARPs */ 829 /* register to receive ARPs */
@@ -1211,11 +1216,6 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
1211 } 1216 }
1212 1217
1213 bond_for_each_slave(bond, slave, i) { 1218 bond_for_each_slave(bond, slave, i) {
1214 if (slave->dev->set_mac_address == NULL) {
1215 res = -EOPNOTSUPP;
1216 goto unwind;
1217 }
1218
1219 /* save net_device's current hw address */ 1219 /* save net_device's current hw address */
1220 memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN); 1220 memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
1221 1221
@@ -1224,9 +1224,8 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
1224 /* restore net_device's hw address */ 1224 /* restore net_device's hw address */
1225 memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN); 1225 memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
1226 1226
1227 if (res) { 1227 if (res)
1228 goto unwind; 1228 goto unwind;
1229 }
1230 } 1229 }
1231 1230
1232 return 0; 1231 return 0;
@@ -1285,7 +1284,7 @@ void bond_alb_deinitialize(struct bonding *bond)
1285 1284
1286int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev) 1285int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1287{ 1286{
1288 struct bonding *bond = bond_dev->priv; 1287 struct bonding *bond = netdev_priv(bond_dev);
1289 struct ethhdr *eth_data; 1288 struct ethhdr *eth_data;
1290 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); 1289 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1291 struct slave *tx_slave = NULL; 1290 struct slave *tx_slave = NULL;
@@ -1706,7 +1705,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
1706 */ 1705 */
1707int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) 1706int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
1708{ 1707{
1709 struct bonding *bond = bond_dev->priv; 1708 struct bonding *bond = netdev_priv(bond_dev);
1710 struct sockaddr *sa = addr; 1709 struct sockaddr *sa = addr;
1711 struct slave *slave, *swap_slave; 1710 struct slave *slave, *swap_slave;
1712 int res; 1711 int res;
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
new file mode 100644
index 000000000000..0d73bf5ac5a5
--- /dev/null
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -0,0 +1,216 @@
1/*
2 * Copyright(c) 2008 Hewlett-Packard Development Company, L.P.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 */
22
23#include <linux/types.h>
24#include <linux/if_vlan.h>
25#include <net/ipv6.h>
26#include <net/ndisc.h>
27#include <net/addrconf.h>
28#include "bonding.h"
29
30/*
31 * Assign bond->master_ipv6 to the next IPv6 address in the list, or
32 * zero it out if there are none.
33 */
34static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
35{
36 struct inet6_dev *idev;
37 struct inet6_ifaddr *ifa;
38
39 if (!dev)
40 return;
41
42 idev = in6_dev_get(dev);
43 if (!idev)
44 return;
45
46 read_lock_bh(&idev->lock);
47 ifa = idev->addr_list;
48 if (ifa)
49 ipv6_addr_copy(addr, &ifa->addr);
50 else
51 ipv6_addr_set(addr, 0, 0, 0, 0);
52
53 read_unlock_bh(&idev->lock);
54
55 in6_dev_put(idev);
56}
57
58static void bond_na_send(struct net_device *slave_dev,
59 struct in6_addr *daddr,
60 int router,
61 unsigned short vlan_id)
62{
63 struct in6_addr mcaddr;
64 struct icmp6hdr icmp6h = {
65 .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
66 };
67 struct sk_buff *skb;
68
69 icmp6h.icmp6_router = router;
70 icmp6h.icmp6_solicited = 0;
71 icmp6h.icmp6_override = 1;
72
73 addrconf_addr_solict_mult(daddr, &mcaddr);
74
75 pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n",
76 slave_dev->name, &mcaddr, daddr);
77
78 skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr,
79 ND_OPT_TARGET_LL_ADDR);
80
81 if (!skb) {
82 printk(KERN_ERR DRV_NAME ": NA packet allocation failed\n");
83 return;
84 }
85
86 if (vlan_id) {
87 skb = vlan_put_tag(skb, vlan_id);
88 if (!skb) {
89 printk(KERN_ERR DRV_NAME ": failed to insert VLAN tag\n");
90 return;
91 }
92 }
93
94 ndisc_send_skb(skb, slave_dev, NULL, &mcaddr, daddr, &icmp6h);
95}
96
97/*
98 * Kick out an unsolicited Neighbor Advertisement for an IPv6 address on
99 * the bonding master. This will help the switch learn our address
100 * if in active-backup mode.
101 *
102 * Caller must hold curr_slave_lock for read or better
103 */
104void bond_send_unsolicited_na(struct bonding *bond)
105{
106 struct slave *slave = bond->curr_active_slave;
107 struct vlan_entry *vlan;
108 struct inet6_dev *idev;
109 int is_router;
110
111 pr_debug("bond_send_unsol_na: bond %s slave %s\n", bond->dev->name,
112 slave ? slave->dev->name : "NULL");
113
114 if (!slave || !bond->send_unsol_na ||
115 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
116 return;
117
118 bond->send_unsol_na--;
119
120 idev = in6_dev_get(bond->dev);
121 if (!idev)
122 return;
123
124 is_router = !!idev->cnf.forwarding;
125
126 in6_dev_put(idev);
127
128 if (!ipv6_addr_any(&bond->master_ipv6))
129 bond_na_send(slave->dev, &bond->master_ipv6, is_router, 0);
130
131 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
132 if (!ipv6_addr_any(&vlan->vlan_ipv6)) {
133 bond_na_send(slave->dev, &vlan->vlan_ipv6, is_router,
134 vlan->vlan_id);
135 }
136 }
137}
138
139/*
140 * bond_inet6addr_event: handle inet6addr notifier chain events.
141 *
142 * We keep track of device IPv6 addresses primarily to use as source
143 * addresses in NS probes.
144 *
145 * We track one IPv6 for the main device (if it has one).
146 */
147static int bond_inet6addr_event(struct notifier_block *this,
148 unsigned long event,
149 void *ptr)
150{
151 struct inet6_ifaddr *ifa = ptr;
152 struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
153 struct bonding *bond;
154 struct vlan_entry *vlan;
155
156 if (dev_net(event_dev) != &init_net)
157 return NOTIFY_DONE;
158
159 list_for_each_entry(bond, &bond_dev_list, bond_list) {
160 if (bond->dev == event_dev) {
161 switch (event) {
162 case NETDEV_UP:
163 if (ipv6_addr_any(&bond->master_ipv6))
164 ipv6_addr_copy(&bond->master_ipv6,
165 &ifa->addr);
166 return NOTIFY_OK;
167 case NETDEV_DOWN:
168 if (ipv6_addr_equal(&bond->master_ipv6,
169 &ifa->addr))
170 bond_glean_dev_ipv6(bond->dev,
171 &bond->master_ipv6);
172 return NOTIFY_OK;
173 default:
174 return NOTIFY_DONE;
175 }
176 }
177
178 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
179 vlan_dev = vlan_group_get_device(bond->vlgrp,
180 vlan->vlan_id);
181 if (vlan_dev == event_dev) {
182 switch (event) {
183 case NETDEV_UP:
184 if (ipv6_addr_any(&vlan->vlan_ipv6))
185 ipv6_addr_copy(&vlan->vlan_ipv6,
186 &ifa->addr);
187 return NOTIFY_OK;
188 case NETDEV_DOWN:
189 if (ipv6_addr_equal(&vlan->vlan_ipv6,
190 &ifa->addr))
191 bond_glean_dev_ipv6(vlan_dev,
192 &vlan->vlan_ipv6);
193 return NOTIFY_OK;
194 default:
195 return NOTIFY_DONE;
196 }
197 }
198 }
199 }
200 return NOTIFY_DONE;
201}
202
203static struct notifier_block bond_inet6addr_notifier = {
204 .notifier_call = bond_inet6addr_event,
205};
206
207void bond_register_ipv6_notifier(void)
208{
209 register_inet6addr_notifier(&bond_inet6addr_notifier);
210}
211
212void bond_unregister_ipv6_notifier(void)
213{
214 unregister_inet6addr_notifier(&bond_inet6addr_notifier);
215}
216
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a3efba59eee9..460c2cad2755 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -31,8 +31,6 @@
31 * 31 *
32 */ 32 */
33 33
34//#define BONDING_DEBUG 1
35
36#include <linux/kernel.h> 34#include <linux/kernel.h>
37#include <linux/module.h> 35#include <linux/module.h>
38#include <linux/types.h> 36#include <linux/types.h>
@@ -89,6 +87,7 @@
89 87
90static int max_bonds = BOND_DEFAULT_MAX_BONDS; 88static int max_bonds = BOND_DEFAULT_MAX_BONDS;
91static int num_grat_arp = 1; 89static int num_grat_arp = 1;
90static int num_unsol_na = 1;
92static int miimon = BOND_LINK_MON_INTERV; 91static int miimon = BOND_LINK_MON_INTERV;
93static int updelay = 0; 92static int updelay = 0;
94static int downdelay = 0; 93static int downdelay = 0;
@@ -96,6 +95,7 @@ static int use_carrier = 1;
96static char *mode = NULL; 95static char *mode = NULL;
97static char *primary = NULL; 96static char *primary = NULL;
98static char *lacp_rate = NULL; 97static char *lacp_rate = NULL;
98static char *ad_select = NULL;
99static char *xmit_hash_policy = NULL; 99static char *xmit_hash_policy = NULL;
100static int arp_interval = BOND_LINK_ARP_INTERV; 100static int arp_interval = BOND_LINK_ARP_INTERV;
101static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, }; 101static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, };
@@ -107,6 +107,8 @@ module_param(max_bonds, int, 0);
107MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); 107MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
108module_param(num_grat_arp, int, 0644); 108module_param(num_grat_arp, int, 0644);
109MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event"); 109MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event");
110module_param(num_unsol_na, int, 0644);
111MODULE_PARM_DESC(num_unsol_na, "Number of unsolicited IPv6 Neighbor Advertisements packets to send on failover event");
110module_param(miimon, int, 0); 112module_param(miimon, int, 0);
111MODULE_PARM_DESC(miimon, "Link check interval in milliseconds"); 113MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
112module_param(updelay, int, 0); 114module_param(updelay, int, 0);
@@ -127,6 +129,8 @@ MODULE_PARM_DESC(primary, "Primary network device to use");
127module_param(lacp_rate, charp, 0); 129module_param(lacp_rate, charp, 0);
128MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner " 130MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner "
129 "(slow/fast)"); 131 "(slow/fast)");
132module_param(ad_select, charp, 0);
133MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic: stable (0, default), bandwidth (1), count (2)");
130module_param(xmit_hash_policy, charp, 0); 134module_param(xmit_hash_policy, charp, 0);
131MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)" 135MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)"
132 ", 1 for layer 3+4"); 136 ", 1 for layer 3+4");
@@ -150,7 +154,6 @@ LIST_HEAD(bond_dev_list);
150static struct proc_dir_entry *bond_proc_dir = NULL; 154static struct proc_dir_entry *bond_proc_dir = NULL;
151#endif 155#endif
152 156
153extern struct rw_semaphore bonding_rwsem;
154static __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ; 157static __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0, } ;
155static int arp_ip_count = 0; 158static int arp_ip_count = 0;
156static int bond_mode = BOND_MODE_ROUNDROBIN; 159static int bond_mode = BOND_MODE_ROUNDROBIN;
@@ -158,13 +161,13 @@ static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2;
158static int lacp_fast = 0; 161static int lacp_fast = 0;
159 162
160 163
161struct bond_parm_tbl bond_lacp_tbl[] = { 164const struct bond_parm_tbl bond_lacp_tbl[] = {
162{ "slow", AD_LACP_SLOW}, 165{ "slow", AD_LACP_SLOW},
163{ "fast", AD_LACP_FAST}, 166{ "fast", AD_LACP_FAST},
164{ NULL, -1}, 167{ NULL, -1},
165}; 168};
166 169
167struct bond_parm_tbl bond_mode_tbl[] = { 170const struct bond_parm_tbl bond_mode_tbl[] = {
168{ "balance-rr", BOND_MODE_ROUNDROBIN}, 171{ "balance-rr", BOND_MODE_ROUNDROBIN},
169{ "active-backup", BOND_MODE_ACTIVEBACKUP}, 172{ "active-backup", BOND_MODE_ACTIVEBACKUP},
170{ "balance-xor", BOND_MODE_XOR}, 173{ "balance-xor", BOND_MODE_XOR},
@@ -175,14 +178,14 @@ struct bond_parm_tbl bond_mode_tbl[] = {
175{ NULL, -1}, 178{ NULL, -1},
176}; 179};
177 180
178struct bond_parm_tbl xmit_hashtype_tbl[] = { 181const struct bond_parm_tbl xmit_hashtype_tbl[] = {
179{ "layer2", BOND_XMIT_POLICY_LAYER2}, 182{ "layer2", BOND_XMIT_POLICY_LAYER2},
180{ "layer3+4", BOND_XMIT_POLICY_LAYER34}, 183{ "layer3+4", BOND_XMIT_POLICY_LAYER34},
181{ "layer2+3", BOND_XMIT_POLICY_LAYER23}, 184{ "layer2+3", BOND_XMIT_POLICY_LAYER23},
182{ NULL, -1}, 185{ NULL, -1},
183}; 186};
184 187
185struct bond_parm_tbl arp_validate_tbl[] = { 188const struct bond_parm_tbl arp_validate_tbl[] = {
186{ "none", BOND_ARP_VALIDATE_NONE}, 189{ "none", BOND_ARP_VALIDATE_NONE},
187{ "active", BOND_ARP_VALIDATE_ACTIVE}, 190{ "active", BOND_ARP_VALIDATE_ACTIVE},
188{ "backup", BOND_ARP_VALIDATE_BACKUP}, 191{ "backup", BOND_ARP_VALIDATE_BACKUP},
@@ -190,13 +193,20 @@ struct bond_parm_tbl arp_validate_tbl[] = {
190{ NULL, -1}, 193{ NULL, -1},
191}; 194};
192 195
193struct bond_parm_tbl fail_over_mac_tbl[] = { 196const struct bond_parm_tbl fail_over_mac_tbl[] = {
194{ "none", BOND_FOM_NONE}, 197{ "none", BOND_FOM_NONE},
195{ "active", BOND_FOM_ACTIVE}, 198{ "active", BOND_FOM_ACTIVE},
196{ "follow", BOND_FOM_FOLLOW}, 199{ "follow", BOND_FOM_FOLLOW},
197{ NULL, -1}, 200{ NULL, -1},
198}; 201};
199 202
203struct bond_parm_tbl ad_select_tbl[] = {
204{ "stable", BOND_AD_STABLE},
205{ "bandwidth", BOND_AD_BANDWIDTH},
206{ "count", BOND_AD_COUNT},
207{ NULL, -1},
208};
209
200/*-------------------------- Forward declarations ---------------------------*/ 210/*-------------------------- Forward declarations ---------------------------*/
201 211
202static void bond_send_gratuitous_arp(struct bonding *bond); 212static void bond_send_gratuitous_arp(struct bonding *bond);
@@ -206,24 +216,20 @@ static void bond_deinit(struct net_device *bond_dev);
206 216
207static const char *bond_mode_name(int mode) 217static const char *bond_mode_name(int mode)
208{ 218{
209 switch (mode) { 219 static const char *names[] = {
210 case BOND_MODE_ROUNDROBIN : 220 [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
211 return "load balancing (round-robin)"; 221 [BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
212 case BOND_MODE_ACTIVEBACKUP : 222 [BOND_MODE_XOR] = "load balancing (xor)",
213 return "fault-tolerance (active-backup)"; 223 [BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
214 case BOND_MODE_XOR : 224 [BOND_MODE_8023AD]= "IEEE 802.3ad Dynamic link aggregation",
215 return "load balancing (xor)"; 225 [BOND_MODE_TLB] = "transmit load balancing",
216 case BOND_MODE_BROADCAST : 226 [BOND_MODE_ALB] = "adaptive load balancing",
217 return "fault-tolerance (broadcast)"; 227 };
218 case BOND_MODE_8023AD: 228
219 return "IEEE 802.3ad Dynamic link aggregation"; 229 if (mode < 0 || mode > BOND_MODE_ALB)
220 case BOND_MODE_TLB:
221 return "transmit load balancing";
222 case BOND_MODE_ALB:
223 return "adaptive load balancing";
224 default:
225 return "unknown"; 230 return "unknown";
226 } 231
232 return names[mode];
227} 233}
228 234
229/*---------------------------------- VLAN -----------------------------------*/ 235/*---------------------------------- VLAN -----------------------------------*/
@@ -239,17 +245,16 @@ static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
239{ 245{
240 struct vlan_entry *vlan; 246 struct vlan_entry *vlan;
241 247
242 dprintk("bond: %s, vlan id %d\n", 248 pr_debug("bond: %s, vlan id %d\n",
243 (bond ? bond->dev->name: "None"), vlan_id); 249 (bond ? bond->dev->name: "None"), vlan_id);
244 250
245 vlan = kmalloc(sizeof(struct vlan_entry), GFP_KERNEL); 251 vlan = kzalloc(sizeof(struct vlan_entry), GFP_KERNEL);
246 if (!vlan) { 252 if (!vlan) {
247 return -ENOMEM; 253 return -ENOMEM;
248 } 254 }
249 255
250 INIT_LIST_HEAD(&vlan->vlan_list); 256 INIT_LIST_HEAD(&vlan->vlan_list);
251 vlan->vlan_id = vlan_id; 257 vlan->vlan_id = vlan_id;
252 vlan->vlan_ip = 0;
253 258
254 write_lock_bh(&bond->lock); 259 write_lock_bh(&bond->lock);
255 260
@@ -257,7 +262,7 @@ static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
257 262
258 write_unlock_bh(&bond->lock); 263 write_unlock_bh(&bond->lock);
259 264
260 dprintk("added VLAN ID %d on bond %s\n", vlan_id, bond->dev->name); 265 pr_debug("added VLAN ID %d on bond %s\n", vlan_id, bond->dev->name);
261 266
262 return 0; 267 return 0;
263} 268}
@@ -274,7 +279,7 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
274 struct vlan_entry *vlan; 279 struct vlan_entry *vlan;
275 int res = -ENODEV; 280 int res = -ENODEV;
276 281
277 dprintk("bond: %s, vlan id %d\n", bond->dev->name, vlan_id); 282 pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id);
278 283
279 write_lock_bh(&bond->lock); 284 write_lock_bh(&bond->lock);
280 285
@@ -282,12 +287,10 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
282 if (vlan->vlan_id == vlan_id) { 287 if (vlan->vlan_id == vlan_id) {
283 list_del(&vlan->vlan_list); 288 list_del(&vlan->vlan_list);
284 289
285 if ((bond->params.mode == BOND_MODE_TLB) || 290 if (bond_is_lb(bond))
286 (bond->params.mode == BOND_MODE_ALB)) {
287 bond_alb_clear_vlan(bond, vlan_id); 291 bond_alb_clear_vlan(bond, vlan_id);
288 }
289 292
290 dprintk("removed VLAN ID %d from bond %s\n", vlan_id, 293 pr_debug("removed VLAN ID %d from bond %s\n", vlan_id,
291 bond->dev->name); 294 bond->dev->name);
292 295
293 kfree(vlan); 296 kfree(vlan);
@@ -307,7 +310,7 @@ static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
307 } 310 }
308 } 311 }
309 312
310 dprintk("couldn't find VLAN ID %d in bond %s\n", vlan_id, 313 pr_debug("couldn't find VLAN ID %d in bond %s\n", vlan_id,
311 bond->dev->name); 314 bond->dev->name);
312 315
313out: 316out:
@@ -331,13 +334,13 @@ static int bond_has_challenged_slaves(struct bonding *bond)
331 334
332 bond_for_each_slave(bond, slave, i) { 335 bond_for_each_slave(bond, slave, i) {
333 if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) { 336 if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) {
334 dprintk("found VLAN challenged slave - %s\n", 337 pr_debug("found VLAN challenged slave - %s\n",
335 slave->dev->name); 338 slave->dev->name);
336 return 1; 339 return 1;
337 } 340 }
338 } 341 }
339 342
340 dprintk("no VLAN challenged slaves found\n"); 343 pr_debug("no VLAN challenged slaves found\n");
341 return 0; 344 return 0;
342} 345}
343 346
@@ -442,7 +445,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_de
442 */ 445 */
443static void bond_vlan_rx_register(struct net_device *bond_dev, struct vlan_group *grp) 446static void bond_vlan_rx_register(struct net_device *bond_dev, struct vlan_group *grp)
444{ 447{
445 struct bonding *bond = bond_dev->priv; 448 struct bonding *bond = netdev_priv(bond_dev);
446 struct slave *slave; 449 struct slave *slave;
447 int i; 450 int i;
448 451
@@ -450,10 +453,11 @@ static void bond_vlan_rx_register(struct net_device *bond_dev, struct vlan_group
450 453
451 bond_for_each_slave(bond, slave, i) { 454 bond_for_each_slave(bond, slave, i) {
452 struct net_device *slave_dev = slave->dev; 455 struct net_device *slave_dev = slave->dev;
456 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
453 457
454 if ((slave_dev->features & NETIF_F_HW_VLAN_RX) && 458 if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
455 slave_dev->vlan_rx_register) { 459 slave_ops->ndo_vlan_rx_register) {
456 slave_dev->vlan_rx_register(slave_dev, grp); 460 slave_ops->ndo_vlan_rx_register(slave_dev, grp);
457 } 461 }
458 } 462 }
459} 463}
@@ -465,16 +469,17 @@ static void bond_vlan_rx_register(struct net_device *bond_dev, struct vlan_group
465 */ 469 */
466static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid) 470static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
467{ 471{
468 struct bonding *bond = bond_dev->priv; 472 struct bonding *bond = netdev_priv(bond_dev);
469 struct slave *slave; 473 struct slave *slave;
470 int i, res; 474 int i, res;
471 475
472 bond_for_each_slave(bond, slave, i) { 476 bond_for_each_slave(bond, slave, i) {
473 struct net_device *slave_dev = slave->dev; 477 struct net_device *slave_dev = slave->dev;
478 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
474 479
475 if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) && 480 if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
476 slave_dev->vlan_rx_add_vid) { 481 slave_ops->ndo_vlan_rx_add_vid) {
477 slave_dev->vlan_rx_add_vid(slave_dev, vid); 482 slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid);
478 } 483 }
479 } 484 }
480 485
@@ -493,21 +498,22 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
493 */ 498 */
494static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid) 499static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
495{ 500{
496 struct bonding *bond = bond_dev->priv; 501 struct bonding *bond = netdev_priv(bond_dev);
497 struct slave *slave; 502 struct slave *slave;
498 struct net_device *vlan_dev; 503 struct net_device *vlan_dev;
499 int i, res; 504 int i, res;
500 505
501 bond_for_each_slave(bond, slave, i) { 506 bond_for_each_slave(bond, slave, i) {
502 struct net_device *slave_dev = slave->dev; 507 struct net_device *slave_dev = slave->dev;
508 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
503 509
504 if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) && 510 if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
505 slave_dev->vlan_rx_kill_vid) { 511 slave_ops->ndo_vlan_rx_kill_vid) {
506 /* Save and then restore vlan_dev in the grp array, 512 /* Save and then restore vlan_dev in the grp array,
507 * since the slave's driver might clear it. 513 * since the slave's driver might clear it.
508 */ 514 */
509 vlan_dev = vlan_group_get_device(bond->vlgrp, vid); 515 vlan_dev = vlan_group_get_device(bond->vlgrp, vid);
510 slave_dev->vlan_rx_kill_vid(slave_dev, vid); 516 slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
511 vlan_group_set_device(bond->vlgrp, vid, vlan_dev); 517 vlan_group_set_device(bond->vlgrp, vid, vlan_dev);
512 } 518 }
513 } 519 }
@@ -523,26 +529,23 @@ static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
523static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev) 529static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
524{ 530{
525 struct vlan_entry *vlan; 531 struct vlan_entry *vlan;
532 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
526 533
527 write_lock_bh(&bond->lock); 534 write_lock_bh(&bond->lock);
528 535
529 if (list_empty(&bond->vlan_list)) { 536 if (list_empty(&bond->vlan_list))
530 goto out; 537 goto out;
531 }
532 538
533 if ((slave_dev->features & NETIF_F_HW_VLAN_RX) && 539 if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
534 slave_dev->vlan_rx_register) { 540 slave_ops->ndo_vlan_rx_register)
535 slave_dev->vlan_rx_register(slave_dev, bond->vlgrp); 541 slave_ops->ndo_vlan_rx_register(slave_dev, bond->vlgrp);
536 }
537 542
538 if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || 543 if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
539 !(slave_dev->vlan_rx_add_vid)) { 544 !(slave_ops->ndo_vlan_rx_add_vid))
540 goto out; 545 goto out;
541 }
542 546
543 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 547 list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
544 slave_dev->vlan_rx_add_vid(slave_dev, vlan->vlan_id); 548 slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
545 }
546 549
547out: 550out:
548 write_unlock_bh(&bond->lock); 551 write_unlock_bh(&bond->lock);
@@ -550,34 +553,32 @@ out:
550 553
551static void bond_del_vlans_from_slave(struct bonding *bond, struct net_device *slave_dev) 554static void bond_del_vlans_from_slave(struct bonding *bond, struct net_device *slave_dev)
552{ 555{
556 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
553 struct vlan_entry *vlan; 557 struct vlan_entry *vlan;
554 struct net_device *vlan_dev; 558 struct net_device *vlan_dev;
555 559
556 write_lock_bh(&bond->lock); 560 write_lock_bh(&bond->lock);
557 561
558 if (list_empty(&bond->vlan_list)) { 562 if (list_empty(&bond->vlan_list))
559 goto out; 563 goto out;
560 }
561 564
562 if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) || 565 if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
563 !(slave_dev->vlan_rx_kill_vid)) { 566 !(slave_ops->ndo_vlan_rx_kill_vid))
564 goto unreg; 567 goto unreg;
565 }
566 568
567 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { 569 list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
568 /* Save and then restore vlan_dev in the grp array, 570 /* Save and then restore vlan_dev in the grp array,
569 * since the slave's driver might clear it. 571 * since the slave's driver might clear it.
570 */ 572 */
571 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 573 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
572 slave_dev->vlan_rx_kill_vid(slave_dev, vlan->vlan_id); 574 slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
573 vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev); 575 vlan_group_set_device(bond->vlgrp, vlan->vlan_id, vlan_dev);
574 } 576 }
575 577
576unreg: 578unreg:
577 if ((slave_dev->features & NETIF_F_HW_VLAN_RX) && 579 if ((slave_dev->features & NETIF_F_HW_VLAN_RX) &&
578 slave_dev->vlan_rx_register) { 580 slave_ops->ndo_vlan_rx_register)
579 slave_dev->vlan_rx_register(slave_dev, NULL); 581 slave_ops->ndo_vlan_rx_register(slave_dev, NULL);
580 }
581 582
582out: 583out:
583 write_unlock_bh(&bond->lock); 584 write_unlock_bh(&bond->lock);
@@ -686,15 +687,15 @@ static int bond_update_speed_duplex(struct slave *slave)
686 */ 687 */
687static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_dev, int reporting) 688static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_dev, int reporting)
688{ 689{
690 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
689 static int (* ioctl)(struct net_device *, struct ifreq *, int); 691 static int (* ioctl)(struct net_device *, struct ifreq *, int);
690 struct ifreq ifr; 692 struct ifreq ifr;
691 struct mii_ioctl_data *mii; 693 struct mii_ioctl_data *mii;
692 694
693 if (bond->params.use_carrier) { 695 if (bond->params.use_carrier)
694 return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0; 696 return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
695 }
696 697
697 ioctl = slave_dev->do_ioctl; 698 ioctl = slave_ops->ndo_do_ioctl;
698 if (ioctl) { 699 if (ioctl) {
699 /* TODO: set pointer to correct ioctl on a per team member */ 700 /* TODO: set pointer to correct ioctl on a per team member */
700 /* bases to make this more efficient. that is, once */ 701 /* bases to make this more efficient. that is, once */
@@ -927,7 +928,7 @@ static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
927 */ 928 */
928static void bond_mc_list_flush(struct net_device *bond_dev, struct net_device *slave_dev) 929static void bond_mc_list_flush(struct net_device *bond_dev, struct net_device *slave_dev)
929{ 930{
930 struct bonding *bond = bond_dev->priv; 931 struct bonding *bond = netdev_priv(bond_dev);
931 struct dev_mc_list *dmi; 932 struct dev_mc_list *dmi;
932 933
933 for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) { 934 for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
@@ -1164,10 +1165,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1164 bond_3ad_handle_link_change(new_active, BOND_LINK_UP); 1165 bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
1165 } 1166 }
1166 1167
1167 if ((bond->params.mode == BOND_MODE_TLB) || 1168 if (bond_is_lb(bond))
1168 (bond->params.mode == BOND_MODE_ALB)) {
1169 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP); 1169 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
1170 }
1171 } else { 1170 } else {
1172 if (USES_PRIMARY(bond->params.mode)) { 1171 if (USES_PRIMARY(bond->params.mode)) {
1173 printk(KERN_INFO DRV_NAME 1172 printk(KERN_INFO DRV_NAME
@@ -1182,8 +1181,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1182 bond_mc_swap(bond, new_active, old_active); 1181 bond_mc_swap(bond, new_active, old_active);
1183 } 1182 }
1184 1183
1185 if ((bond->params.mode == BOND_MODE_TLB) || 1184 if (bond_is_lb(bond)) {
1186 (bond->params.mode == BOND_MODE_ALB)) {
1187 bond_alb_handle_active_change(bond, new_active); 1185 bond_alb_handle_active_change(bond, new_active);
1188 if (old_active) 1186 if (old_active)
1189 bond_set_slave_inactive_flags(old_active); 1187 bond_set_slave_inactive_flags(old_active);
@@ -1208,6 +1206,9 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1208 bond->send_grat_arp = bond->params.num_grat_arp; 1206 bond->send_grat_arp = bond->params.num_grat_arp;
1209 bond_send_gratuitous_arp(bond); 1207 bond_send_gratuitous_arp(bond);
1210 1208
1209 bond->send_unsol_na = bond->params.num_unsol_na;
1210 bond_send_unsolicited_na(bond);
1211
1211 write_unlock_bh(&bond->curr_slave_lock); 1212 write_unlock_bh(&bond->curr_slave_lock);
1212 read_unlock(&bond->lock); 1213 read_unlock(&bond->lock);
1213 1214
@@ -1315,9 +1316,9 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
1315static int bond_sethwaddr(struct net_device *bond_dev, 1316static int bond_sethwaddr(struct net_device *bond_dev,
1316 struct net_device *slave_dev) 1317 struct net_device *slave_dev)
1317{ 1318{
1318 dprintk("bond_dev=%p\n", bond_dev); 1319 pr_debug("bond_dev=%p\n", bond_dev);
1319 dprintk("slave_dev=%p\n", slave_dev); 1320 pr_debug("slave_dev=%p\n", slave_dev);
1320 dprintk("slave_dev->addr_len=%d\n", slave_dev->addr_len); 1321 pr_debug("slave_dev->addr_len=%d\n", slave_dev->addr_len);
1321 memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len); 1322 memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
1322 return 0; 1323 return 0;
1323} 1324}
@@ -1364,14 +1365,12 @@ done:
1364 return 0; 1365 return 0;
1365} 1366}
1366 1367
1367
1368static void bond_setup_by_slave(struct net_device *bond_dev, 1368static void bond_setup_by_slave(struct net_device *bond_dev,
1369 struct net_device *slave_dev) 1369 struct net_device *slave_dev)
1370{ 1370{
1371 struct bonding *bond = bond_dev->priv; 1371 struct bonding *bond = netdev_priv(bond_dev);
1372 1372
1373 bond_dev->neigh_setup = slave_dev->neigh_setup; 1373 bond_dev->header_ops = slave_dev->header_ops;
1374 bond_dev->header_ops = slave_dev->header_ops;
1375 1374
1376 bond_dev->type = slave_dev->type; 1375 bond_dev->type = slave_dev->type;
1377 bond_dev->hard_header_len = slave_dev->hard_header_len; 1376 bond_dev->hard_header_len = slave_dev->hard_header_len;
@@ -1385,7 +1384,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
1385/* enslave device <slave> to bond device <master> */ 1384/* enslave device <slave> to bond device <master> */
1386int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) 1385int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1387{ 1386{
1388 struct bonding *bond = bond_dev->priv; 1387 struct bonding *bond = netdev_priv(bond_dev);
1388 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1389 struct slave *new_slave = NULL; 1389 struct slave *new_slave = NULL;
1390 struct dev_mc_list *dmi; 1390 struct dev_mc_list *dmi;
1391 struct sockaddr addr; 1391 struct sockaddr addr;
@@ -1394,7 +1394,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1394 int res = 0; 1394 int res = 0;
1395 1395
1396 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL && 1396 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
1397 slave_dev->do_ioctl == NULL) { 1397 slave_ops->ndo_do_ioctl == NULL) {
1398 printk(KERN_WARNING DRV_NAME 1398 printk(KERN_WARNING DRV_NAME
1399 ": %s: Warning: no link monitoring support for %s\n", 1399 ": %s: Warning: no link monitoring support for %s\n",
1400 bond_dev->name, slave_dev->name); 1400 bond_dev->name, slave_dev->name);
@@ -1409,14 +1409,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1409 1409
1410 /* already enslaved */ 1410 /* already enslaved */
1411 if (slave_dev->flags & IFF_SLAVE) { 1411 if (slave_dev->flags & IFF_SLAVE) {
1412 dprintk("Error, Device was already enslaved\n"); 1412 pr_debug("Error, Device was already enslaved\n");
1413 return -EBUSY; 1413 return -EBUSY;
1414 } 1414 }
1415 1415
1416 /* vlan challenged mutual exclusion */ 1416 /* vlan challenged mutual exclusion */
1417 /* no need to lock since we're protected by rtnl_lock */ 1417 /* no need to lock since we're protected by rtnl_lock */
1418 if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { 1418 if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1419 dprintk("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); 1419 pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
1420 if (!list_empty(&bond->vlan_list)) { 1420 if (!list_empty(&bond->vlan_list)) {
1421 printk(KERN_ERR DRV_NAME 1421 printk(KERN_ERR DRV_NAME
1422 ": %s: Error: cannot enslave VLAN " 1422 ": %s: Error: cannot enslave VLAN "
@@ -1434,7 +1434,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1434 bond_dev->features |= NETIF_F_VLAN_CHALLENGED; 1434 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
1435 } 1435 }
1436 } else { 1436 } else {
1437 dprintk("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name); 1437 pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
1438 if (bond->slave_cnt == 0) { 1438 if (bond->slave_cnt == 0) {
1439 /* First slave, and it is not VLAN challenged, 1439 /* First slave, and it is not VLAN challenged,
1440 * so remove the block of adding VLANs over the bond. 1440 * so remove the block of adding VLANs over the bond.
@@ -1476,7 +1476,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1476 goto err_undo_flags; 1476 goto err_undo_flags;
1477 } 1477 }
1478 1478
1479 if (slave_dev->set_mac_address == NULL) { 1479 if (slave_ops->ndo_set_mac_address == NULL) {
1480 if (bond->slave_cnt == 0) { 1480 if (bond->slave_cnt == 0) {
1481 printk(KERN_WARNING DRV_NAME 1481 printk(KERN_WARNING DRV_NAME
1482 ": %s: Warning: The first slave device " 1482 ": %s: Warning: The first slave device "
@@ -1522,28 +1522,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1522 addr.sa_family = slave_dev->type; 1522 addr.sa_family = slave_dev->type;
1523 res = dev_set_mac_address(slave_dev, &addr); 1523 res = dev_set_mac_address(slave_dev, &addr);
1524 if (res) { 1524 if (res) {
1525 dprintk("Error %d calling set_mac_address\n", res); 1525 pr_debug("Error %d calling set_mac_address\n", res);
1526 goto err_free; 1526 goto err_free;
1527 } 1527 }
1528 } 1528 }
1529 1529
1530 res = netdev_set_master(slave_dev, bond_dev); 1530 res = netdev_set_master(slave_dev, bond_dev);
1531 if (res) { 1531 if (res) {
1532 dprintk("Error %d calling netdev_set_master\n", res); 1532 pr_debug("Error %d calling netdev_set_master\n", res);
1533 goto err_restore_mac; 1533 goto err_restore_mac;
1534 } 1534 }
1535 /* open the slave since the application closed it */ 1535 /* open the slave since the application closed it */
1536 res = dev_open(slave_dev); 1536 res = dev_open(slave_dev);
1537 if (res) { 1537 if (res) {
1538 dprintk("Openning slave %s failed\n", slave_dev->name); 1538 pr_debug("Openning slave %s failed\n", slave_dev->name);
1539 goto err_unset_master; 1539 goto err_unset_master;
1540 } 1540 }
1541 1541
1542 new_slave->dev = slave_dev; 1542 new_slave->dev = slave_dev;
1543 slave_dev->priv_flags |= IFF_BONDING; 1543 slave_dev->priv_flags |= IFF_BONDING;
1544 1544
1545 if ((bond->params.mode == BOND_MODE_TLB) || 1545 if (bond_is_lb(bond)) {
1546 (bond->params.mode == BOND_MODE_ALB)) {
1547 /* bond_alb_init_slave() must be called before all other stages since 1546 /* bond_alb_init_slave() must be called before all other stages since
1548 * it might fail and we do not want to have to undo everything 1547 * it might fail and we do not want to have to undo everything
1549 */ 1548 */
@@ -1641,18 +1640,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1641 if (!bond->params.miimon || 1640 if (!bond->params.miimon ||
1642 (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) { 1641 (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) {
1643 if (bond->params.updelay) { 1642 if (bond->params.updelay) {
1644 dprintk("Initial state of slave_dev is " 1643 pr_debug("Initial state of slave_dev is "
1645 "BOND_LINK_BACK\n"); 1644 "BOND_LINK_BACK\n");
1646 new_slave->link = BOND_LINK_BACK; 1645 new_slave->link = BOND_LINK_BACK;
1647 new_slave->delay = bond->params.updelay; 1646 new_slave->delay = bond->params.updelay;
1648 } else { 1647 } else {
1649 dprintk("Initial state of slave_dev is " 1648 pr_debug("Initial state of slave_dev is "
1650 "BOND_LINK_UP\n"); 1649 "BOND_LINK_UP\n");
1651 new_slave->link = BOND_LINK_UP; 1650 new_slave->link = BOND_LINK_UP;
1652 } 1651 }
1653 new_slave->jiffies = jiffies; 1652 new_slave->jiffies = jiffies;
1654 } else { 1653 } else {
1655 dprintk("Initial state of slave_dev is " 1654 pr_debug("Initial state of slave_dev is "
1656 "BOND_LINK_DOWN\n"); 1655 "BOND_LINK_DOWN\n");
1657 new_slave->link = BOND_LINK_DOWN; 1656 new_slave->link = BOND_LINK_DOWN;
1658 } 1657 }
@@ -1713,7 +1712,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1713 bond_set_slave_inactive_flags(new_slave); 1712 bond_set_slave_inactive_flags(new_slave);
1714 break; 1713 break;
1715 default: 1714 default:
1716 dprintk("This slave is always active in trunk mode\n"); 1715 pr_debug("This slave is always active in trunk mode\n");
1717 1716
1718 /* always active in trunk mode */ 1717 /* always active in trunk mode */
1719 new_slave->state = BOND_STATE_ACTIVE; 1718 new_slave->state = BOND_STATE_ACTIVE;
@@ -1787,11 +1786,10 @@ err_undo_flags:
1787 */ 1786 */
1788int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) 1787int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1789{ 1788{
1790 struct bonding *bond = bond_dev->priv; 1789 struct bonding *bond = netdev_priv(bond_dev);
1791 struct slave *slave, *oldcurrent; 1790 struct slave *slave, *oldcurrent;
1792 struct sockaddr addr; 1791 struct sockaddr addr;
1793 int mac_addr_differ; 1792 int mac_addr_differ;
1794 DECLARE_MAC_BUF(mac);
1795 1793
1796 /* slave is not a slave or master is not master of this slave */ 1794 /* slave is not a slave or master is not master of this slave */
1797 if (!(slave_dev->flags & IFF_SLAVE) || 1795 if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -1820,11 +1818,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1820 if (!mac_addr_differ && (bond->slave_cnt > 1)) 1818 if (!mac_addr_differ && (bond->slave_cnt > 1))
1821 printk(KERN_WARNING DRV_NAME 1819 printk(KERN_WARNING DRV_NAME
1822 ": %s: Warning: the permanent HWaddr of %s - " 1820 ": %s: Warning: the permanent HWaddr of %s - "
1823 "%s - is still in use by %s. " 1821 "%pM - is still in use by %s. "
1824 "Set the HWaddr of %s to a different address " 1822 "Set the HWaddr of %s to a different address "
1825 "to avoid conflicts.\n", 1823 "to avoid conflicts.\n",
1826 bond_dev->name, slave_dev->name, 1824 bond_dev->name, slave_dev->name,
1827 print_mac(mac, slave->perm_hwaddr), 1825 slave->perm_hwaddr,
1828 bond_dev->name, slave_dev->name); 1826 bond_dev->name, slave_dev->name);
1829 } 1827 }
1830 1828
@@ -1860,8 +1858,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1860 bond_change_active_slave(bond, NULL); 1858 bond_change_active_slave(bond, NULL);
1861 } 1859 }
1862 1860
1863 if ((bond->params.mode == BOND_MODE_TLB) || 1861 if (bond_is_lb(bond)) {
1864 (bond->params.mode == BOND_MODE_ALB)) {
1865 /* Must be called only after the slave has been 1862 /* Must be called only after the slave has been
1866 * detached from the list and the curr_active_slave 1863 * detached from the list and the curr_active_slave
1867 * has been cleared (if our_slave == old_current), 1864 * has been cleared (if our_slave == old_current),
@@ -1981,7 +1978,7 @@ void bond_destroy(struct bonding *bond)
1981 1978
1982static void bond_destructor(struct net_device *bond_dev) 1979static void bond_destructor(struct net_device *bond_dev)
1983{ 1980{
1984 struct bonding *bond = bond_dev->priv; 1981 struct bonding *bond = netdev_priv(bond_dev);
1985 1982
1986 if (bond->wq) 1983 if (bond->wq)
1987 destroy_workqueue(bond->wq); 1984 destroy_workqueue(bond->wq);
@@ -1999,7 +1996,7 @@ static void bond_destructor(struct net_device *bond_dev)
1999*/ 1996*/
2000int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev) 1997int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev)
2001{ 1998{
2002 struct bonding *bond = bond_dev->priv; 1999 struct bonding *bond = netdev_priv(bond_dev);
2003 int ret; 2000 int ret;
2004 2001
2005 ret = bond_release(bond_dev, slave_dev); 2002 ret = bond_release(bond_dev, slave_dev);
@@ -2016,7 +2013,7 @@ int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *sl
2016 */ 2013 */
2017static int bond_release_all(struct net_device *bond_dev) 2014static int bond_release_all(struct net_device *bond_dev)
2018{ 2015{
2019 struct bonding *bond = bond_dev->priv; 2016 struct bonding *bond = netdev_priv(bond_dev);
2020 struct slave *slave; 2017 struct slave *slave;
2021 struct net_device *slave_dev; 2018 struct net_device *slave_dev;
2022 struct sockaddr addr; 2019 struct sockaddr addr;
@@ -2050,8 +2047,7 @@ static int bond_release_all(struct net_device *bond_dev)
2050 */ 2047 */
2051 write_unlock_bh(&bond->lock); 2048 write_unlock_bh(&bond->lock);
2052 2049
2053 if ((bond->params.mode == BOND_MODE_TLB) || 2050 if (bond_is_lb(bond)) {
2054 (bond->params.mode == BOND_MODE_ALB)) {
2055 /* must be called only after the slave 2051 /* must be called only after the slave
2056 * has been detached from the list 2052 * has been detached from the list
2057 */ 2053 */
@@ -2147,7 +2143,7 @@ out:
2147 */ 2143 */
2148static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev) 2144static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev)
2149{ 2145{
2150 struct bonding *bond = bond_dev->priv; 2146 struct bonding *bond = netdev_priv(bond_dev);
2151 struct slave *old_active = NULL; 2147 struct slave *old_active = NULL;
2152 struct slave *new_active = NULL; 2148 struct slave *new_active = NULL;
2153 int res = 0; 2149 int res = 0;
@@ -2196,7 +2192,7 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
2196 2192
2197static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) 2193static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2198{ 2194{
2199 struct bonding *bond = bond_dev->priv; 2195 struct bonding *bond = netdev_priv(bond_dev);
2200 2196
2201 info->bond_mode = bond->params.mode; 2197 info->bond_mode = bond->params.mode;
2202 info->miimon = bond->params.miimon; 2198 info->miimon = bond->params.miimon;
@@ -2210,7 +2206,7 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2210 2206
2211static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info) 2207static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
2212{ 2208{
2213 struct bonding *bond = bond_dev->priv; 2209 struct bonding *bond = netdev_priv(bond_dev);
2214 struct slave *slave; 2210 struct slave *slave;
2215 int i, found = 0; 2211 int i, found = 0;
2216 2212
@@ -2378,8 +2374,7 @@ static void bond_miimon_commit(struct bonding *bond)
2378 if (bond->params.mode == BOND_MODE_8023AD) 2374 if (bond->params.mode == BOND_MODE_8023AD)
2379 bond_3ad_handle_link_change(slave, BOND_LINK_UP); 2375 bond_3ad_handle_link_change(slave, BOND_LINK_UP);
2380 2376
2381 if ((bond->params.mode == BOND_MODE_TLB) || 2377 if (bond_is_lb(bond))
2382 (bond->params.mode == BOND_MODE_ALB))
2383 bond_alb_handle_link_change(bond, slave, 2378 bond_alb_handle_link_change(bond, slave,
2384 BOND_LINK_UP); 2379 BOND_LINK_UP);
2385 2380
@@ -2464,6 +2459,12 @@ void bond_mii_monitor(struct work_struct *work)
2464 read_unlock(&bond->curr_slave_lock); 2459 read_unlock(&bond->curr_slave_lock);
2465 } 2460 }
2466 2461
2462 if (bond->send_unsol_na) {
2463 read_lock(&bond->curr_slave_lock);
2464 bond_send_unsolicited_na(bond);
2465 read_unlock(&bond->curr_slave_lock);
2466 }
2467
2467 if (bond_miimon_inspect(bond)) { 2468 if (bond_miimon_inspect(bond)) {
2468 read_unlock(&bond->lock); 2469 read_unlock(&bond->lock);
2469 rtnl_lock(); 2470 rtnl_lock();
@@ -2532,7 +2533,7 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
2532{ 2533{
2533 struct sk_buff *skb; 2534 struct sk_buff *skb;
2534 2535
2535 dprintk("arp %d on slave %s: dst %x src %x vid %d\n", arp_op, 2536 pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op,
2536 slave_dev->name, dest_ip, src_ip, vlan_id); 2537 slave_dev->name, dest_ip, src_ip, vlan_id);
2537 2538
2538 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip, 2539 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
@@ -2565,9 +2566,9 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2565 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { 2566 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
2566 if (!targets[i]) 2567 if (!targets[i])
2567 continue; 2568 continue;
2568 dprintk("basa: target %x\n", targets[i]); 2569 pr_debug("basa: target %x\n", targets[i]);
2569 if (list_empty(&bond->vlan_list)) { 2570 if (list_empty(&bond->vlan_list)) {
2570 dprintk("basa: empty vlan: arp_send\n"); 2571 pr_debug("basa: empty vlan: arp_send\n");
2571 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2572 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2572 bond->master_ip, 0); 2573 bond->master_ip, 0);
2573 continue; 2574 continue;
@@ -2586,8 +2587,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2586 if (rv) { 2587 if (rv) {
2587 if (net_ratelimit()) { 2588 if (net_ratelimit()) {
2588 printk(KERN_WARNING DRV_NAME 2589 printk(KERN_WARNING DRV_NAME
2589 ": %s: no route to arp_ip_target %u.%u.%u.%u\n", 2590 ": %s: no route to arp_ip_target %pI4\n",
2590 bond->dev->name, NIPQUAD(fl.fl4_dst)); 2591 bond->dev->name, &fl.fl4_dst);
2591 } 2592 }
2592 continue; 2593 continue;
2593 } 2594 }
@@ -2597,7 +2598,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2597 */ 2598 */
2598 if (rt->u.dst.dev == bond->dev) { 2599 if (rt->u.dst.dev == bond->dev) {
2599 ip_rt_put(rt); 2600 ip_rt_put(rt);
2600 dprintk("basa: rtdev == bond->dev: arp_send\n"); 2601 pr_debug("basa: rtdev == bond->dev: arp_send\n");
2601 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 2602 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2602 bond->master_ip, 0); 2603 bond->master_ip, 0);
2603 continue; 2604 continue;
@@ -2608,7 +2609,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2608 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id); 2609 vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
2609 if (vlan_dev == rt->u.dst.dev) { 2610 if (vlan_dev == rt->u.dst.dev) {
2610 vlan_id = vlan->vlan_id; 2611 vlan_id = vlan->vlan_id;
2611 dprintk("basa: vlan match on %s %d\n", 2612 pr_debug("basa: vlan match on %s %d\n",
2612 vlan_dev->name, vlan_id); 2613 vlan_dev->name, vlan_id);
2613 break; 2614 break;
2614 } 2615 }
@@ -2623,8 +2624,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2623 2624
2624 if (net_ratelimit()) { 2625 if (net_ratelimit()) {
2625 printk(KERN_WARNING DRV_NAME 2626 printk(KERN_WARNING DRV_NAME
2626 ": %s: no path to arp_ip_target %u.%u.%u.%u via rt.dev %s\n", 2627 ": %s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2627 bond->dev->name, NIPQUAD(fl.fl4_dst), 2628 bond->dev->name, &fl.fl4_dst,
2628 rt->u.dst.dev ? rt->u.dst.dev->name : "NULL"); 2629 rt->u.dst.dev ? rt->u.dst.dev->name : "NULL");
2629 } 2630 }
2630 ip_rt_put(rt); 2631 ip_rt_put(rt);
@@ -2643,7 +2644,7 @@ static void bond_send_gratuitous_arp(struct bonding *bond)
2643 struct vlan_entry *vlan; 2644 struct vlan_entry *vlan;
2644 struct net_device *vlan_dev; 2645 struct net_device *vlan_dev;
2645 2646
2646 dprintk("bond_send_grat_arp: bond %s slave %s\n", bond->dev->name, 2647 pr_debug("bond_send_grat_arp: bond %s slave %s\n", bond->dev->name,
2647 slave ? slave->dev->name : "NULL"); 2648 slave ? slave->dev->name : "NULL");
2648 2649
2649 if (!slave || !bond->send_grat_arp || 2650 if (!slave || !bond->send_grat_arp ||
@@ -2673,10 +2674,8 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
2673 2674
2674 targets = bond->params.arp_targets; 2675 targets = bond->params.arp_targets;
2675 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { 2676 for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) {
2676 dprintk("bva: sip %u.%u.%u.%u tip %u.%u.%u.%u t[%d] " 2677 pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n",
2677 "%u.%u.%u.%u bhti(tip) %d\n", 2678 &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip));
2678 NIPQUAD(sip), NIPQUAD(tip), i, NIPQUAD(targets[i]),
2679 bond_has_this_ip(bond, tip));
2680 if (sip == targets[i]) { 2679 if (sip == targets[i]) {
2681 if (bond_has_this_ip(bond, tip)) 2680 if (bond_has_this_ip(bond, tip))
2682 slave->last_arp_rx = jiffies; 2681 slave->last_arp_rx = jiffies;
@@ -2699,10 +2698,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2699 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) 2698 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
2700 goto out; 2699 goto out;
2701 2700
2702 bond = dev->priv; 2701 bond = netdev_priv(dev);
2703 read_lock(&bond->lock); 2702 read_lock(&bond->lock);
2704 2703
2705 dprintk("bond_arp_rcv: bond %s skb->dev %s orig_dev %s\n", 2704 pr_debug("bond_arp_rcv: bond %s skb->dev %s orig_dev %s\n",
2706 bond->dev->name, skb->dev ? skb->dev->name : "NULL", 2705 bond->dev->name, skb->dev ? skb->dev->name : "NULL",
2707 orig_dev ? orig_dev->name : "NULL"); 2706 orig_dev ? orig_dev->name : "NULL");
2708 2707
@@ -2728,10 +2727,10 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2728 arp_ptr += 4 + dev->addr_len; 2727 arp_ptr += 4 + dev->addr_len;
2729 memcpy(&tip, arp_ptr, 4); 2728 memcpy(&tip, arp_ptr, 4);
2730 2729
2731 dprintk("bond_arp_rcv: %s %s/%d av %d sv %d sip %u.%u.%u.%u" 2730 pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
2732 " tip %u.%u.%u.%u\n", bond->dev->name, slave->dev->name, 2731 bond->dev->name, slave->dev->name, slave->state,
2733 slave->state, bond->params.arp_validate, 2732 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
2734 slave_do_arp_validate(bond, slave), NIPQUAD(sip), NIPQUAD(tip)); 2733 &sip, &tip);
2735 2734
2736 /* 2735 /*
2737 * Backup slaves won't see the ARP reply, but do come through 2736 * Backup slaves won't see the ARP reply, but do come through
@@ -3161,6 +3160,12 @@ void bond_activebackup_arp_mon(struct work_struct *work)
3161 read_unlock(&bond->curr_slave_lock); 3160 read_unlock(&bond->curr_slave_lock);
3162 } 3161 }
3163 3162
3163 if (bond->send_unsol_na) {
3164 read_lock(&bond->curr_slave_lock);
3165 bond_send_unsolicited_na(bond);
3166 read_unlock(&bond->curr_slave_lock);
3167 }
3168
3164 if (bond_ab_arp_inspect(bond, delta_in_ticks)) { 3169 if (bond_ab_arp_inspect(bond, delta_in_ticks)) {
3165 read_unlock(&bond->lock); 3170 read_unlock(&bond->lock);
3166 rtnl_lock(); 3171 rtnl_lock();
@@ -3239,7 +3244,6 @@ static void bond_info_show_master(struct seq_file *seq)
3239 struct bonding *bond = seq->private; 3244 struct bonding *bond = seq->private;
3240 struct slave *curr; 3245 struct slave *curr;
3241 int i; 3246 int i;
3242 u32 target;
3243 3247
3244 read_lock(&bond->curr_slave_lock); 3248 read_lock(&bond->curr_slave_lock);
3245 curr = bond->curr_active_slave; 3249 curr = bond->curr_active_slave;
@@ -3293,8 +3297,7 @@ static void bond_info_show_master(struct seq_file *seq)
3293 continue; 3297 continue;
3294 if (printed) 3298 if (printed)
3295 seq_printf(seq, ","); 3299 seq_printf(seq, ",");
3296 target = ntohl(bond->params.arp_targets[i]); 3300 seq_printf(seq, " %pI4", &bond->params.arp_targets[i]);
3297 seq_printf(seq, " %d.%d.%d.%d", HIPQUAD(target));
3298 printed = 1; 3301 printed = 1;
3299 } 3302 }
3300 seq_printf(seq, "\n"); 3303 seq_printf(seq, "\n");
@@ -3302,11 +3305,12 @@ static void bond_info_show_master(struct seq_file *seq)
3302 3305
3303 if (bond->params.mode == BOND_MODE_8023AD) { 3306 if (bond->params.mode == BOND_MODE_8023AD) {
3304 struct ad_info ad_info; 3307 struct ad_info ad_info;
3305 DECLARE_MAC_BUF(mac);
3306 3308
3307 seq_puts(seq, "\n802.3ad info\n"); 3309 seq_puts(seq, "\n802.3ad info\n");
3308 seq_printf(seq, "LACP rate: %s\n", 3310 seq_printf(seq, "LACP rate: %s\n",
3309 (bond->params.lacp_fast) ? "fast" : "slow"); 3311 (bond->params.lacp_fast) ? "fast" : "slow");
3312 seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
3313 ad_select_tbl[bond->params.ad_select].modename);
3310 3314
3311 if (bond_3ad_get_active_agg_info(bond, &ad_info)) { 3315 if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
3312 seq_printf(seq, "bond %s has no active aggregator\n", 3316 seq_printf(seq, "bond %s has no active aggregator\n",
@@ -3322,8 +3326,8 @@ static void bond_info_show_master(struct seq_file *seq)
3322 ad_info.actor_key); 3326 ad_info.actor_key);
3323 seq_printf(seq, "\tPartner Key: %d\n", 3327 seq_printf(seq, "\tPartner Key: %d\n",
3324 ad_info.partner_key); 3328 ad_info.partner_key);
3325 seq_printf(seq, "\tPartner Mac Address: %s\n", 3329 seq_printf(seq, "\tPartner Mac Address: %pM\n",
3326 print_mac(mac, ad_info.partner_system)); 3330 ad_info.partner_system);
3327 } 3331 }
3328 } 3332 }
3329} 3333}
@@ -3331,7 +3335,6 @@ static void bond_info_show_master(struct seq_file *seq)
3331static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave) 3335static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave)
3332{ 3336{
3333 struct bonding *bond = seq->private; 3337 struct bonding *bond = seq->private;
3334 DECLARE_MAC_BUF(mac);
3335 3338
3336 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); 3339 seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
3337 seq_printf(seq, "MII Status: %s\n", 3340 seq_printf(seq, "MII Status: %s\n",
@@ -3339,9 +3342,7 @@ static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave
3339 seq_printf(seq, "Link Failure Count: %u\n", 3342 seq_printf(seq, "Link Failure Count: %u\n",
3340 slave->link_failure_count); 3343 slave->link_failure_count);
3341 3344
3342 seq_printf(seq, 3345 seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr);
3343 "Permanent HW addr: %s\n",
3344 print_mac(mac, slave->perm_hwaddr));
3345 3346
3346 if (bond->params.mode == BOND_MODE_8023AD) { 3347 if (bond->params.mode == BOND_MODE_8023AD) {
3347 const struct aggregator *agg 3348 const struct aggregator *agg
@@ -3506,7 +3507,7 @@ static int bond_event_changename(struct bonding *bond)
3506 3507
3507static int bond_master_netdev_event(unsigned long event, struct net_device *bond_dev) 3508static int bond_master_netdev_event(unsigned long event, struct net_device *bond_dev)
3508{ 3509{
3509 struct bonding *event_bond = bond_dev->priv; 3510 struct bonding *event_bond = netdev_priv(bond_dev);
3510 3511
3511 switch (event) { 3512 switch (event) {
3512 case NETDEV_CHANGENAME: 3513 case NETDEV_CHANGENAME:
@@ -3524,7 +3525,7 @@ static int bond_master_netdev_event(unsigned long event, struct net_device *bond
3524static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev) 3525static int bond_slave_netdev_event(unsigned long event, struct net_device *slave_dev)
3525{ 3526{
3526 struct net_device *bond_dev = slave_dev->master; 3527 struct net_device *bond_dev = slave_dev->master;
3527 struct bonding *bond = bond_dev->priv; 3528 struct bonding *bond = netdev_priv(bond_dev);
3528 3529
3529 switch (event) { 3530 switch (event) {
3530 case NETDEV_UNREGISTER: 3531 case NETDEV_UNREGISTER:
@@ -3591,7 +3592,7 @@ static int bond_netdev_event(struct notifier_block *this, unsigned long event, v
3591 if (dev_net(event_dev) != &init_net) 3592 if (dev_net(event_dev) != &init_net)
3592 return NOTIFY_DONE; 3593 return NOTIFY_DONE;
3593 3594
3594 dprintk("event_dev: %s, event: %lx\n", 3595 pr_debug("event_dev: %s, event: %lx\n",
3595 (event_dev ? event_dev->name : "None"), 3596 (event_dev ? event_dev->name : "None"),
3596 event); 3597 event);
3597 3598
@@ -3599,12 +3600,12 @@ static int bond_netdev_event(struct notifier_block *this, unsigned long event, v
3599 return NOTIFY_DONE; 3600 return NOTIFY_DONE;
3600 3601
3601 if (event_dev->flags & IFF_MASTER) { 3602 if (event_dev->flags & IFF_MASTER) {
3602 dprintk("IFF_MASTER\n"); 3603 pr_debug("IFF_MASTER\n");
3603 return bond_master_netdev_event(event, event_dev); 3604 return bond_master_netdev_event(event, event_dev);
3604 } 3605 }
3605 3606
3606 if (event_dev->flags & IFF_SLAVE) { 3607 if (event_dev->flags & IFF_SLAVE) {
3607 dprintk("IFF_SLAVE\n"); 3608 pr_debug("IFF_SLAVE\n");
3608 return bond_slave_netdev_event(event, event_dev); 3609 return bond_slave_netdev_event(event, event_dev);
3609 } 3610 }
3610 3611
@@ -3775,12 +3776,11 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb,
3775 3776
3776static int bond_open(struct net_device *bond_dev) 3777static int bond_open(struct net_device *bond_dev)
3777{ 3778{
3778 struct bonding *bond = bond_dev->priv; 3779 struct bonding *bond = netdev_priv(bond_dev);
3779 3780
3780 bond->kill_timers = 0; 3781 bond->kill_timers = 0;
3781 3782
3782 if ((bond->params.mode == BOND_MODE_TLB) || 3783 if (bond_is_lb(bond)) {
3783 (bond->params.mode == BOND_MODE_ALB)) {
3784 /* bond_alb_initialize must be called before the timer 3784 /* bond_alb_initialize must be called before the timer
3785 * is started. 3785 * is started.
3786 */ 3786 */
@@ -3816,6 +3816,7 @@ static int bond_open(struct net_device *bond_dev)
3816 queue_delayed_work(bond->wq, &bond->ad_work, 0); 3816 queue_delayed_work(bond->wq, &bond->ad_work, 0);
3817 /* register to receive LACPDUs */ 3817 /* register to receive LACPDUs */
3818 bond_register_lacpdu(bond); 3818 bond_register_lacpdu(bond);
3819 bond_3ad_initiate_agg_selection(bond, 1);
3819 } 3820 }
3820 3821
3821 return 0; 3822 return 0;
@@ -3823,7 +3824,7 @@ static int bond_open(struct net_device *bond_dev)
3823 3824
3824static int bond_close(struct net_device *bond_dev) 3825static int bond_close(struct net_device *bond_dev)
3825{ 3826{
3826 struct bonding *bond = bond_dev->priv; 3827 struct bonding *bond = netdev_priv(bond_dev);
3827 3828
3828 if (bond->params.mode == BOND_MODE_8023AD) { 3829 if (bond->params.mode == BOND_MODE_8023AD) {
3829 /* Unregister the receive of LACPDUs */ 3830 /* Unregister the receive of LACPDUs */
@@ -3836,6 +3837,7 @@ static int bond_close(struct net_device *bond_dev)
3836 write_lock_bh(&bond->lock); 3837 write_lock_bh(&bond->lock);
3837 3838
3838 bond->send_grat_arp = 0; 3839 bond->send_grat_arp = 0;
3840 bond->send_unsol_na = 0;
3839 3841
3840 /* signal timers not to re-arm */ 3842 /* signal timers not to re-arm */
3841 bond->kill_timers = 1; 3843 bond->kill_timers = 1;
@@ -3863,8 +3865,7 @@ static int bond_close(struct net_device *bond_dev)
3863 } 3865 }
3864 3866
3865 3867
3866 if ((bond->params.mode == BOND_MODE_TLB) || 3868 if (bond_is_lb(bond)) {
3867 (bond->params.mode == BOND_MODE_ALB)) {
3868 /* Must be called only after all 3869 /* Must be called only after all
3869 * slaves have been released 3870 * slaves have been released
3870 */ 3871 */
@@ -3876,8 +3877,8 @@ static int bond_close(struct net_device *bond_dev)
3876 3877
3877static struct net_device_stats *bond_get_stats(struct net_device *bond_dev) 3878static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
3878{ 3879{
3879 struct bonding *bond = bond_dev->priv; 3880 struct bonding *bond = netdev_priv(bond_dev);
3880 struct net_device_stats *stats = &(bond->stats), *sstats; 3881 struct net_device_stats *stats = &bond->stats;
3881 struct net_device_stats local_stats; 3882 struct net_device_stats local_stats;
3882 struct slave *slave; 3883 struct slave *slave;
3883 int i; 3884 int i;
@@ -3887,7 +3888,8 @@ static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
3887 read_lock_bh(&bond->lock); 3888 read_lock_bh(&bond->lock);
3888 3889
3889 bond_for_each_slave(bond, slave, i) { 3890 bond_for_each_slave(bond, slave, i) {
3890 sstats = slave->dev->get_stats(slave->dev); 3891 const struct net_device_stats *sstats = dev_get_stats(slave->dev);
3892
3891 local_stats.rx_packets += sstats->rx_packets; 3893 local_stats.rx_packets += sstats->rx_packets;
3892 local_stats.rx_bytes += sstats->rx_bytes; 3894 local_stats.rx_bytes += sstats->rx_bytes;
3893 local_stats.rx_errors += sstats->rx_errors; 3895 local_stats.rx_errors += sstats->rx_errors;
@@ -3932,7 +3934,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3932 struct mii_ioctl_data *mii = NULL; 3934 struct mii_ioctl_data *mii = NULL;
3933 int res = 0; 3935 int res = 0;
3934 3936
3935 dprintk("bond_ioctl: master=%s, cmd=%d\n", 3937 pr_debug("bond_ioctl: master=%s, cmd=%d\n",
3936 bond_dev->name, cmd); 3938 bond_dev->name, cmd);
3937 3939
3938 switch (cmd) { 3940 switch (cmd) {
@@ -3954,7 +3956,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3954 } 3956 }
3955 3957
3956 if (mii->reg_num == 1) { 3958 if (mii->reg_num == 1) {
3957 struct bonding *bond = bond_dev->priv; 3959 struct bonding *bond = netdev_priv(bond_dev);
3958 mii->val_out = 0; 3960 mii->val_out = 0;
3959 read_lock(&bond->lock); 3961 read_lock(&bond->lock);
3960 read_lock(&bond->curr_slave_lock); 3962 read_lock(&bond->curr_slave_lock);
@@ -4010,12 +4012,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
4010 down_write(&(bonding_rwsem)); 4012 down_write(&(bonding_rwsem));
4011 slave_dev = dev_get_by_name(&init_net, ifr->ifr_slave); 4013 slave_dev = dev_get_by_name(&init_net, ifr->ifr_slave);
4012 4014
4013 dprintk("slave_dev=%p: \n", slave_dev); 4015 pr_debug("slave_dev=%p: \n", slave_dev);
4014 4016
4015 if (!slave_dev) { 4017 if (!slave_dev) {
4016 res = -ENODEV; 4018 res = -ENODEV;
4017 } else { 4019 } else {
4018 dprintk("slave_dev->name=%s: \n", slave_dev->name); 4020 pr_debug("slave_dev->name=%s: \n", slave_dev->name);
4019 switch (cmd) { 4021 switch (cmd) {
4020 case BOND_ENSLAVE_OLD: 4022 case BOND_ENSLAVE_OLD:
4021 case SIOCBONDENSLAVE: 4023 case SIOCBONDENSLAVE:
@@ -4046,7 +4048,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
4046 4048
4047static void bond_set_multicast_list(struct net_device *bond_dev) 4049static void bond_set_multicast_list(struct net_device *bond_dev)
4048{ 4050{
4049 struct bonding *bond = bond_dev->priv; 4051 struct bonding *bond = netdev_priv(bond_dev);
4050 struct dev_mc_list *dmi; 4052 struct dev_mc_list *dmi;
4051 4053
4052 /* 4054 /*
@@ -4102,17 +4104,31 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
4102 read_unlock(&bond->lock); 4104 read_unlock(&bond->lock);
4103} 4105}
4104 4106
4107static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms)
4108{
4109 struct bonding *bond = netdev_priv(dev);
4110 struct slave *slave = bond->first_slave;
4111
4112 if (slave) {
4113 const struct net_device_ops *slave_ops
4114 = slave->dev->netdev_ops;
4115 if (slave_ops->ndo_neigh_setup)
4116 return slave_ops->ndo_neigh_setup(dev, parms);
4117 }
4118 return 0;
4119}
4120
4105/* 4121/*
4106 * Change the MTU of all of a master's slaves to match the master 4122 * Change the MTU of all of a master's slaves to match the master
4107 */ 4123 */
4108static int bond_change_mtu(struct net_device *bond_dev, int new_mtu) 4124static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4109{ 4125{
4110 struct bonding *bond = bond_dev->priv; 4126 struct bonding *bond = netdev_priv(bond_dev);
4111 struct slave *slave, *stop_at; 4127 struct slave *slave, *stop_at;
4112 int res = 0; 4128 int res = 0;
4113 int i; 4129 int i;
4114 4130
4115 dprintk("bond=%p, name=%s, new_mtu=%d\n", bond, 4131 pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
4116 (bond_dev ? bond_dev->name : "None"), new_mtu); 4132 (bond_dev ? bond_dev->name : "None"), new_mtu);
4117 4133
4118 /* Can't hold bond->lock with bh disabled here since 4134 /* Can't hold bond->lock with bh disabled here since
@@ -4131,7 +4147,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4131 */ 4147 */
4132 4148
4133 bond_for_each_slave(bond, slave, i) { 4149 bond_for_each_slave(bond, slave, i) {
4134 dprintk("s %p s->p %p c_m %p\n", slave, 4150 pr_debug("s %p s->p %p c_m %p\n", slave,
4135 slave->prev, slave->dev->change_mtu); 4151 slave->prev, slave->dev->change_mtu);
4136 4152
4137 res = dev_set_mtu(slave->dev, new_mtu); 4153 res = dev_set_mtu(slave->dev, new_mtu);
@@ -4145,7 +4161,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4145 * means changing their mtu from timer context, which 4161 * means changing their mtu from timer context, which
4146 * is probably not a good idea. 4162 * is probably not a good idea.
4147 */ 4163 */
4148 dprintk("err %d %s\n", res, slave->dev->name); 4164 pr_debug("err %d %s\n", res, slave->dev->name);
4149 goto unwind; 4165 goto unwind;
4150 } 4166 }
4151 } 4167 }
@@ -4162,7 +4178,7 @@ unwind:
4162 4178
4163 tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu); 4179 tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
4164 if (tmp_res) { 4180 if (tmp_res) {
4165 dprintk("unwind err %d dev %s\n", tmp_res, 4181 pr_debug("unwind err %d dev %s\n", tmp_res,
4166 slave->dev->name); 4182 slave->dev->name);
4167 } 4183 }
4168 } 4184 }
@@ -4179,13 +4195,17 @@ unwind:
4179 */ 4195 */
4180static int bond_set_mac_address(struct net_device *bond_dev, void *addr) 4196static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4181{ 4197{
4182 struct bonding *bond = bond_dev->priv; 4198 struct bonding *bond = netdev_priv(bond_dev);
4183 struct sockaddr *sa = addr, tmp_sa; 4199 struct sockaddr *sa = addr, tmp_sa;
4184 struct slave *slave, *stop_at; 4200 struct slave *slave, *stop_at;
4185 int res = 0; 4201 int res = 0;
4186 int i; 4202 int i;
4187 4203
4188 dprintk("bond=%p, name=%s\n", bond, (bond_dev ? bond_dev->name : "None")); 4204 if (bond->params.mode == BOND_MODE_ALB)
4205 return bond_alb_set_mac_address(bond_dev, addr);
4206
4207
4208 pr_debug("bond=%p, name=%s\n", bond, (bond_dev ? bond_dev->name : "None"));
4189 4209
4190 /* 4210 /*
4191 * If fail_over_mac is set to active, do nothing and return 4211 * If fail_over_mac is set to active, do nothing and return
@@ -4214,11 +4234,12 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4214 */ 4234 */
4215 4235
4216 bond_for_each_slave(bond, slave, i) { 4236 bond_for_each_slave(bond, slave, i) {
4217 dprintk("slave %p %s\n", slave, slave->dev->name); 4237 const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
4238 pr_debug("slave %p %s\n", slave, slave->dev->name);
4218 4239
4219 if (slave->dev->set_mac_address == NULL) { 4240 if (slave_ops->ndo_set_mac_address == NULL) {
4220 res = -EOPNOTSUPP; 4241 res = -EOPNOTSUPP;
4221 dprintk("EOPNOTSUPP %s\n", slave->dev->name); 4242 pr_debug("EOPNOTSUPP %s\n", slave->dev->name);
4222 goto unwind; 4243 goto unwind;
4223 } 4244 }
4224 4245
@@ -4230,7 +4251,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4230 * breakage anyway until ARP finish 4251 * breakage anyway until ARP finish
4231 * updating, so... 4252 * updating, so...
4232 */ 4253 */
4233 dprintk("err %d %s\n", res, slave->dev->name); 4254 pr_debug("err %d %s\n", res, slave->dev->name);
4234 goto unwind; 4255 goto unwind;
4235 } 4256 }
4236 } 4257 }
@@ -4250,7 +4271,7 @@ unwind:
4250 4271
4251 tmp_res = dev_set_mac_address(slave->dev, &tmp_sa); 4272 tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
4252 if (tmp_res) { 4273 if (tmp_res) {
4253 dprintk("unwind err %d dev %s\n", tmp_res, 4274 pr_debug("unwind err %d dev %s\n", tmp_res,
4254 slave->dev->name); 4275 slave->dev->name);
4255 } 4276 }
4256 } 4277 }
@@ -4260,7 +4281,7 @@ unwind:
4260 4281
4261static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev) 4282static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
4262{ 4283{
4263 struct bonding *bond = bond_dev->priv; 4284 struct bonding *bond = netdev_priv(bond_dev);
4264 struct slave *slave, *start_at; 4285 struct slave *slave, *start_at;
4265 int i, slave_no, res = 1; 4286 int i, slave_no, res = 1;
4266 4287
@@ -4309,7 +4330,7 @@ out:
4309 */ 4330 */
4310static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev) 4331static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev)
4311{ 4332{
4312 struct bonding *bond = bond_dev->priv; 4333 struct bonding *bond = netdev_priv(bond_dev);
4313 int res = 1; 4334 int res = 1;
4314 4335
4315 read_lock(&bond->lock); 4336 read_lock(&bond->lock);
@@ -4341,7 +4362,7 @@ out:
4341 */ 4362 */
4342static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) 4363static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
4343{ 4364{
4344 struct bonding *bond = bond_dev->priv; 4365 struct bonding *bond = netdev_priv(bond_dev);
4345 struct slave *slave, *start_at; 4366 struct slave *slave, *start_at;
4346 int slave_no; 4367 int slave_no;
4347 int i; 4368 int i;
@@ -4387,7 +4408,7 @@ out:
4387 */ 4408 */
4388static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) 4409static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
4389{ 4410{
4390 struct bonding *bond = bond_dev->priv; 4411 struct bonding *bond = netdev_priv(bond_dev);
4391 struct slave *slave, *start_at; 4412 struct slave *slave, *start_at;
4392 struct net_device *tx_dev = NULL; 4413 struct net_device *tx_dev = NULL;
4393 int i; 4414 int i;
@@ -4463,6 +4484,35 @@ static void bond_set_xmit_hash_policy(struct bonding *bond)
4463 } 4484 }
4464} 4485}
4465 4486
4487static int bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4488{
4489 const struct bonding *bond = netdev_priv(dev);
4490
4491 switch (bond->params.mode) {
4492 case BOND_MODE_ROUNDROBIN:
4493 return bond_xmit_roundrobin(skb, dev);
4494 case BOND_MODE_ACTIVEBACKUP:
4495 return bond_xmit_activebackup(skb, dev);
4496 case BOND_MODE_XOR:
4497 return bond_xmit_xor(skb, dev);
4498 case BOND_MODE_BROADCAST:
4499 return bond_xmit_broadcast(skb, dev);
4500 case BOND_MODE_8023AD:
4501 return bond_3ad_xmit_xor(skb, dev);
4502 case BOND_MODE_ALB:
4503 case BOND_MODE_TLB:
4504 return bond_alb_xmit(skb, dev);
4505 default:
4506 /* Should never happen, mode already checked */
4507 printk(KERN_ERR DRV_NAME ": %s: Error: Unknown bonding mode %d\n",
4508 dev->name, bond->params.mode);
4509 WARN_ON_ONCE(1);
4510 dev_kfree_skb(skb);
4511 return NETDEV_TX_OK;
4512 }
4513}
4514
4515
4466/* 4516/*
4467 * set bond mode specific net device operations 4517 * set bond mode specific net device operations
4468 */ 4518 */
@@ -4472,29 +4522,22 @@ void bond_set_mode_ops(struct bonding *bond, int mode)
4472 4522
4473 switch (mode) { 4523 switch (mode) {
4474 case BOND_MODE_ROUNDROBIN: 4524 case BOND_MODE_ROUNDROBIN:
4475 bond_dev->hard_start_xmit = bond_xmit_roundrobin;
4476 break; 4525 break;
4477 case BOND_MODE_ACTIVEBACKUP: 4526 case BOND_MODE_ACTIVEBACKUP:
4478 bond_dev->hard_start_xmit = bond_xmit_activebackup;
4479 break; 4527 break;
4480 case BOND_MODE_XOR: 4528 case BOND_MODE_XOR:
4481 bond_dev->hard_start_xmit = bond_xmit_xor;
4482 bond_set_xmit_hash_policy(bond); 4529 bond_set_xmit_hash_policy(bond);
4483 break; 4530 break;
4484 case BOND_MODE_BROADCAST: 4531 case BOND_MODE_BROADCAST:
4485 bond_dev->hard_start_xmit = bond_xmit_broadcast;
4486 break; 4532 break;
4487 case BOND_MODE_8023AD: 4533 case BOND_MODE_8023AD:
4488 bond_set_master_3ad_flags(bond); 4534 bond_set_master_3ad_flags(bond);
4489 bond_dev->hard_start_xmit = bond_3ad_xmit_xor;
4490 bond_set_xmit_hash_policy(bond); 4535 bond_set_xmit_hash_policy(bond);
4491 break; 4536 break;
4492 case BOND_MODE_ALB: 4537 case BOND_MODE_ALB:
4493 bond_set_master_alb_flags(bond); 4538 bond_set_master_alb_flags(bond);
4494 /* FALLTHRU */ 4539 /* FALLTHRU */
4495 case BOND_MODE_TLB: 4540 case BOND_MODE_TLB:
4496 bond_dev->hard_start_xmit = bond_alb_xmit;
4497 bond_dev->set_mac_address = bond_alb_set_mac_address;
4498 break; 4541 break;
4499 default: 4542 default:
4500 /* Should never happen, mode already checked */ 4543 /* Should never happen, mode already checked */
@@ -4524,15 +4567,30 @@ static const struct ethtool_ops bond_ethtool_ops = {
4524 .get_flags = ethtool_op_get_flags, 4567 .get_flags = ethtool_op_get_flags,
4525}; 4568};
4526 4569
4570static const struct net_device_ops bond_netdev_ops = {
4571 .ndo_open = bond_open,
4572 .ndo_stop = bond_close,
4573 .ndo_start_xmit = bond_start_xmit,
4574 .ndo_get_stats = bond_get_stats,
4575 .ndo_do_ioctl = bond_do_ioctl,
4576 .ndo_set_multicast_list = bond_set_multicast_list,
4577 .ndo_change_mtu = bond_change_mtu,
4578 .ndo_set_mac_address = bond_set_mac_address,
4579 .ndo_neigh_setup = bond_neigh_setup,
4580 .ndo_vlan_rx_register = bond_vlan_rx_register,
4581 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
4582 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
4583};
4584
4527/* 4585/*
4528 * Does not allocate but creates a /proc entry. 4586 * Does not allocate but creates a /proc entry.
4529 * Allowed to fail. 4587 * Allowed to fail.
4530 */ 4588 */
4531static int bond_init(struct net_device *bond_dev, struct bond_params *params) 4589static int bond_init(struct net_device *bond_dev, struct bond_params *params)
4532{ 4590{
4533 struct bonding *bond = bond_dev->priv; 4591 struct bonding *bond = netdev_priv(bond_dev);
4534 4592
4535 dprintk("Begin bond_init for %s\n", bond_dev->name); 4593 pr_debug("Begin bond_init for %s\n", bond_dev->name);
4536 4594
4537 /* initialize rwlocks */ 4595 /* initialize rwlocks */
4538 rwlock_init(&bond->lock); 4596 rwlock_init(&bond->lock);
@@ -4551,20 +4609,13 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
4551 bond->primary_slave = NULL; 4609 bond->primary_slave = NULL;
4552 bond->dev = bond_dev; 4610 bond->dev = bond_dev;
4553 bond->send_grat_arp = 0; 4611 bond->send_grat_arp = 0;
4612 bond->send_unsol_na = 0;
4554 bond->setup_by_slave = 0; 4613 bond->setup_by_slave = 0;
4555 INIT_LIST_HEAD(&bond->vlan_list); 4614 INIT_LIST_HEAD(&bond->vlan_list);
4556 4615
4557 /* Initialize the device entry points */ 4616 /* Initialize the device entry points */
4558 bond_dev->open = bond_open; 4617 bond_dev->netdev_ops = &bond_netdev_ops;
4559 bond_dev->stop = bond_close;
4560 bond_dev->get_stats = bond_get_stats;
4561 bond_dev->do_ioctl = bond_do_ioctl;
4562 bond_dev->ethtool_ops = &bond_ethtool_ops; 4618 bond_dev->ethtool_ops = &bond_ethtool_ops;
4563 bond_dev->set_multicast_list = bond_set_multicast_list;
4564 bond_dev->change_mtu = bond_change_mtu;
4565 bond_dev->set_mac_address = bond_set_mac_address;
4566 bond_dev->validate_addr = NULL;
4567
4568 bond_set_mode_ops(bond, bond->params.mode); 4619 bond_set_mode_ops(bond, bond->params.mode);
4569 4620
4570 bond_dev->destructor = bond_destructor; 4621 bond_dev->destructor = bond_destructor;
@@ -4573,6 +4624,8 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
4573 bond_dev->tx_queue_len = 0; 4624 bond_dev->tx_queue_len = 0;
4574 bond_dev->flags |= IFF_MASTER|IFF_MULTICAST; 4625 bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
4575 bond_dev->priv_flags |= IFF_BONDING; 4626 bond_dev->priv_flags |= IFF_BONDING;
4627 if (bond->params.arp_interval)
4628 bond_dev->priv_flags |= IFF_MASTER_ARPMON;
4576 4629
4577 /* At first, we block adding VLANs. That's the only way to 4630 /* At first, we block adding VLANs. That's the only way to
4578 * prevent problems that occur when adding VLANs over an 4631 * prevent problems that occur when adding VLANs over an
@@ -4591,9 +4644,6 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
4591 * when there are slaves that are not hw accel 4644 * when there are slaves that are not hw accel
4592 * capable 4645 * capable
4593 */ 4646 */
4594 bond_dev->vlan_rx_register = bond_vlan_rx_register;
4595 bond_dev->vlan_rx_add_vid = bond_vlan_rx_add_vid;
4596 bond_dev->vlan_rx_kill_vid = bond_vlan_rx_kill_vid;
4597 bond_dev->features |= (NETIF_F_HW_VLAN_TX | 4647 bond_dev->features |= (NETIF_F_HW_VLAN_TX |
4598 NETIF_F_HW_VLAN_RX | 4648 NETIF_F_HW_VLAN_RX |
4599 NETIF_F_HW_VLAN_FILTER); 4649 NETIF_F_HW_VLAN_FILTER);
@@ -4632,7 +4682,7 @@ static void bond_work_cancel_all(struct bonding *bond)
4632 */ 4682 */
4633static void bond_deinit(struct net_device *bond_dev) 4683static void bond_deinit(struct net_device *bond_dev)
4634{ 4684{
4635 struct bonding *bond = bond_dev->priv; 4685 struct bonding *bond = netdev_priv(bond_dev);
4636 4686
4637 list_del(&bond->bond_list); 4687 list_del(&bond->bond_list);
4638 4688
@@ -4672,7 +4722,7 @@ static void bond_free_all(void)
4672 * some mode names are substrings of other names, and calls from sysfs 4722 * some mode names are substrings of other names, and calls from sysfs
4673 * may have whitespace in the name (trailing newlines, for example). 4723 * may have whitespace in the name (trailing newlines, for example).
4674 */ 4724 */
4675int bond_parse_parm(const char *buf, struct bond_parm_tbl *tbl) 4725int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
4676{ 4726{
4677 int mode = -1, i, rv; 4727 int mode = -1, i, rv;
4678 char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, }; 4728 char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
@@ -4751,6 +4801,23 @@ static int bond_check_params(struct bond_params *params)
4751 } 4801 }
4752 } 4802 }
4753 4803
4804 if (ad_select) {
4805 params->ad_select = bond_parse_parm(ad_select, ad_select_tbl);
4806 if (params->ad_select == -1) {
4807 printk(KERN_ERR DRV_NAME
4808 ": Error: Invalid ad_select \"%s\"\n",
4809 ad_select == NULL ? "NULL" : ad_select);
4810 return -EINVAL;
4811 }
4812
4813 if (bond_mode != BOND_MODE_8023AD) {
4814 printk(KERN_WARNING DRV_NAME
4815 ": ad_select param only affects 802.3ad mode\n");
4816 }
4817 } else {
4818 params->ad_select = BOND_AD_STABLE;
4819 }
4820
4754 if (max_bonds < 0 || max_bonds > INT_MAX) { 4821 if (max_bonds < 0 || max_bonds > INT_MAX) {
4755 printk(KERN_WARNING DRV_NAME 4822 printk(KERN_WARNING DRV_NAME
4756 ": Warning: max_bonds (%d) not in range %d-%d, so it " 4823 ": Warning: max_bonds (%d) not in range %d-%d, so it "
@@ -4798,6 +4865,13 @@ static int bond_check_params(struct bond_params *params)
4798 num_grat_arp = 1; 4865 num_grat_arp = 1;
4799 } 4866 }
4800 4867
4868 if (num_unsol_na < 0 || num_unsol_na > 255) {
4869 printk(KERN_WARNING DRV_NAME
4870 ": Warning: num_unsol_na (%d) not in range 0-255 so it "
4871 "was reset to 1 \n", num_unsol_na);
4872 num_unsol_na = 1;
4873 }
4874
4801 /* reset values for 802.3ad */ 4875 /* reset values for 802.3ad */
4802 if (bond_mode == BOND_MODE_8023AD) { 4876 if (bond_mode == BOND_MODE_8023AD) {
4803 if (!miimon) { 4877 if (!miimon) {
@@ -4999,6 +5073,7 @@ static int bond_check_params(struct bond_params *params)
4999 params->xmit_policy = xmit_hashtype; 5073 params->xmit_policy = xmit_hashtype;
5000 params->miimon = miimon; 5074 params->miimon = miimon;
5001 params->num_grat_arp = num_grat_arp; 5075 params->num_grat_arp = num_grat_arp;
5076 params->num_unsol_na = num_unsol_na;
5002 params->arp_interval = arp_interval; 5077 params->arp_interval = arp_interval;
5003 params->arp_validate = arp_validate_value; 5078 params->arp_validate = arp_validate_value;
5004 params->updelay = updelay; 5079 params->updelay = updelay;
@@ -5099,7 +5174,7 @@ int bond_create(char *name, struct bond_params *params)
5099 5174
5100 up_write(&bonding_rwsem); 5175 up_write(&bonding_rwsem);
5101 rtnl_unlock(); /* allows sysfs registration of net device */ 5176 rtnl_unlock(); /* allows sysfs registration of net device */
5102 res = bond_create_sysfs_entry(bond_dev->priv); 5177 res = bond_create_sysfs_entry(netdev_priv(bond_dev));
5103 if (res < 0) { 5178 if (res < 0) {
5104 rtnl_lock(); 5179 rtnl_lock();
5105 down_write(&bonding_rwsem); 5180 down_write(&bonding_rwsem);
@@ -5151,6 +5226,7 @@ static int __init bonding_init(void)
5151 5226
5152 register_netdevice_notifier(&bond_netdev_notifier); 5227 register_netdevice_notifier(&bond_netdev_notifier);
5153 register_inetaddr_notifier(&bond_inetaddr_notifier); 5228 register_inetaddr_notifier(&bond_inetaddr_notifier);
5229 bond_register_ipv6_notifier();
5154 5230
5155 goto out; 5231 goto out;
5156err: 5232err:
@@ -5173,6 +5249,7 @@ static void __exit bonding_exit(void)
5173{ 5249{
5174 unregister_netdevice_notifier(&bond_netdev_notifier); 5250 unregister_netdevice_notifier(&bond_netdev_notifier);
5175 unregister_inetaddr_notifier(&bond_inetaddr_notifier); 5251 unregister_inetaddr_notifier(&bond_inetaddr_notifier);
5252 bond_unregister_ipv6_notifier();
5176 5253
5177 bond_destroy_sysfs(); 5254 bond_destroy_sysfs();
5178 5255
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 3bdb47382521..18cf4787874c 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -36,22 +36,13 @@
36#include <linux/rtnetlink.h> 36#include <linux/rtnetlink.h>
37#include <net/net_namespace.h> 37#include <net/net_namespace.h>
38 38
39/* #define BONDING_DEBUG 1 */
40#include "bonding.h" 39#include "bonding.h"
40
41#define to_dev(obj) container_of(obj,struct device,kobj) 41#define to_dev(obj) container_of(obj,struct device,kobj)
42#define to_bond(cd) ((struct bonding *)(to_net_dev(cd)->priv)) 42#define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd))))
43 43
44/*---------------------------- Declarations -------------------------------*/ 44/*---------------------------- Declarations -------------------------------*/
45 45
46
47extern struct list_head bond_dev_list;
48extern struct bond_params bonding_defaults;
49extern struct bond_parm_tbl bond_mode_tbl[];
50extern struct bond_parm_tbl bond_lacp_tbl[];
51extern struct bond_parm_tbl xmit_hashtype_tbl[];
52extern struct bond_parm_tbl arp_validate_tbl[];
53extern struct bond_parm_tbl fail_over_mac_tbl[];
54
55static int expected_refcount = -1; 46static int expected_refcount = -1;
56/*--------------------------- Data Structures -----------------------------*/ 47/*--------------------------- Data Structures -----------------------------*/
57 48
@@ -316,18 +307,12 @@ static ssize_t bonding_store_slaves(struct device *d,
316 307
317 /* Set the slave's MTU to match the bond */ 308 /* Set the slave's MTU to match the bond */
318 original_mtu = dev->mtu; 309 original_mtu = dev->mtu;
319 if (dev->mtu != bond->dev->mtu) { 310 res = dev_set_mtu(dev, bond->dev->mtu);
320 if (dev->change_mtu) { 311 if (res) {
321 res = dev->change_mtu(dev, 312 ret = res;
322 bond->dev->mtu); 313 goto out;
323 if (res) {
324 ret = res;
325 goto out;
326 }
327 } else {
328 dev->mtu = bond->dev->mtu;
329 }
330 } 314 }
315
331 res = bond_enslave(bond->dev, dev); 316 res = bond_enslave(bond->dev, dev);
332 bond_for_each_slave(bond, slave, i) 317 bond_for_each_slave(bond, slave, i)
333 if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) 318 if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0)
@@ -356,11 +341,7 @@ static ssize_t bonding_store_slaves(struct device *d,
356 goto out; 341 goto out;
357 } 342 }
358 /* set the slave MTU to the default */ 343 /* set the slave MTU to the default */
359 if (dev->change_mtu) { 344 dev_set_mtu(dev, original_mtu);
360 dev->change_mtu(dev, original_mtu);
361 } else {
362 dev->mtu = original_mtu;
363 }
364 } 345 }
365 else { 346 else {
366 printk(KERN_ERR DRV_NAME ": unable to remove non-existent slave %s for bond %s.\n", 347 printk(KERN_ERR DRV_NAME ": unable to remove non-existent slave %s for bond %s.\n",
@@ -620,6 +601,8 @@ static ssize_t bonding_store_arp_interval(struct device *d,
620 ": %s: Setting ARP monitoring interval to %d.\n", 601 ": %s: Setting ARP monitoring interval to %d.\n",
621 bond->dev->name, new_value); 602 bond->dev->name, new_value);
622 bond->params.arp_interval = new_value; 603 bond->params.arp_interval = new_value;
604 if (bond->params.arp_interval)
605 bond->dev->priv_flags |= IFF_MASTER_ARPMON;
623 if (bond->params.miimon) { 606 if (bond->params.miimon) {
624 printk(KERN_INFO DRV_NAME 607 printk(KERN_INFO DRV_NAME
625 ": %s: ARP monitoring cannot be used with MII monitoring. " 608 ": %s: ARP monitoring cannot be used with MII monitoring. "
@@ -672,8 +655,8 @@ static ssize_t bonding_show_arp_targets(struct device *d,
672 655
673 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) { 656 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
674 if (bond->params.arp_targets[i]) 657 if (bond->params.arp_targets[i])
675 res += sprintf(buf + res, "%u.%u.%u.%u ", 658 res += sprintf(buf + res, "%pI4 ",
676 NIPQUAD(bond->params.arp_targets[i])); 659 &bond->params.arp_targets[i]);
677 } 660 }
678 if (res) 661 if (res)
679 buf[res-1] = '\n'; /* eat the leftover space */ 662 buf[res-1] = '\n'; /* eat the leftover space */
@@ -695,8 +678,8 @@ static ssize_t bonding_store_arp_targets(struct device *d,
695 if (buf[0] == '+') { 678 if (buf[0] == '+') {
696 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { 679 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
697 printk(KERN_ERR DRV_NAME 680 printk(KERN_ERR DRV_NAME
698 ": %s: invalid ARP target %u.%u.%u.%u specified for addition\n", 681 ": %s: invalid ARP target %pI4 specified for addition\n",
699 bond->dev->name, NIPQUAD(newtarget)); 682 bond->dev->name, &newtarget);
700 ret = -EINVAL; 683 ret = -EINVAL;
701 goto out; 684 goto out;
702 } 685 }
@@ -704,8 +687,8 @@ static ssize_t bonding_store_arp_targets(struct device *d,
704 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { 687 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
705 if (targets[i] == newtarget) { /* duplicate */ 688 if (targets[i] == newtarget) { /* duplicate */
706 printk(KERN_ERR DRV_NAME 689 printk(KERN_ERR DRV_NAME
707 ": %s: ARP target %u.%u.%u.%u is already present\n", 690 ": %s: ARP target %pI4 is already present\n",
708 bond->dev->name, NIPQUAD(newtarget)); 691 bond->dev->name, &newtarget);
709 if (done) 692 if (done)
710 targets[i] = 0; 693 targets[i] = 0;
711 ret = -EINVAL; 694 ret = -EINVAL;
@@ -713,8 +696,8 @@ static ssize_t bonding_store_arp_targets(struct device *d,
713 } 696 }
714 if (targets[i] == 0 && !done) { 697 if (targets[i] == 0 && !done) {
715 printk(KERN_INFO DRV_NAME 698 printk(KERN_INFO DRV_NAME
716 ": %s: adding ARP target %d.%d.%d.%d.\n", 699 ": %s: adding ARP target %pI4.\n",
717 bond->dev->name, NIPQUAD(newtarget)); 700 bond->dev->name, &newtarget);
718 done = 1; 701 done = 1;
719 targets[i] = newtarget; 702 targets[i] = newtarget;
720 } 703 }
@@ -731,8 +714,8 @@ static ssize_t bonding_store_arp_targets(struct device *d,
731 else if (buf[0] == '-') { 714 else if (buf[0] == '-') {
732 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { 715 if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) {
733 printk(KERN_ERR DRV_NAME 716 printk(KERN_ERR DRV_NAME
734 ": %s: invalid ARP target %d.%d.%d.%d specified for removal\n", 717 ": %s: invalid ARP target %pI4 specified for removal\n",
735 bond->dev->name, NIPQUAD(newtarget)); 718 bond->dev->name, &newtarget);
736 ret = -EINVAL; 719 ret = -EINVAL;
737 goto out; 720 goto out;
738 } 721 }
@@ -740,16 +723,16 @@ static ssize_t bonding_store_arp_targets(struct device *d,
740 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { 723 for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
741 if (targets[i] == newtarget) { 724 if (targets[i] == newtarget) {
742 printk(KERN_INFO DRV_NAME 725 printk(KERN_INFO DRV_NAME
743 ": %s: removing ARP target %d.%d.%d.%d.\n", 726 ": %s: removing ARP target %pI4.\n",
744 bond->dev->name, NIPQUAD(newtarget)); 727 bond->dev->name, &newtarget);
745 targets[i] = 0; 728 targets[i] = 0;
746 done = 1; 729 done = 1;
747 } 730 }
748 } 731 }
749 if (!done) { 732 if (!done) {
750 printk(KERN_INFO DRV_NAME 733 printk(KERN_INFO DRV_NAME
751 ": %s: unable to remove nonexistent ARP target %d.%d.%d.%d.\n", 734 ": %s: unable to remove nonexistent ARP target %pI4.\n",
752 bond->dev->name, NIPQUAD(newtarget)); 735 bond->dev->name, &newtarget);
753 ret = -EINVAL; 736 ret = -EINVAL;
754 goto out; 737 goto out;
755 } 738 }
@@ -942,6 +925,53 @@ out:
942} 925}
943static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp); 926static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp);
944 927
928static ssize_t bonding_show_ad_select(struct device *d,
929 struct device_attribute *attr,
930 char *buf)
931{
932 struct bonding *bond = to_bond(d);
933
934 return sprintf(buf, "%s %d\n",
935 ad_select_tbl[bond->params.ad_select].modename,
936 bond->params.ad_select);
937}
938
939
940static ssize_t bonding_store_ad_select(struct device *d,
941 struct device_attribute *attr,
942 const char *buf, size_t count)
943{
944 int new_value, ret = count;
945 struct bonding *bond = to_bond(d);
946
947 if (bond->dev->flags & IFF_UP) {
948 printk(KERN_ERR DRV_NAME
949 ": %s: Unable to update ad_select because interface "
950 "is up.\n", bond->dev->name);
951 ret = -EPERM;
952 goto out;
953 }
954
955 new_value = bond_parse_parm(buf, ad_select_tbl);
956
957 if (new_value != -1) {
958 bond->params.ad_select = new_value;
959 printk(KERN_INFO DRV_NAME
960 ": %s: Setting ad_select to %s (%d).\n",
961 bond->dev->name, ad_select_tbl[new_value].modename,
962 new_value);
963 } else {
964 printk(KERN_ERR DRV_NAME
965 ": %s: Ignoring invalid ad_select value %.*s.\n",
966 bond->dev->name, (int)strlen(buf) - 1, buf);
967 ret = -EINVAL;
968 }
969out:
970 return ret;
971}
972
973static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR, bonding_show_ad_select, bonding_store_ad_select);
974
945/* 975/*
946 * Show and set the number of grat ARP to send after a failover event. 976 * Show and set the number of grat ARP to send after a failover event.
947 */ 977 */
@@ -981,6 +1011,47 @@ out:
981 return ret; 1011 return ret;
982} 1012}
983static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR, bonding_show_n_grat_arp, bonding_store_n_grat_arp); 1013static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR, bonding_show_n_grat_arp, bonding_store_n_grat_arp);
1014
1015/*
1016 * Show and set the number of unsolicted NA's to send after a failover event.
1017 */
1018static ssize_t bonding_show_n_unsol_na(struct device *d,
1019 struct device_attribute *attr,
1020 char *buf)
1021{
1022 struct bonding *bond = to_bond(d);
1023
1024 return sprintf(buf, "%d\n", bond->params.num_unsol_na);
1025}
1026
1027static ssize_t bonding_store_n_unsol_na(struct device *d,
1028 struct device_attribute *attr,
1029 const char *buf, size_t count)
1030{
1031 int new_value, ret = count;
1032 struct bonding *bond = to_bond(d);
1033
1034 if (sscanf(buf, "%d", &new_value) != 1) {
1035 printk(KERN_ERR DRV_NAME
1036 ": %s: no num_unsol_na value specified.\n",
1037 bond->dev->name);
1038 ret = -EINVAL;
1039 goto out;
1040 }
1041 if (new_value < 0 || new_value > 255) {
1042 printk(KERN_ERR DRV_NAME
1043 ": %s: Invalid num_unsol_na value %d not in range 0-255; rejected.\n",
1044 bond->dev->name, new_value);
1045 ret = -EINVAL;
1046 goto out;
1047 } else {
1048 bond->params.num_unsol_na = new_value;
1049 }
1050out:
1051 return ret;
1052}
1053static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR, bonding_show_n_unsol_na, bonding_store_n_unsol_na);
1054
984/* 1055/*
985 * Show and set the MII monitor interval. There are two tricky bits 1056 * Show and set the MII monitor interval. There are two tricky bits
986 * here. First, if MII monitoring is activated, then we must disable 1057 * here. First, if MII monitoring is activated, then we must disable
@@ -1039,6 +1110,7 @@ static ssize_t bonding_store_miimon(struct device *d,
1039 "ARP monitoring. Disabling ARP monitoring...\n", 1110 "ARP monitoring. Disabling ARP monitoring...\n",
1040 bond->dev->name); 1111 bond->dev->name);
1041 bond->params.arp_interval = 0; 1112 bond->params.arp_interval = 0;
1113 bond->dev->priv_flags &= ~IFF_MASTER_ARPMON;
1042 if (bond->params.arp_validate) { 1114 if (bond->params.arp_validate) {
1043 bond_unregister_arp(bond); 1115 bond_unregister_arp(bond);
1044 bond->params.arp_validate = 1116 bond->params.arp_validate =
@@ -1391,13 +1463,11 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
1391{ 1463{
1392 int count = 0; 1464 int count = 0;
1393 struct bonding *bond = to_bond(d); 1465 struct bonding *bond = to_bond(d);
1394 DECLARE_MAC_BUF(mac);
1395 1466
1396 if (bond->params.mode == BOND_MODE_8023AD) { 1467 if (bond->params.mode == BOND_MODE_8023AD) {
1397 struct ad_info ad_info; 1468 struct ad_info ad_info;
1398 if (!bond_3ad_get_active_agg_info(bond, &ad_info)) { 1469 if (!bond_3ad_get_active_agg_info(bond, &ad_info)) {
1399 count = sprintf(buf,"%s\n", 1470 count = sprintf(buf, "%pM\n", ad_info.partner_system);
1400 print_mac(mac, ad_info.partner_system));
1401 } 1471 }
1402 } 1472 }
1403 1473
@@ -1417,8 +1487,10 @@ static struct attribute *per_bond_attrs[] = {
1417 &dev_attr_downdelay.attr, 1487 &dev_attr_downdelay.attr,
1418 &dev_attr_updelay.attr, 1488 &dev_attr_updelay.attr,
1419 &dev_attr_lacp_rate.attr, 1489 &dev_attr_lacp_rate.attr,
1490 &dev_attr_ad_select.attr,
1420 &dev_attr_xmit_hash_policy.attr, 1491 &dev_attr_xmit_hash_policy.attr,
1421 &dev_attr_num_grat_arp.attr, 1492 &dev_attr_num_grat_arp.attr,
1493 &dev_attr_num_unsol_na.attr,
1422 &dev_attr_miimon.attr, 1494 &dev_attr_miimon.attr,
1423 &dev_attr_primary.attr, 1495 &dev_attr_primary.attr,
1424 &dev_attr_use_carrier.attr, 1496 &dev_attr_use_carrier.attr,
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index ffb668dd6d3b..ca849d2adf98 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -19,23 +19,18 @@
19#include <linux/proc_fs.h> 19#include <linux/proc_fs.h>
20#include <linux/if_bonding.h> 20#include <linux/if_bonding.h>
21#include <linux/kobject.h> 21#include <linux/kobject.h>
22#include <linux/in6.h>
22#include "bond_3ad.h" 23#include "bond_3ad.h"
23#include "bond_alb.h" 24#include "bond_alb.h"
24 25
25#define DRV_VERSION "3.3.0" 26#define DRV_VERSION "3.5.0"
26#define DRV_RELDATE "June 10, 2008" 27#define DRV_RELDATE "November 4, 2008"
27#define DRV_NAME "bonding" 28#define DRV_NAME "bonding"
28#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 29#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
29 30
30#define BOND_MAX_ARP_TARGETS 16 31#define BOND_MAX_ARP_TARGETS 16
31 32
32#ifdef BONDING_DEBUG 33extern struct list_head bond_dev_list;
33#define dprintk(fmt, args...) \
34 printk(KERN_DEBUG \
35 DRV_NAME ": %s() %d: " fmt, __func__, __LINE__ , ## args )
36#else
37#define dprintk(fmt, args...)
38#endif /* BONDING_DEBUG */
39 34
40#define IS_UP(dev) \ 35#define IS_UP(dev) \
41 ((((dev)->flags & IFF_UP) == IFF_UP) && \ 36 ((((dev)->flags & IFF_UP) == IFF_UP) && \
@@ -126,6 +121,7 @@ struct bond_params {
126 int xmit_policy; 121 int xmit_policy;
127 int miimon; 122 int miimon;
128 int num_grat_arp; 123 int num_grat_arp;
124 int num_unsol_na;
129 int arp_interval; 125 int arp_interval;
130 int arp_validate; 126 int arp_validate;
131 int use_carrier; 127 int use_carrier;
@@ -133,6 +129,7 @@ struct bond_params {
133 int updelay; 129 int updelay;
134 int downdelay; 130 int downdelay;
135 int lacp_fast; 131 int lacp_fast;
132 int ad_select;
136 char primary[IFNAMSIZ]; 133 char primary[IFNAMSIZ];
137 __be32 arp_targets[BOND_MAX_ARP_TARGETS]; 134 __be32 arp_targets[BOND_MAX_ARP_TARGETS];
138}; 135};
@@ -148,6 +145,9 @@ struct vlan_entry {
148 struct list_head vlan_list; 145 struct list_head vlan_list;
149 __be32 vlan_ip; 146 __be32 vlan_ip;
150 unsigned short vlan_id; 147 unsigned short vlan_id;
148#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
149 struct in6_addr vlan_ipv6;
150#endif
151}; 151};
152 152
153struct slave { 153struct slave {
@@ -195,6 +195,7 @@ struct bonding {
195 rwlock_t curr_slave_lock; 195 rwlock_t curr_slave_lock;
196 s8 kill_timers; 196 s8 kill_timers;
197 s8 send_grat_arp; 197 s8 send_grat_arp;
198 s8 send_unsol_na;
198 s8 setup_by_slave; 199 s8 setup_by_slave;
199 struct net_device_stats stats; 200 struct net_device_stats stats;
200#ifdef CONFIG_PROC_FS 201#ifdef CONFIG_PROC_FS
@@ -218,6 +219,9 @@ struct bonding {
218 struct delayed_work arp_work; 219 struct delayed_work arp_work;
219 struct delayed_work alb_work; 220 struct delayed_work alb_work;
220 struct delayed_work ad_work; 221 struct delayed_work ad_work;
222#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
223 struct in6_addr master_ipv6;
224#endif
221}; 225};
222 226
223/** 227/**
@@ -245,7 +249,13 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
245 return NULL; 249 return NULL;
246 } 250 }
247 251
248 return (struct bonding *)slave->dev->master->priv; 252 return (struct bonding *)netdev_priv(slave->dev->master);
253}
254
255static inline bool bond_is_lb(const struct bonding *bond)
256{
257 return bond->params.mode == BOND_MODE_TLB
258 || bond->params.mode == BOND_MODE_ALB;
249} 259}
250 260
251#define BOND_FOM_NONE 0 261#define BOND_FOM_NONE 0
@@ -275,7 +285,7 @@ static inline unsigned long slave_last_rx(struct bonding *bond,
275 285
276static inline void bond_set_slave_inactive_flags(struct slave *slave) 286static inline void bond_set_slave_inactive_flags(struct slave *slave)
277{ 287{
278 struct bonding *bond = slave->dev->master->priv; 288 struct bonding *bond = netdev_priv(slave->dev->master);
279 if (bond->params.mode != BOND_MODE_TLB && 289 if (bond->params.mode != BOND_MODE_TLB &&
280 bond->params.mode != BOND_MODE_ALB) 290 bond->params.mode != BOND_MODE_ALB)
281 slave->state = BOND_STATE_BACKUP; 291 slave->state = BOND_STATE_BACKUP;
@@ -327,7 +337,7 @@ void bond_mii_monitor(struct work_struct *);
327void bond_loadbalance_arp_mon(struct work_struct *); 337void bond_loadbalance_arp_mon(struct work_struct *);
328void bond_activebackup_arp_mon(struct work_struct *); 338void bond_activebackup_arp_mon(struct work_struct *);
329void bond_set_mode_ops(struct bonding *bond, int mode); 339void bond_set_mode_ops(struct bonding *bond, int mode);
330int bond_parse_parm(const char *mode_arg, struct bond_parm_tbl *tbl); 340int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
331void bond_select_active_slave(struct bonding *bond); 341void bond_select_active_slave(struct bonding *bond);
332void bond_change_active_slave(struct bonding *bond, struct slave *new_active); 342void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
333void bond_register_arp(struct bonding *); 343void bond_register_arp(struct bonding *);
@@ -335,11 +345,35 @@ void bond_unregister_arp(struct bonding *);
335 345
336/* exported from bond_main.c */ 346/* exported from bond_main.c */
337extern struct list_head bond_dev_list; 347extern struct list_head bond_dev_list;
338extern struct bond_parm_tbl bond_lacp_tbl[]; 348extern const struct bond_parm_tbl bond_lacp_tbl[];
339extern struct bond_parm_tbl bond_mode_tbl[]; 349extern const struct bond_parm_tbl bond_mode_tbl[];
340extern struct bond_parm_tbl xmit_hashtype_tbl[]; 350extern const struct bond_parm_tbl xmit_hashtype_tbl[];
341extern struct bond_parm_tbl arp_validate_tbl[]; 351extern const struct bond_parm_tbl arp_validate_tbl[];
342extern struct bond_parm_tbl fail_over_mac_tbl[]; 352extern const struct bond_parm_tbl fail_over_mac_tbl[];
353extern struct bond_params bonding_defaults;
354extern struct bond_parm_tbl ad_select_tbl[];
355
356/* exported from bond_sysfs.c */
357extern struct rw_semaphore bonding_rwsem;
358
359#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
360void bond_send_unsolicited_na(struct bonding *bond);
361void bond_register_ipv6_notifier(void);
362void bond_unregister_ipv6_notifier(void);
363#else
364static inline void bond_send_unsolicited_na(struct bonding *bond)
365{
366 return;
367}
368static inline void bond_register_ipv6_notifier(void)
369{
370 return;
371}
372static inline void bond_unregister_ipv6_notifier(void)
373{
374 return;
375}
376#endif
343 377
344#endif /* _LINUX_BONDING_H */ 378#endif /* _LINUX_BONDING_H */
345 379
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 103f0f1df280..a10c1d7b3b0a 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -128,26 +128,30 @@ static int vcan_tx(struct sk_buff *skb, struct net_device *dev)
128 return NETDEV_TX_OK; 128 return NETDEV_TX_OK;
129} 129}
130 130
131static const struct net_device_ops vcan_netdev_ops = {
132 .ndo_start_xmit = vcan_tx,
133};
134
131static void vcan_setup(struct net_device *dev) 135static void vcan_setup(struct net_device *dev)
132{ 136{
133 dev->type = ARPHRD_CAN; 137 dev->type = ARPHRD_CAN;
134 dev->mtu = sizeof(struct can_frame); 138 dev->mtu = sizeof(struct can_frame);
135 dev->hard_header_len = 0; 139 dev->hard_header_len = 0;
136 dev->addr_len = 0; 140 dev->addr_len = 0;
137 dev->tx_queue_len = 0; 141 dev->tx_queue_len = 0;
138 dev->flags = IFF_NOARP; 142 dev->flags = IFF_NOARP;
139 143
140 /* set flags according to driver capabilities */ 144 /* set flags according to driver capabilities */
141 if (echo) 145 if (echo)
142 dev->flags |= IFF_ECHO; 146 dev->flags |= IFF_ECHO;
143 147
144 dev->hard_start_xmit = vcan_tx; 148 dev->netdev_ops = &vcan_netdev_ops;
145 dev->destructor = free_netdev; 149 dev->destructor = free_netdev;
146} 150}
147 151
148static struct rtnl_link_ops vcan_link_ops __read_mostly = { 152static struct rtnl_link_ops vcan_link_ops __read_mostly = {
149 .kind = "vcan", 153 .kind = "vcan",
150 .setup = vcan_setup, 154 .setup = vcan_setup,
151}; 155};
152 156
153static __init int vcan_init_module(void) 157static __init int vcan_init_module(void)
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 86909cfb14de..321f43d9f0e2 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2347,7 +2347,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2347 drops = 0; 2347 drops = 0;
2348 while (1) { 2348 while (1) {
2349 struct cas_rx_comp *rxc = rxcs + entry; 2349 struct cas_rx_comp *rxc = rxcs + entry;
2350 struct sk_buff *skb; 2350 struct sk_buff *uninitialized_var(skb);
2351 int type, len; 2351 int type, len;
2352 u64 words[4]; 2352 u64 words[4];
2353 int i, dring; 2353 int i, dring;
@@ -2405,7 +2405,6 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
2405 cp->net_stats[ring].rx_packets++; 2405 cp->net_stats[ring].rx_packets++;
2406 cp->net_stats[ring].rx_bytes += len; 2406 cp->net_stats[ring].rx_bytes += len;
2407 spin_unlock(&cp->stat_lock[ring]); 2407 spin_unlock(&cp->stat_lock[ring]);
2408 cp->dev->last_rx = jiffies;
2409 2408
2410 next: 2409 next:
2411 npackets++; 2410 npackets++;
@@ -2507,7 +2506,7 @@ static irqreturn_t cas_interruptN(int irq, void *dev_id)
2507 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2506 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2508#ifdef USE_NAPI 2507#ifdef USE_NAPI
2509 cas_mask_intr(cp); 2508 cas_mask_intr(cp);
2510 netif_rx_schedule(dev, &cp->napi); 2509 netif_rx_schedule(&cp->napi);
2511#else 2510#else
2512 cas_rx_ringN(cp, ring, 0); 2511 cas_rx_ringN(cp, ring, 0);
2513#endif 2512#endif
@@ -2558,7 +2557,7 @@ static irqreturn_t cas_interrupt1(int irq, void *dev_id)
2558 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ 2557 if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
2559#ifdef USE_NAPI 2558#ifdef USE_NAPI
2560 cas_mask_intr(cp); 2559 cas_mask_intr(cp);
2561 netif_rx_schedule(dev, &cp->napi); 2560 netif_rx_schedule(&cp->napi);
2562#else 2561#else
2563 cas_rx_ringN(cp, 1, 0); 2562 cas_rx_ringN(cp, 1, 0);
2564#endif 2563#endif
@@ -2614,7 +2613,7 @@ static irqreturn_t cas_interrupt(int irq, void *dev_id)
2614 if (status & INTR_RX_DONE) { 2613 if (status & INTR_RX_DONE) {
2615#ifdef USE_NAPI 2614#ifdef USE_NAPI
2616 cas_mask_intr(cp); 2615 cas_mask_intr(cp);
2617 netif_rx_schedule(dev, &cp->napi); 2616 netif_rx_schedule(&cp->napi);
2618#else 2617#else
2619 cas_rx_ringN(cp, 0, 0); 2618 cas_rx_ringN(cp, 0, 0);
2620#endif 2619#endif
@@ -2692,7 +2691,7 @@ rx_comp:
2692#endif 2691#endif
2693 spin_unlock_irqrestore(&cp->lock, flags); 2692 spin_unlock_irqrestore(&cp->lock, flags);
2694 if (enable_intr) { 2693 if (enable_intr) {
2695 netif_rx_complete(dev, napi); 2694 netif_rx_complete(napi);
2696 cas_unmask_intr(cp); 2695 cas_unmask_intr(cp);
2697 } 2696 }
2698 return credits; 2697 return credits;
@@ -4988,7 +4987,6 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
4988 int i, err, pci_using_dac; 4987 int i, err, pci_using_dac;
4989 u16 pci_cmd; 4988 u16 pci_cmd;
4990 u8 orig_cacheline_size = 0, cas_cacheline_size = 0; 4989 u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
4991 DECLARE_MAC_BUF(mac);
4992 4990
4993 if (cas_version_printed++ == 0) 4991 if (cas_version_printed++ == 0)
4994 printk(KERN_INFO "%s", version); 4992 printk(KERN_INFO "%s", version);
@@ -5201,12 +5199,12 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5201 5199
5202 i = readl(cp->regs + REG_BIM_CFG); 5200 i = readl(cp->regs + REG_BIM_CFG);
5203 printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) " 5201 printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
5204 "Ethernet[%d] %s\n", dev->name, 5202 "Ethernet[%d] %pM\n", dev->name,
5205 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", 5203 (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
5206 (i & BIM_CFG_32BIT) ? "32" : "64", 5204 (i & BIM_CFG_32BIT) ? "32" : "64",
5207 (i & BIM_CFG_66MHZ) ? "66" : "33", 5205 (i & BIM_CFG_66MHZ) ? "66" : "33",
5208 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, 5206 (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
5209 print_mac(mac, dev->dev_addr)); 5207 dev->dev_addr);
5210 5208
5211 pci_set_drvdata(pdev, dev); 5209 pci_set_drvdata(pdev, dev);
5212 cp->hw_running = 1; 5210 cp->hw_running = 1;
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index 638c9a27a7a6..9b6011e7678e 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -120,7 +120,7 @@ static const char pci_speed[][4] = {
120 */ 120 */
121static void t1_set_rxmode(struct net_device *dev) 121static void t1_set_rxmode(struct net_device *dev)
122{ 122{
123 struct adapter *adapter = dev->priv; 123 struct adapter *adapter = dev->ml_priv;
124 struct cmac *mac = adapter->port[dev->if_port].mac; 124 struct cmac *mac = adapter->port[dev->if_port].mac;
125 struct t1_rx_mode rm; 125 struct t1_rx_mode rm;
126 126
@@ -252,7 +252,7 @@ static void cxgb_down(struct adapter *adapter)
252static int cxgb_open(struct net_device *dev) 252static int cxgb_open(struct net_device *dev)
253{ 253{
254 int err; 254 int err;
255 struct adapter *adapter = dev->priv; 255 struct adapter *adapter = dev->ml_priv;
256 int other_ports = adapter->open_device_map & PORT_MASK; 256 int other_ports = adapter->open_device_map & PORT_MASK;
257 257
258 napi_enable(&adapter->napi); 258 napi_enable(&adapter->napi);
@@ -272,7 +272,7 @@ static int cxgb_open(struct net_device *dev)
272 272
273static int cxgb_close(struct net_device *dev) 273static int cxgb_close(struct net_device *dev)
274{ 274{
275 struct adapter *adapter = dev->priv; 275 struct adapter *adapter = dev->ml_priv;
276 struct port_info *p = &adapter->port[dev->if_port]; 276 struct port_info *p = &adapter->port[dev->if_port];
277 struct cmac *mac = p->mac; 277 struct cmac *mac = p->mac;
278 278
@@ -298,7 +298,7 @@ static int cxgb_close(struct net_device *dev)
298 298
299static struct net_device_stats *t1_get_stats(struct net_device *dev) 299static struct net_device_stats *t1_get_stats(struct net_device *dev)
300{ 300{
301 struct adapter *adapter = dev->priv; 301 struct adapter *adapter = dev->ml_priv;
302 struct port_info *p = &adapter->port[dev->if_port]; 302 struct port_info *p = &adapter->port[dev->if_port];
303 struct net_device_stats *ns = &p->netstats; 303 struct net_device_stats *ns = &p->netstats;
304 const struct cmac_statistics *pstats; 304 const struct cmac_statistics *pstats;
@@ -346,14 +346,14 @@ static struct net_device_stats *t1_get_stats(struct net_device *dev)
346 346
347static u32 get_msglevel(struct net_device *dev) 347static u32 get_msglevel(struct net_device *dev)
348{ 348{
349 struct adapter *adapter = dev->priv; 349 struct adapter *adapter = dev->ml_priv;
350 350
351 return adapter->msg_enable; 351 return adapter->msg_enable;
352} 352}
353 353
354static void set_msglevel(struct net_device *dev, u32 val) 354static void set_msglevel(struct net_device *dev, u32 val)
355{ 355{
356 struct adapter *adapter = dev->priv; 356 struct adapter *adapter = dev->ml_priv;
357 357
358 adapter->msg_enable = val; 358 adapter->msg_enable = val;
359} 359}
@@ -434,7 +434,7 @@ static int get_regs_len(struct net_device *dev)
434 434
435static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 435static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
436{ 436{
437 struct adapter *adapter = dev->priv; 437 struct adapter *adapter = dev->ml_priv;
438 438
439 strcpy(info->driver, DRV_NAME); 439 strcpy(info->driver, DRV_NAME);
440 strcpy(info->version, DRV_VERSION); 440 strcpy(info->version, DRV_VERSION);
@@ -461,7 +461,7 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
461static void get_stats(struct net_device *dev, struct ethtool_stats *stats, 461static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
462 u64 *data) 462 u64 *data)
463{ 463{
464 struct adapter *adapter = dev->priv; 464 struct adapter *adapter = dev->ml_priv;
465 struct cmac *mac = adapter->port[dev->if_port].mac; 465 struct cmac *mac = adapter->port[dev->if_port].mac;
466 const struct cmac_statistics *s; 466 const struct cmac_statistics *s;
467 const struct sge_intr_counts *t; 467 const struct sge_intr_counts *t;
@@ -552,7 +552,7 @@ static inline void reg_block_dump(struct adapter *ap, void *buf,
552static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 552static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
553 void *buf) 553 void *buf)
554{ 554{
555 struct adapter *ap = dev->priv; 555 struct adapter *ap = dev->ml_priv;
556 556
557 /* 557 /*
558 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision 558 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
@@ -574,7 +574,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
574 574
575static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 575static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
576{ 576{
577 struct adapter *adapter = dev->priv; 577 struct adapter *adapter = dev->ml_priv;
578 struct port_info *p = &adapter->port[dev->if_port]; 578 struct port_info *p = &adapter->port[dev->if_port];
579 579
580 cmd->supported = p->link_config.supported; 580 cmd->supported = p->link_config.supported;
@@ -634,7 +634,7 @@ static int speed_duplex_to_caps(int speed, int duplex)
634 634
635static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 635static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
636{ 636{
637 struct adapter *adapter = dev->priv; 637 struct adapter *adapter = dev->ml_priv;
638 struct port_info *p = &adapter->port[dev->if_port]; 638 struct port_info *p = &adapter->port[dev->if_port];
639 struct link_config *lc = &p->link_config; 639 struct link_config *lc = &p->link_config;
640 640
@@ -669,7 +669,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
669static void get_pauseparam(struct net_device *dev, 669static void get_pauseparam(struct net_device *dev,
670 struct ethtool_pauseparam *epause) 670 struct ethtool_pauseparam *epause)
671{ 671{
672 struct adapter *adapter = dev->priv; 672 struct adapter *adapter = dev->ml_priv;
673 struct port_info *p = &adapter->port[dev->if_port]; 673 struct port_info *p = &adapter->port[dev->if_port];
674 674
675 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0; 675 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
@@ -680,7 +680,7 @@ static void get_pauseparam(struct net_device *dev,
680static int set_pauseparam(struct net_device *dev, 680static int set_pauseparam(struct net_device *dev,
681 struct ethtool_pauseparam *epause) 681 struct ethtool_pauseparam *epause)
682{ 682{
683 struct adapter *adapter = dev->priv; 683 struct adapter *adapter = dev->ml_priv;
684 struct port_info *p = &adapter->port[dev->if_port]; 684 struct port_info *p = &adapter->port[dev->if_port];
685 struct link_config *lc = &p->link_config; 685 struct link_config *lc = &p->link_config;
686 686
@@ -709,14 +709,14 @@ static int set_pauseparam(struct net_device *dev,
709 709
710static u32 get_rx_csum(struct net_device *dev) 710static u32 get_rx_csum(struct net_device *dev)
711{ 711{
712 struct adapter *adapter = dev->priv; 712 struct adapter *adapter = dev->ml_priv;
713 713
714 return (adapter->flags & RX_CSUM_ENABLED) != 0; 714 return (adapter->flags & RX_CSUM_ENABLED) != 0;
715} 715}
716 716
717static int set_rx_csum(struct net_device *dev, u32 data) 717static int set_rx_csum(struct net_device *dev, u32 data)
718{ 718{
719 struct adapter *adapter = dev->priv; 719 struct adapter *adapter = dev->ml_priv;
720 720
721 if (data) 721 if (data)
722 adapter->flags |= RX_CSUM_ENABLED; 722 adapter->flags |= RX_CSUM_ENABLED;
@@ -727,7 +727,7 @@ static int set_rx_csum(struct net_device *dev, u32 data)
727 727
728static int set_tso(struct net_device *dev, u32 value) 728static int set_tso(struct net_device *dev, u32 value)
729{ 729{
730 struct adapter *adapter = dev->priv; 730 struct adapter *adapter = dev->ml_priv;
731 731
732 if (!(adapter->flags & TSO_CAPABLE)) 732 if (!(adapter->flags & TSO_CAPABLE))
733 return value ? -EOPNOTSUPP : 0; 733 return value ? -EOPNOTSUPP : 0;
@@ -736,7 +736,7 @@ static int set_tso(struct net_device *dev, u32 value)
736 736
737static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) 737static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
738{ 738{
739 struct adapter *adapter = dev->priv; 739 struct adapter *adapter = dev->ml_priv;
740 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; 740 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
741 741
742 e->rx_max_pending = MAX_RX_BUFFERS; 742 e->rx_max_pending = MAX_RX_BUFFERS;
@@ -752,7 +752,7 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
752 752
753static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) 753static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
754{ 754{
755 struct adapter *adapter = dev->priv; 755 struct adapter *adapter = dev->ml_priv;
756 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0; 756 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
757 757
758 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending || 758 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
@@ -776,7 +776,7 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
776 776
777static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 777static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
778{ 778{
779 struct adapter *adapter = dev->priv; 779 struct adapter *adapter = dev->ml_priv;
780 780
781 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs; 781 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
782 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce; 782 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
@@ -787,7 +787,7 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
787 787
788static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 788static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
789{ 789{
790 struct adapter *adapter = dev->priv; 790 struct adapter *adapter = dev->ml_priv;
791 791
792 c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs; 792 c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
793 c->rate_sample_interval = adapter->params.sge.sample_interval_usecs; 793 c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
@@ -797,7 +797,7 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
797 797
798static int get_eeprom_len(struct net_device *dev) 798static int get_eeprom_len(struct net_device *dev)
799{ 799{
800 struct adapter *adapter = dev->priv; 800 struct adapter *adapter = dev->ml_priv;
801 801
802 return t1_is_asic(adapter) ? EEPROM_SIZE : 0; 802 return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
803} 803}
@@ -810,7 +810,7 @@ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
810{ 810{
811 int i; 811 int i;
812 u8 buf[EEPROM_SIZE] __attribute__((aligned(4))); 812 u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
813 struct adapter *adapter = dev->priv; 813 struct adapter *adapter = dev->ml_priv;
814 814
815 e->magic = EEPROM_MAGIC(adapter); 815 e->magic = EEPROM_MAGIC(adapter);
816 for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32)) 816 for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
@@ -848,7 +848,7 @@ static const struct ethtool_ops t1_ethtool_ops = {
848 848
849static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 849static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
850{ 850{
851 struct adapter *adapter = dev->priv; 851 struct adapter *adapter = dev->ml_priv;
852 struct mii_ioctl_data *data = if_mii(req); 852 struct mii_ioctl_data *data = if_mii(req);
853 853
854 switch (cmd) { 854 switch (cmd) {
@@ -887,7 +887,7 @@ static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
887static int t1_change_mtu(struct net_device *dev, int new_mtu) 887static int t1_change_mtu(struct net_device *dev, int new_mtu)
888{ 888{
889 int ret; 889 int ret;
890 struct adapter *adapter = dev->priv; 890 struct adapter *adapter = dev->ml_priv;
891 struct cmac *mac = adapter->port[dev->if_port].mac; 891 struct cmac *mac = adapter->port[dev->if_port].mac;
892 892
893 if (!mac->ops->set_mtu) 893 if (!mac->ops->set_mtu)
@@ -902,7 +902,7 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
902 902
903static int t1_set_mac_addr(struct net_device *dev, void *p) 903static int t1_set_mac_addr(struct net_device *dev, void *p)
904{ 904{
905 struct adapter *adapter = dev->priv; 905 struct adapter *adapter = dev->ml_priv;
906 struct cmac *mac = adapter->port[dev->if_port].mac; 906 struct cmac *mac = adapter->port[dev->if_port].mac;
907 struct sockaddr *addr = p; 907 struct sockaddr *addr = p;
908 908
@@ -915,10 +915,10 @@ static int t1_set_mac_addr(struct net_device *dev, void *p)
915} 915}
916 916
917#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 917#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
918static void vlan_rx_register(struct net_device *dev, 918static void t1_vlan_rx_register(struct net_device *dev,
919 struct vlan_group *grp) 919 struct vlan_group *grp)
920{ 920{
921 struct adapter *adapter = dev->priv; 921 struct adapter *adapter = dev->ml_priv;
922 922
923 spin_lock_irq(&adapter->async_lock); 923 spin_lock_irq(&adapter->async_lock);
924 adapter->vlan_grp = grp; 924 adapter->vlan_grp = grp;
@@ -931,7 +931,7 @@ static void vlan_rx_register(struct net_device *dev,
931static void t1_netpoll(struct net_device *dev) 931static void t1_netpoll(struct net_device *dev)
932{ 932{
933 unsigned long flags; 933 unsigned long flags;
934 struct adapter *adapter = dev->priv; 934 struct adapter *adapter = dev->ml_priv;
935 935
936 local_irq_save(flags); 936 local_irq_save(flags);
937 t1_interrupt(adapter->pdev->irq, adapter); 937 t1_interrupt(adapter->pdev->irq, adapter);
@@ -1010,6 +1010,24 @@ void t1_fatal_err(struct adapter *adapter)
1010 adapter->name); 1010 adapter->name);
1011} 1011}
1012 1012
1013static const struct net_device_ops cxgb_netdev_ops = {
1014 .ndo_open = cxgb_open,
1015 .ndo_stop = cxgb_close,
1016 .ndo_start_xmit = t1_start_xmit,
1017 .ndo_get_stats = t1_get_stats,
1018 .ndo_validate_addr = eth_validate_addr,
1019 .ndo_set_multicast_list = t1_set_rxmode,
1020 .ndo_do_ioctl = t1_ioctl,
1021 .ndo_change_mtu = t1_change_mtu,
1022 .ndo_set_mac_address = t1_set_mac_addr,
1023#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1024 .ndo_vlan_rx_register = t1_vlan_rx_register,
1025#endif
1026#ifdef CONFIG_NET_POLL_CONTROLLER
1027 .ndo_poll_controller = t1_netpoll,
1028#endif
1029};
1030
1013static int __devinit init_one(struct pci_dev *pdev, 1031static int __devinit init_one(struct pci_dev *pdev,
1014 const struct pci_device_id *ent) 1032 const struct pci_device_id *ent)
1015{ 1033{
@@ -1077,7 +1095,7 @@ static int __devinit init_one(struct pci_dev *pdev,
1077 SET_NETDEV_DEV(netdev, &pdev->dev); 1095 SET_NETDEV_DEV(netdev, &pdev->dev);
1078 1096
1079 if (!adapter) { 1097 if (!adapter) {
1080 adapter = netdev->priv; 1098 adapter = netdev_priv(netdev);
1081 adapter->pdev = pdev; 1099 adapter->pdev = pdev;
1082 adapter->port[0].dev = netdev; /* so we don't leak it */ 1100 adapter->port[0].dev = netdev; /* so we don't leak it */
1083 1101
@@ -1118,7 +1136,7 @@ static int __devinit init_one(struct pci_dev *pdev,
1118 netdev->if_port = i; 1136 netdev->if_port = i;
1119 netdev->mem_start = mmio_start; 1137 netdev->mem_start = mmio_start;
1120 netdev->mem_end = mmio_start + mmio_len - 1; 1138 netdev->mem_end = mmio_start + mmio_len - 1;
1121 netdev->priv = adapter; 1139 netdev->ml_priv = adapter;
1122 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 1140 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1123 netdev->features |= NETIF_F_LLTX; 1141 netdev->features |= NETIF_F_LLTX;
1124 1142
@@ -1130,7 +1148,6 @@ static int __devinit init_one(struct pci_dev *pdev,
1130 adapter->flags |= VLAN_ACCEL_CAPABLE; 1148 adapter->flags |= VLAN_ACCEL_CAPABLE;
1131 netdev->features |= 1149 netdev->features |=
1132 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 1150 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1133 netdev->vlan_rx_register = vlan_rx_register;
1134#endif 1151#endif
1135 1152
1136 /* T204: disable TSO */ 1153 /* T204: disable TSO */
@@ -1140,19 +1157,10 @@ static int __devinit init_one(struct pci_dev *pdev,
1140 } 1157 }
1141 } 1158 }
1142 1159
1143 netdev->open = cxgb_open; 1160 netdev->netdev_ops = &cxgb_netdev_ops;
1144 netdev->stop = cxgb_close;
1145 netdev->hard_start_xmit = t1_start_xmit;
1146 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ? 1161 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1147 sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt); 1162 sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
1148 netdev->get_stats = t1_get_stats; 1163
1149 netdev->set_multicast_list = t1_set_rxmode;
1150 netdev->do_ioctl = t1_ioctl;
1151 netdev->change_mtu = t1_change_mtu;
1152 netdev->set_mac_address = t1_set_mac_addr;
1153#ifdef CONFIG_NET_POLL_CONTROLLER
1154 netdev->poll_controller = t1_netpoll;
1155#endif
1156 netif_napi_add(netdev, &adapter->napi, t1_poll, 64); 1164 netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
1157 1165
1158 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); 1166 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
@@ -1382,7 +1390,7 @@ static inline void t1_sw_reset(struct pci_dev *pdev)
1382static void __devexit remove_one(struct pci_dev *pdev) 1390static void __devexit remove_one(struct pci_dev *pdev)
1383{ 1391{
1384 struct net_device *dev = pci_get_drvdata(pdev); 1392 struct net_device *dev = pci_get_drvdata(pdev);
1385 struct adapter *adapter = dev->priv; 1393 struct adapter *adapter = dev->ml_priv;
1386 int i; 1394 int i;
1387 1395
1388 for_each_port(adapter, i) { 1396 for_each_port(adapter, i) {
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 7092df50ff78..d984b7995763 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1381,7 +1381,6 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
1381 st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id()); 1381 st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
1382 1382
1383 skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev); 1383 skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
1384 skb->dev->last_rx = jiffies;
1385 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff && 1384 if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
1386 skb->protocol == htons(ETH_P_IP) && 1385 skb->protocol == htons(ETH_P_IP) &&
1387 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) { 1386 (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
@@ -1610,11 +1609,10 @@ static int process_pure_responses(struct adapter *adapter)
1610int t1_poll(struct napi_struct *napi, int budget) 1609int t1_poll(struct napi_struct *napi, int budget)
1611{ 1610{
1612 struct adapter *adapter = container_of(napi, struct adapter, napi); 1611 struct adapter *adapter = container_of(napi, struct adapter, napi);
1613 struct net_device *dev = adapter->port[0].dev;
1614 int work_done = process_responses(adapter, budget); 1612 int work_done = process_responses(adapter, budget);
1615 1613
1616 if (likely(work_done < budget)) { 1614 if (likely(work_done < budget)) {
1617 netif_rx_complete(dev, napi); 1615 netif_rx_complete(napi);
1618 writel(adapter->sge->respQ.cidx, 1616 writel(adapter->sge->respQ.cidx,
1619 adapter->regs + A_SG_SLEEPING); 1617 adapter->regs + A_SG_SLEEPING);
1620 } 1618 }
@@ -1628,13 +1626,11 @@ irqreturn_t t1_interrupt(int irq, void *data)
1628 int handled; 1626 int handled;
1629 1627
1630 if (likely(responses_pending(adapter))) { 1628 if (likely(responses_pending(adapter))) {
1631 struct net_device *dev = sge->netdev;
1632
1633 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE); 1629 writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
1634 1630
1635 if (napi_schedule_prep(&adapter->napi)) { 1631 if (napi_schedule_prep(&adapter->napi)) {
1636 if (process_pure_responses(adapter)) 1632 if (process_pure_responses(adapter))
1637 __netif_rx_schedule(dev, &adapter->napi); 1633 __netif_rx_schedule(&adapter->napi);
1638 else { 1634 else {
1639 /* no data, no NAPI needed */ 1635 /* no data, no NAPI needed */
1640 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING); 1636 writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
@@ -1782,7 +1778,7 @@ static inline int eth_hdr_len(const void *data)
1782 */ 1778 */
1783int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) 1779int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
1784{ 1780{
1785 struct adapter *adapter = dev->priv; 1781 struct adapter *adapter = dev->ml_priv;
1786 struct sge *sge = adapter->sge; 1782 struct sge *sge = adapter->sge;
1787 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port], 1783 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
1788 smp_processor_id()); 1784 smp_processor_id());
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 017a5361b980..f66548751c38 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -428,7 +428,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
428 printk(KERN_WARNING "%s: rx: polling, but no queue\n", 428 printk(KERN_WARNING "%s: rx: polling, but no queue\n",
429 priv->dev->name); 429 priv->dev->name);
430 spin_unlock(&priv->rx_lock); 430 spin_unlock(&priv->rx_lock);
431 netif_rx_complete(priv->dev, napi); 431 netif_rx_complete(napi);
432 return 0; 432 return 0;
433 } 433 }
434 434
@@ -514,7 +514,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
514 if (processed == 0) { 514 if (processed == 0) {
515 /* we ran out of packets to read, 515 /* we ran out of packets to read,
516 * revert to interrupt-driven mode */ 516 * revert to interrupt-driven mode */
517 netif_rx_complete(priv->dev, napi); 517 netif_rx_complete(napi);
518 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 518 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
519 return 0; 519 return 0;
520 } 520 }
@@ -536,7 +536,7 @@ fatal_error:
536 } 536 }
537 537
538 spin_unlock(&priv->rx_lock); 538 spin_unlock(&priv->rx_lock);
539 netif_rx_complete(priv->dev, napi); 539 netif_rx_complete(napi);
540 netif_tx_stop_all_queues(priv->dev); 540 netif_tx_stop_all_queues(priv->dev);
541 napi_disable(&priv->napi); 541 napi_disable(&priv->napi);
542 542
@@ -802,9 +802,9 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
802 802
803 if (status & MAC_INT_RX) { 803 if (status & MAC_INT_RX) {
804 queue = (status >> 8) & 7; 804 queue = (status >> 8) & 7;
805 if (netif_rx_schedule_prep(dev, &priv->napi)) { 805 if (netif_rx_schedule_prep(&priv->napi)) {
806 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); 806 cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
807 __netif_rx_schedule(dev, &priv->napi); 807 __netif_rx_schedule(&priv->napi);
808 } 808 }
809 } 809 }
810 810
@@ -1103,7 +1103,6 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1103 struct cpmac_priv *priv; 1103 struct cpmac_priv *priv;
1104 struct net_device *dev; 1104 struct net_device *dev;
1105 struct plat_cpmac_data *pdata; 1105 struct plat_cpmac_data *pdata;
1106 DECLARE_MAC_BUF(mac);
1107 1106
1108 pdata = pdev->dev.platform_data; 1107 pdata = pdev->dev.platform_data;
1109 1108
@@ -1180,8 +1179,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1180 if (netif_msg_probe(priv)) { 1179 if (netif_msg_probe(priv)) {
1181 printk(KERN_INFO 1180 printk(KERN_INFO
1182 "cpmac: device %s (regs: %p, irq: %d, phy: %s, " 1181 "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
1183 "mac: %s)\n", dev->name, (void *)mem->start, dev->irq, 1182 "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq,
1184 priv->phy_name, print_mac(mac, dev->dev_addr)); 1183 priv->phy_name, dev->dev_addr);
1185 } 1184 }
1186 return 0; 1185 return 0;
1187 1186
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 7e8a63106bdf..c9806c58b2fd 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -419,7 +419,6 @@ e100_set_mac_address(struct net_device *dev, void *p)
419{ 419{
420 struct net_local *np = netdev_priv(dev); 420 struct net_local *np = netdev_priv(dev);
421 struct sockaddr *addr = p; 421 struct sockaddr *addr = p;
422 DECLARE_MAC_BUF(mac);
423 422
424 spin_lock(&np->lock); /* preemption protection */ 423 spin_lock(&np->lock); /* preemption protection */
425 424
@@ -440,8 +439,7 @@ e100_set_mac_address(struct net_device *dev, void *p)
440 439
441 /* show it in the log as well */ 440 /* show it in the log as well */
442 441
443 printk(KERN_INFO "%s: changed MAC to %s\n", 442 printk(KERN_INFO "%s: changed MAC to %pM\n", dev->name, dev->dev_addr);
444 dev->name, print_mac(mac, dev->dev_addr));
445 443
446 spin_unlock(&np->lock); 444 spin_unlock(&np->lock);
447 445
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 7107620f615d..d548a45d59d5 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -521,7 +521,6 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
521 unsigned rev_type = 0; 521 unsigned rev_type = 0;
522 int eeprom_buff[CHKSUM_LEN]; 522 int eeprom_buff[CHKSUM_LEN];
523 int retval; 523 int retval;
524 DECLARE_MAC_BUF(mac);
525 524
526 /* Initialize the device structure. */ 525 /* Initialize the device structure. */
527 if (!modular) { 526 if (!modular) {
@@ -846,7 +845,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
846 } 845 }
847 846
848 /* print the ethernet address. */ 847 /* print the ethernet address. */
849 printk(", MAC %s", print_mac(mac, dev->dev_addr)); 848 printk(", MAC %pM", dev->dev_addr);
850 849
851 dev->open = net_open; 850 dev->open = net_open;
852 dev->stop = net_close; 851 dev->stop = net_close;
@@ -1025,14 +1024,13 @@ skip_this_frame:
1025 } 1024 }
1026 skb->protocol=eth_type_trans(skb,dev); 1025 skb->protocol=eth_type_trans(skb,dev);
1027 netif_rx(skb); 1026 netif_rx(skb);
1028 dev->last_rx = jiffies;
1029 lp->stats.rx_packets++; 1027 lp->stats.rx_packets++;
1030 lp->stats.rx_bytes += length; 1028 lp->stats.rx_bytes += length;
1031} 1029}
1032 1030
1033#endif /* ALLOW_DMA */ 1031#endif /* ALLOW_DMA */
1034 1032
1035void __init reset_chip(struct net_device *dev) 1033static void __init reset_chip(struct net_device *dev)
1036{ 1034{
1037#if !defined(CONFIG_MACH_MX31ADS) 1035#if !defined(CONFIG_MACH_MX31ADS)
1038#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01) 1036#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01)
@@ -1719,7 +1717,6 @@ net_rx(struct net_device *dev)
1719 1717
1720 skb->protocol=eth_type_trans(skb,dev); 1718 skb->protocol=eth_type_trans(skb,dev);
1721 netif_rx(skb); 1719 netif_rx(skb);
1722 dev->last_rx = jiffies;
1723 lp->stats.rx_packets++; 1720 lp->stats.rx_packets++;
1724 lp->stats.rx_bytes += length; 1721 lp->stats.rx_bytes += length;
1725} 1722}
@@ -1817,11 +1814,10 @@ static int set_mac_address(struct net_device *dev, void *p)
1817 1814
1818 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 1815 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1819 1816
1820 if (net_debug) { 1817 if (net_debug)
1821 DECLARE_MAC_BUF(mac); 1818 printk("%s: Setting MAC address to %pM.\n",
1822 printk("%s: Setting MAC address to %s.\n", 1819 dev->name, dev->dev_addr);
1823 dev->name, print_mac(mac, dev->dev_addr)); 1820
1824 }
1825 /* set the Ethernet address */ 1821 /* set the Ethernet address */
1826 for (i=0; i < ETH_ALEN/2; i++) 1822 for (i=0; i < ETH_ALEN/2; i++)
1827 writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8)); 1823 writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index bc8e2413abd2..5b346f9eaa8b 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -63,6 +63,7 @@ struct port_info {
63 struct link_config link_config; 63 struct link_config link_config;
64 struct net_device_stats netstats; 64 struct net_device_stats netstats;
65 int activity; 65 int activity;
66 __be32 iscsi_ipv4addr;
66}; 67};
67 68
68enum { /* adapter flags */ 69enum { /* adapter flags */
@@ -196,6 +197,7 @@ struct sge_qset { /* an SGE queue set */
196 int lro_frag_len; 197 int lro_frag_len;
197 void *lro_va; 198 void *lro_va;
198 struct net_device *netdev; 199 struct net_device *netdev;
200 struct netdev_queue *tx_q; /* associated netdev TX queue */
199 unsigned long txq_stopped; /* which Tx queues are stopped */ 201 unsigned long txq_stopped; /* which Tx queues are stopped */
200 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */ 202 struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
201 unsigned long port_stats[SGE_PSTAT_MAX]; 203 unsigned long port_stats[SGE_PSTAT_MAX];
@@ -294,7 +296,8 @@ int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
294void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p); 296void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
295int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, 297int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
296 int irq_vec_idx, const struct qset_params *p, 298 int irq_vec_idx, const struct qset_params *p,
297 int ntxq, struct net_device *dev); 299 int ntxq, struct net_device *dev,
300 struct netdev_queue *netdevq);
298int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx, 301int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
299 unsigned char *data); 302 unsigned char *data);
300irqreturn_t t3_sge_intr_msix(int irq, void *cookie); 303irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index e312d315a42d..db4f4f575b6a 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -714,7 +714,7 @@ int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data);
714int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data); 714int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data);
715int t3_seeprom_wp(struct adapter *adapter, int enable); 715int t3_seeprom_wp(struct adapter *adapter, int enable);
716int t3_get_tp_version(struct adapter *adapter, u32 *vers); 716int t3_get_tp_version(struct adapter *adapter, u32 *vers);
717int t3_check_tpsram_version(struct adapter *adapter, int *must_load); 717int t3_check_tpsram_version(struct adapter *adapter);
718int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram, 718int t3_check_tpsram(struct adapter *adapter, const u8 *tp_ram,
719 unsigned int size); 719 unsigned int size);
720int t3_set_proto_sram(struct adapter *adap, const u8 *data); 720int t3_set_proto_sram(struct adapter *adap, const u8 *data);
@@ -722,7 +722,7 @@ int t3_read_flash(struct adapter *adapter, unsigned int addr,
722 unsigned int nwords, u32 *data, int byte_oriented); 722 unsigned int nwords, u32 *data, int byte_oriented);
723int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size); 723int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
724int t3_get_fw_version(struct adapter *adapter, u32 *vers); 724int t3_get_fw_version(struct adapter *adapter, u32 *vers);
725int t3_check_fw_version(struct adapter *adapter, int *must_load); 725int t3_check_fw_version(struct adapter *adapter);
726int t3_init_hw(struct adapter *adapter, u32 fw_params); 726int t3_init_hw(struct adapter *adapter, u32 fw_params);
727void mac_prep(struct cmac *mac, struct adapter *adapter, int index); 727void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
728void early_hw_init(struct adapter *adapter, const struct adapter_info *ai); 728void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
diff --git a/drivers/net/cxgb3/cxgb3_ctl_defs.h b/drivers/net/cxgb3/cxgb3_ctl_defs.h
index 1d8d46eb3c96..369fe711fd7f 100644
--- a/drivers/net/cxgb3/cxgb3_ctl_defs.h
+++ b/drivers/net/cxgb3/cxgb3_ctl_defs.h
@@ -57,6 +57,9 @@ enum {
57 RDMA_GET_MIB = 19, 57 RDMA_GET_MIB = 19,
58 58
59 GET_RX_PAGE_INFO = 50, 59 GET_RX_PAGE_INFO = 50,
60 GET_ISCSI_IPV4ADDR = 51,
61
62 GET_EMBEDDED_INFO = 70,
60}; 63};
61 64
62/* 65/*
@@ -86,6 +89,12 @@ struct iff_mac {
86 u16 vlan_tag; 89 u16 vlan_tag;
87}; 90};
88 91
92/* Structure used to request a port's iSCSI IPv4 address */
93struct iscsi_ipv4addr {
94 struct net_device *dev; /* the net_device */
95 __be32 ipv4addr; /* the return iSCSI IPv4 address */
96};
97
89struct pci_dev; 98struct pci_dev;
90 99
91/* 100/*
@@ -169,4 +178,12 @@ struct ofld_page_info {
169 unsigned int page_size; /* Page size, should be a power of 2 */ 178 unsigned int page_size; /* Page size, should be a power of 2 */
170 unsigned int num; /* Number of pages */ 179 unsigned int num; /* Number of pages */
171}; 180};
181
182/*
183 * Structure used to get firmware and protocol engine versions.
184 */
185struct ch_embedded_info {
186 u32 fw_vers;
187 u32 tp_vers;
188};
172#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */ 189#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 2c341f83d327..2847f947499d 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -494,6 +494,36 @@ static void enable_all_napi(struct adapter *adap)
494} 494}
495 495
496/** 496/**
497 * set_qset_lro - Turn a queue set's LRO capability on and off
498 * @dev: the device the qset is attached to
499 * @qset_idx: the queue set index
500 * @val: the LRO switch
501 *
502 * Sets LRO on or off for a particular queue set.
503 * the device's features flag is updated to reflect the LRO
504 * capability when all queues belonging to the device are
505 * in the same state.
506 */
507static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
508{
509 struct port_info *pi = netdev_priv(dev);
510 struct adapter *adapter = pi->adapter;
511 int i, lro_on = 1;
512
513 adapter->params.sge.qset[qset_idx].lro = !!val;
514 adapter->sge.qs[qset_idx].lro_enabled = !!val;
515
516 /* let ethtool report LRO on only if all queues are LRO enabled */
517 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
518 lro_on &= adapter->params.sge.qset[i].lro;
519
520 if (lro_on)
521 dev->features |= NETIF_F_LRO;
522 else
523 dev->features &= ~NETIF_F_LRO;
524}
525
526/**
497 * setup_sge_qsets - configure SGE Tx/Rx/response queues 527 * setup_sge_qsets - configure SGE Tx/Rx/response queues
498 * @adap: the adapter 528 * @adap: the adapter
499 * 529 *
@@ -516,12 +546,12 @@ static int setup_sge_qsets(struct adapter *adap)
516 pi->qs = &adap->sge.qs[pi->first_qset]; 546 pi->qs = &adap->sge.qs[pi->first_qset];
517 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets; 547 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
518 ++j, ++qset_idx) { 548 ++j, ++qset_idx) {
519 if (!pi->rx_csum_offload) 549 set_qset_lro(dev, qset_idx, pi->rx_csum_offload);
520 adap->params.sge.qset[qset_idx].lro = 0;
521 err = t3_sge_alloc_qset(adap, qset_idx, 1, 550 err = t3_sge_alloc_qset(adap, qset_idx, 1,
522 (adap->flags & USING_MSIX) ? qset_idx + 1 : 551 (adap->flags & USING_MSIX) ? qset_idx + 1 :
523 irq_idx, 552 irq_idx,
524 &adap->params.sge.qset[qset_idx], ntxq, dev); 553 &adap->params.sge.qset[qset_idx], ntxq, dev,
554 netdev_get_tx_queue(dev, j));
525 if (err) { 555 if (err) {
526 t3_stop_sge_timers(adap); 556 t3_stop_sge_timers(adap);
527 t3_free_sge_resources(adap); 557 t3_free_sge_resources(adap);
@@ -824,8 +854,8 @@ static int bind_qsets(struct adapter *adap)
824 return err; 854 return err;
825} 855}
826 856
827#define FW_FNAME "t3fw-%d.%d.%d.bin" 857#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
828#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin" 858#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
829 859
830static int upgrade_fw(struct adapter *adap) 860static int upgrade_fw(struct adapter *adap)
831{ 861{
@@ -928,21 +958,22 @@ release_tpsram:
928static int cxgb_up(struct adapter *adap) 958static int cxgb_up(struct adapter *adap)
929{ 959{
930 int err; 960 int err;
931 int must_load;
932 961
933 if (!(adap->flags & FULL_INIT_DONE)) { 962 if (!(adap->flags & FULL_INIT_DONE)) {
934 err = t3_check_fw_version(adap, &must_load); 963 err = t3_check_fw_version(adap);
935 if (err == -EINVAL) { 964 if (err == -EINVAL) {
936 err = upgrade_fw(adap); 965 err = upgrade_fw(adap);
937 if (err && must_load) 966 CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
938 goto out; 967 FW_VERSION_MAJOR, FW_VERSION_MINOR,
968 FW_VERSION_MICRO, err ? "failed" : "succeeded");
939 } 969 }
940 970
941 err = t3_check_tpsram_version(adap, &must_load); 971 err = t3_check_tpsram_version(adap);
942 if (err == -EINVAL) { 972 if (err == -EINVAL) {
943 err = update_tpsram(adap); 973 err = update_tpsram(adap);
944 if (err && must_load) 974 CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
945 goto out; 975 TP_VERSION_MAJOR, TP_VERSION_MINOR,
976 TP_VERSION_MICRO, err ? "failed" : "succeeded");
946 } 977 }
947 978
948 /* 979 /*
@@ -1136,9 +1167,10 @@ static int cxgb_open(struct net_device *dev)
1136 "Could not initialize offload capabilities\n"); 1167 "Could not initialize offload capabilities\n");
1137 } 1168 }
1138 1169
1170 dev->real_num_tx_queues = pi->nqsets;
1139 link_start(dev); 1171 link_start(dev);
1140 t3_port_intr_enable(adapter, pi->port_id); 1172 t3_port_intr_enable(adapter, pi->port_id);
1141 netif_start_queue(dev); 1173 netif_tx_start_all_queues(dev);
1142 if (!other_ports) 1174 if (!other_ports)
1143 schedule_chk_task(adapter); 1175 schedule_chk_task(adapter);
1144 1176
@@ -1151,7 +1183,7 @@ static int cxgb_close(struct net_device *dev)
1151 struct adapter *adapter = pi->adapter; 1183 struct adapter *adapter = pi->adapter;
1152 1184
1153 t3_port_intr_disable(adapter, pi->port_id); 1185 t3_port_intr_disable(adapter, pi->port_id);
1154 netif_stop_queue(dev); 1186 netif_tx_stop_all_queues(dev);
1155 pi->phy.ops->power_down(&pi->phy, 1); 1187 pi->phy.ops->power_down(&pi->phy, 1);
1156 netif_carrier_off(dev); 1188 netif_carrier_off(dev);
1157 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX); 1189 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
@@ -1634,13 +1666,10 @@ static int set_rx_csum(struct net_device *dev, u32 data)
1634 1666
1635 p->rx_csum_offload = data; 1667 p->rx_csum_offload = data;
1636 if (!data) { 1668 if (!data) {
1637 struct adapter *adap = p->adapter;
1638 int i; 1669 int i;
1639 1670
1640 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { 1671 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1641 adap->params.sge.qset[i].lro = 0; 1672 set_qset_lro(dev, i, 0);
1642 adap->sge.qs[i].lro_enabled = 0;
1643 }
1644 } 1673 }
1645 return 0; 1674 return 0;
1646} 1675}
@@ -1795,6 +1824,25 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1795 memset(&wol->sopass, 0, sizeof(wol->sopass)); 1824 memset(&wol->sopass, 0, sizeof(wol->sopass));
1796} 1825}
1797 1826
1827static int cxgb3_set_flags(struct net_device *dev, u32 data)
1828{
1829 struct port_info *pi = netdev_priv(dev);
1830 int i;
1831
1832 if (data & ETH_FLAG_LRO) {
1833 if (!pi->rx_csum_offload)
1834 return -EINVAL;
1835
1836 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1837 set_qset_lro(dev, i, 1);
1838
1839 } else
1840 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1841 set_qset_lro(dev, i, 0);
1842
1843 return 0;
1844}
1845
1798static const struct ethtool_ops cxgb_ethtool_ops = { 1846static const struct ethtool_ops cxgb_ethtool_ops = {
1799 .get_settings = get_settings, 1847 .get_settings = get_settings,
1800 .set_settings = set_settings, 1848 .set_settings = set_settings,
@@ -1824,6 +1872,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
1824 .get_regs = get_regs, 1872 .get_regs = get_regs,
1825 .get_wol = get_wol, 1873 .get_wol = get_wol,
1826 .set_tso = ethtool_op_set_tso, 1874 .set_tso = ethtool_op_set_tso,
1875 .get_flags = ethtool_op_get_flags,
1876 .set_flags = cxgb3_set_flags,
1827}; 1877};
1828 1878
1829static int in_range(int val, int lo, int hi) 1879static int in_range(int val, int lo, int hi)
@@ -1940,11 +1990,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1940 } 1990 }
1941 } 1991 }
1942 } 1992 }
1943 if (t.lro >= 0) { 1993 if (t.lro >= 0)
1944 struct sge_qset *qs = &adapter->sge.qs[t.qset_idx]; 1994 set_qset_lro(dev, t.qset_idx, t.lro);
1945 q->lro = t.lro; 1995
1946 qs->lro_enabled = t.lro;
1947 }
1948 break; 1996 break;
1949 } 1997 }
1950 case CHELSIO_GET_QSET_PARAMS:{ 1998 case CHELSIO_GET_QSET_PARAMS:{
@@ -2783,6 +2831,22 @@ static void __devinit print_port_info(struct adapter *adap,
2783 } 2831 }
2784} 2832}
2785 2833
2834static const struct net_device_ops cxgb_netdev_ops = {
2835 .ndo_open = cxgb_open,
2836 .ndo_stop = cxgb_close,
2837 .ndo_start_xmit = t3_eth_xmit,
2838 .ndo_get_stats = cxgb_get_stats,
2839 .ndo_validate_addr = eth_validate_addr,
2840 .ndo_set_multicast_list = cxgb_set_rxmode,
2841 .ndo_do_ioctl = cxgb_ioctl,
2842 .ndo_change_mtu = cxgb_change_mtu,
2843 .ndo_set_mac_address = cxgb_set_mac_addr,
2844 .ndo_vlan_rx_register = vlan_rx_register,
2845#ifdef CONFIG_NET_POLL_CONTROLLER
2846 .ndo_poll_controller = cxgb_netpoll,
2847#endif
2848};
2849
2786static int __devinit init_one(struct pci_dev *pdev, 2850static int __devinit init_one(struct pci_dev *pdev,
2787 const struct pci_device_id *ent) 2851 const struct pci_device_id *ent)
2788{ 2852{
@@ -2871,7 +2935,7 @@ static int __devinit init_one(struct pci_dev *pdev,
2871 for (i = 0; i < ai->nports; ++i) { 2935 for (i = 0; i < ai->nports; ++i) {
2872 struct net_device *netdev; 2936 struct net_device *netdev;
2873 2937
2874 netdev = alloc_etherdev(sizeof(struct port_info)); 2938 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
2875 if (!netdev) { 2939 if (!netdev) {
2876 err = -ENOMEM; 2940 err = -ENOMEM;
2877 goto out_free_dev; 2941 goto out_free_dev;
@@ -2885,6 +2949,7 @@ static int __devinit init_one(struct pci_dev *pdev,
2885 pi->rx_csum_offload = 1; 2949 pi->rx_csum_offload = 1;
2886 pi->port_id = i; 2950 pi->port_id = i;
2887 netif_carrier_off(netdev); 2951 netif_carrier_off(netdev);
2952 netif_tx_stop_all_queues(netdev);
2888 netdev->irq = pdev->irq; 2953 netdev->irq = pdev->irq;
2889 netdev->mem_start = mmio_start; 2954 netdev->mem_start = mmio_start;
2890 netdev->mem_end = mmio_start + mmio_len - 1; 2955 netdev->mem_end = mmio_start + mmio_len - 1;
@@ -2894,20 +2959,7 @@ static int __devinit init_one(struct pci_dev *pdev,
2894 netdev->features |= NETIF_F_HIGHDMA; 2959 netdev->features |= NETIF_F_HIGHDMA;
2895 2960
2896 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 2961 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2897 netdev->vlan_rx_register = vlan_rx_register; 2962 netdev->netdev_ops = &cxgb_netdev_ops;
2898
2899 netdev->open = cxgb_open;
2900 netdev->stop = cxgb_close;
2901 netdev->hard_start_xmit = t3_eth_xmit;
2902 netdev->get_stats = cxgb_get_stats;
2903 netdev->set_multicast_list = cxgb_set_rxmode;
2904 netdev->do_ioctl = cxgb_ioctl;
2905 netdev->change_mtu = cxgb_change_mtu;
2906 netdev->set_mac_address = cxgb_set_mac_addr;
2907#ifdef CONFIG_NET_POLL_CONTROLLER
2908 netdev->poll_controller = cxgb_netpoll;
2909#endif
2910
2911 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops); 2963 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2912 } 2964 }
2913 2965
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 265aa8a15afa..2d7f69aff1d9 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -182,7 +182,9 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
182static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req, 182static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
183 void *data) 183 void *data)
184{ 184{
185 int i;
185 int ret = 0; 186 int ret = 0;
187 unsigned int val = 0;
186 struct ulp_iscsi_info *uiip = data; 188 struct ulp_iscsi_info *uiip = data;
187 189
188 switch (req) { 190 switch (req) {
@@ -191,32 +193,55 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
191 uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT); 193 uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
192 uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT); 194 uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
193 uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK); 195 uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);
196
197 val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
198 for (i = 0; i < 4; i++, val >>= 8)
199 uiip->pgsz_factor[i] = val & 0xFF;
200
201 val = t3_read_reg(adapter, A_TP_PARA_REG7);
202 uiip->max_txsz =
203 uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
204 (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
194 /* 205 /*
195 * On tx, the iscsi pdu has to be <= tx page size and has to 206 * On tx, the iscsi pdu has to be <= tx page size and has to
196 * fit into the Tx PM FIFO. 207 * fit into the Tx PM FIFO.
197 */ 208 */
198 uiip->max_txsz = min(adapter->params.tp.tx_pg_size, 209 val = min(adapter->params.tp.tx_pg_size,
199 t3_read_reg(adapter, A_PM1_TX_CFG) >> 17); 210 t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
200 /* on rx, the iscsi pdu has to be < rx page size and the 211 uiip->max_txsz = min(val, uiip->max_txsz);
201 whole pdu + cpl headers has to fit into one sge buffer */ 212
202 uiip->max_rxsz = min_t(unsigned int, 213 /* set MaxRxData to 16224 */
203 adapter->params.tp.rx_pg_size, 214 val = t3_read_reg(adapter, A_TP_PARA_REG2);
204 (adapter->sge.qs[0].fl[1].buf_size - 215 if ((val >> S_MAXRXDATA) != 0x3f60) {
205 sizeof(struct cpl_rx_data) * 2 - 216 val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
206 sizeof(struct cpl_rx_data_ddp))); 217 val |= V_MAXRXDATA(0x3f60);
218 printk(KERN_INFO
219 "%s, iscsi set MaxRxData to 16224 (0x%x).\n",
220 adapter->name, val);
221 t3_write_reg(adapter, A_TP_PARA_REG2, val);
222 }
223
224 /*
225 * on rx, the iscsi pdu has to be < rx page size and the
226 * the max rx data length programmed in TP
227 */
228 val = min(adapter->params.tp.rx_pg_size,
229 ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
230 S_MAXRXDATA) & M_MAXRXDATA);
231 uiip->max_rxsz = min(val, uiip->max_rxsz);
207 break; 232 break;
208 case ULP_ISCSI_SET_PARAMS: 233 case ULP_ISCSI_SET_PARAMS:
209 t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask); 234 t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
210 /* set MaxRxData and MaxCoalesceSize to 16224 */
211 t3_write_reg(adapter, A_TP_PARA_REG2, 0x3f603f60);
212 /* program the ddp page sizes */ 235 /* program the ddp page sizes */
213 { 236 for (i = 0; i < 4; i++)
214 int i; 237 val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
215 unsigned int val = 0; 238 if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
216 for (i = 0; i < 4; i++) 239 printk(KERN_INFO
217 val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i); 240 "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
218 if (val) 241 adapter->name, val, uiip->pgsz_factor[0],
219 t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val); 242 uiip->pgsz_factor[1], uiip->pgsz_factor[2],
243 uiip->pgsz_factor[3]);
244 t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
220 } 245 }
221 break; 246 break;
222 default: 247 default:
@@ -407,6 +432,21 @@ static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
407 rx_page_info->page_size = tp->rx_pg_size; 432 rx_page_info->page_size = tp->rx_pg_size;
408 rx_page_info->num = tp->rx_num_pgs; 433 rx_page_info->num = tp->rx_num_pgs;
409 break; 434 break;
435 case GET_ISCSI_IPV4ADDR: {
436 struct iscsi_ipv4addr *p = data;
437 struct port_info *pi = netdev_priv(p->dev);
438 p->ipv4addr = pi->iscsi_ipv4addr;
439 break;
440 }
441 case GET_EMBEDDED_INFO: {
442 struct ch_embedded_info *e = data;
443
444 spin_lock(&adapter->stats_lock);
445 t3_get_fw_version(adapter, &e->fw_vers);
446 t3_get_tp_version(adapter, &e->tp_vers);
447 spin_unlock(&adapter->stats_lock);
448 break;
449 }
410 default: 450 default:
411 return -EOPNOTSUPP; 451 return -EOPNOTSUPP;
412 } 452 }
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index c6480be0bc1f..6c641a889471 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -36,6 +36,7 @@
36#include <linux/ip.h> 36#include <linux/ip.h>
37#include <linux/tcp.h> 37#include <linux/tcp.h>
38#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
39#include <net/arp.h>
39#include "common.h" 40#include "common.h"
40#include "regs.h" 41#include "regs.h"
41#include "sge_defs.h" 42#include "sge_defs.h"
@@ -549,16 +550,15 @@ static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
549 550
550 if (!p) 551 if (!p)
551 return NULL; 552 return NULL;
552 if (sw_size) { 553 if (sw_size && metadata) {
553 s = kcalloc(nelem, sw_size, GFP_KERNEL); 554 s = kcalloc(nelem, sw_size, GFP_KERNEL);
554 555
555 if (!s) { 556 if (!s) {
556 dma_free_coherent(&pdev->dev, len, p, *phys); 557 dma_free_coherent(&pdev->dev, len, p, *phys);
557 return NULL; 558 return NULL;
558 } 559 }
559 }
560 if (metadata)
561 *(void **)metadata = s; 560 *(void **)metadata = s;
561 }
562 memset(p, 0, len); 562 memset(p, 0, len);
563 return p; 563 return p;
564} 564}
@@ -1121,10 +1121,10 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1121 htonl(V_WR_TID(q->token))); 1121 htonl(V_WR_TID(q->token)));
1122} 1122}
1123 1123
1124static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs, 1124static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1125 struct sge_txq *q) 1125 struct sge_qset *qs, struct sge_txq *q)
1126{ 1126{
1127 netif_stop_queue(dev); 1127 netif_tx_stop_queue(txq);
1128 set_bit(TXQ_ETH, &qs->txq_stopped); 1128 set_bit(TXQ_ETH, &qs->txq_stopped);
1129 q->stops++; 1129 q->stops++;
1130} 1130}
@@ -1138,11 +1138,13 @@ static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs,
1138 */ 1138 */
1139int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) 1139int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1140{ 1140{
1141 int qidx;
1141 unsigned int ndesc, pidx, credits, gen, compl; 1142 unsigned int ndesc, pidx, credits, gen, compl;
1142 const struct port_info *pi = netdev_priv(dev); 1143 const struct port_info *pi = netdev_priv(dev);
1143 struct adapter *adap = pi->adapter; 1144 struct adapter *adap = pi->adapter;
1144 struct sge_qset *qs = pi->qs; 1145 struct netdev_queue *txq;
1145 struct sge_txq *q = &qs->txq[TXQ_ETH]; 1146 struct sge_qset *qs;
1147 struct sge_txq *q;
1146 1148
1147 /* 1149 /*
1148 * The chip min packet length is 9 octets but play safe and reject 1150 * The chip min packet length is 9 octets but play safe and reject
@@ -1153,6 +1155,11 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1153 return NETDEV_TX_OK; 1155 return NETDEV_TX_OK;
1154 } 1156 }
1155 1157
1158 qidx = skb_get_queue_mapping(skb);
1159 qs = &pi->qs[qidx];
1160 q = &qs->txq[TXQ_ETH];
1161 txq = netdev_get_tx_queue(dev, qidx);
1162
1156 spin_lock(&q->lock); 1163 spin_lock(&q->lock);
1157 reclaim_completed_tx(adap, q); 1164 reclaim_completed_tx(adap, q);
1158 1165
@@ -1160,7 +1167,7 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1160 ndesc = calc_tx_descs(skb); 1167 ndesc = calc_tx_descs(skb);
1161 1168
1162 if (unlikely(credits < ndesc)) { 1169 if (unlikely(credits < ndesc)) {
1163 t3_stop_queue(dev, qs, q); 1170 t3_stop_tx_queue(txq, qs, q);
1164 dev_err(&adap->pdev->dev, 1171 dev_err(&adap->pdev->dev,
1165 "%s: Tx ring %u full while queue awake!\n", 1172 "%s: Tx ring %u full while queue awake!\n",
1166 dev->name, q->cntxt_id & 7); 1173 dev->name, q->cntxt_id & 7);
@@ -1170,12 +1177,12 @@ int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1170 1177
1171 q->in_use += ndesc; 1178 q->in_use += ndesc;
1172 if (unlikely(credits - ndesc < q->stop_thres)) { 1179 if (unlikely(credits - ndesc < q->stop_thres)) {
1173 t3_stop_queue(dev, qs, q); 1180 t3_stop_tx_queue(txq, qs, q);
1174 1181
1175 if (should_restart_tx(q) && 1182 if (should_restart_tx(q) &&
1176 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { 1183 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1177 q->restarts++; 1184 q->restarts++;
1178 netif_wake_queue(dev); 1185 netif_tx_wake_queue(txq);
1179 } 1186 }
1180 } 1187 }
1181 1188
@@ -1839,7 +1846,7 @@ static void restart_tx(struct sge_qset *qs)
1839 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { 1846 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1840 qs->txq[TXQ_ETH].restarts++; 1847 qs->txq[TXQ_ETH].restarts++;
1841 if (netif_running(qs->netdev)) 1848 if (netif_running(qs->netdev))
1842 netif_wake_queue(qs->netdev); 1849 netif_tx_wake_queue(qs->tx_q);
1843 } 1850 }
1844 1851
1845 if (test_bit(TXQ_OFLD, &qs->txq_stopped) && 1852 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
@@ -1857,6 +1864,54 @@ static void restart_tx(struct sge_qset *qs)
1857} 1864}
1858 1865
1859/** 1866/**
1867 * cxgb3_arp_process - process an ARP request probing a private IP address
1868 * @adapter: the adapter
1869 * @skb: the skbuff containing the ARP request
1870 *
1871 * Check if the ARP request is probing the private IP address
1872 * dedicated to iSCSI, generate an ARP reply if so.
1873 */
1874static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
1875{
1876 struct net_device *dev = skb->dev;
1877 struct port_info *pi;
1878 struct arphdr *arp;
1879 unsigned char *arp_ptr;
1880 unsigned char *sha;
1881 __be32 sip, tip;
1882
1883 if (!dev)
1884 return;
1885
1886 skb_reset_network_header(skb);
1887 arp = arp_hdr(skb);
1888
1889 if (arp->ar_op != htons(ARPOP_REQUEST))
1890 return;
1891
1892 arp_ptr = (unsigned char *)(arp + 1);
1893 sha = arp_ptr;
1894 arp_ptr += dev->addr_len;
1895 memcpy(&sip, arp_ptr, sizeof(sip));
1896 arp_ptr += sizeof(sip);
1897 arp_ptr += dev->addr_len;
1898 memcpy(&tip, arp_ptr, sizeof(tip));
1899
1900 pi = netdev_priv(dev);
1901 if (tip != pi->iscsi_ipv4addr)
1902 return;
1903
1904 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1905 dev->dev_addr, sha);
1906
1907}
1908
1909static inline int is_arp(struct sk_buff *skb)
1910{
1911 return skb->protocol == htons(ETH_P_ARP);
1912}
1913
1914/**
1860 * rx_eth - process an ingress ethernet packet 1915 * rx_eth - process an ingress ethernet packet
1861 * @adap: the adapter 1916 * @adap: the adapter
1862 * @rq: the response queue that received the packet 1917 * @rq: the response queue that received the packet
@@ -1876,11 +1931,10 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1876 1931
1877 skb_pull(skb, sizeof(*p) + pad); 1932 skb_pull(skb, sizeof(*p) + pad);
1878 skb->protocol = eth_type_trans(skb, adap->port[p->iff]); 1933 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
1879 skb->dev->last_rx = jiffies;
1880 pi = netdev_priv(skb->dev); 1934 pi = netdev_priv(skb->dev);
1881 if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) && 1935 if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
1882 !p->fragment) { 1936 !p->fragment) {
1883 rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++; 1937 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
1884 skb->ip_summed = CHECKSUM_UNNECESSARY; 1938 skb->ip_summed = CHECKSUM_UNNECESSARY;
1885 } else 1939 } else
1886 skb->ip_summed = CHECKSUM_NONE; 1940 skb->ip_summed = CHECKSUM_NONE;
@@ -1895,16 +1949,28 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1895 grp, 1949 grp,
1896 ntohs(p->vlan), 1950 ntohs(p->vlan),
1897 p); 1951 p);
1898 else 1952 else {
1953 if (unlikely(pi->iscsi_ipv4addr &&
1954 is_arp(skb))) {
1955 unsigned short vtag = ntohs(p->vlan) &
1956 VLAN_VID_MASK;
1957 skb->dev = vlan_group_get_device(grp,
1958 vtag);
1959 cxgb3_arp_process(adap, skb);
1960 }
1899 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan), 1961 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
1900 rq->polling); 1962 rq->polling);
1963 }
1901 else 1964 else
1902 dev_kfree_skb_any(skb); 1965 dev_kfree_skb_any(skb);
1903 } else if (rq->polling) { 1966 } else if (rq->polling) {
1904 if (lro) 1967 if (lro)
1905 lro_receive_skb(&qs->lro_mgr, skb, p); 1968 lro_receive_skb(&qs->lro_mgr, skb, p);
1906 else 1969 else {
1970 if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
1971 cxgb3_arp_process(adap, skb);
1907 netif_receive_skb(skb); 1972 netif_receive_skb(skb);
1973 }
1908 } else 1974 } else
1909 netif_rx(skb); 1975 netif_rx(skb);
1910} 1976}
@@ -2308,7 +2374,7 @@ next_fl:
2308 2374
2309static inline int is_pure_response(const struct rsp_desc *r) 2375static inline int is_pure_response(const struct rsp_desc *r)
2310{ 2376{
2311 u32 n = ntohl(r->flags) & (F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID); 2377 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2312 2378
2313 return (n | r->len_cq) == 0; 2379 return (n | r->len_cq) == 0;
2314} 2380}
@@ -2826,6 +2892,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2826 * @p: configuration parameters for this queue set 2892 * @p: configuration parameters for this queue set
2827 * @ntxq: number of Tx queues for the queue set 2893 * @ntxq: number of Tx queues for the queue set
2828 * @netdev: net device associated with this queue set 2894 * @netdev: net device associated with this queue set
2895 * @netdevq: net device TX queue associated with this queue set
2829 * 2896 *
2830 * Allocate resources and initialize an SGE queue set. A queue set 2897 * Allocate resources and initialize an SGE queue set. A queue set
2831 * comprises a response queue, two Rx free-buffer queues, and up to 3 2898 * comprises a response queue, two Rx free-buffer queues, and up to 3
@@ -2834,7 +2901,8 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
2834 */ 2901 */
2835int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, 2902int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2836 int irq_vec_idx, const struct qset_params *p, 2903 int irq_vec_idx, const struct qset_params *p,
2837 int ntxq, struct net_device *dev) 2904 int ntxq, struct net_device *dev,
2905 struct netdev_queue *netdevq)
2838{ 2906{
2839 int i, avail, ret = -ENOMEM; 2907 int i, avail, ret = -ENOMEM;
2840 struct sge_qset *q = &adapter->sge.qs[id]; 2908 struct sge_qset *q = &adapter->sge.qs[id];
@@ -2970,6 +3038,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2970 3038
2971 q->adap = adapter; 3039 q->adap = adapter;
2972 q->netdev = dev; 3040 q->netdev = dev;
3041 q->tx_q = netdevq;
2973 t3_update_qset_coalesce(q, p); 3042 t3_update_qset_coalesce(q, p);
2974 3043
2975 init_lro_mgr(q, lro_mgr); 3044 init_lro_mgr(q, lro_mgr);
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 9a0898b0dbce..2d1433077a8e 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -925,11 +925,10 @@ int t3_get_tp_version(struct adapter *adapter, u32 *vers)
925/** 925/**
926 * t3_check_tpsram_version - read the tp sram version 926 * t3_check_tpsram_version - read the tp sram version
927 * @adapter: the adapter 927 * @adapter: the adapter
928 * @must_load: set to 1 if loading a new microcode image is required
929 * 928 *
930 * Reads the protocol sram version from flash. 929 * Reads the protocol sram version from flash.
931 */ 930 */
932int t3_check_tpsram_version(struct adapter *adapter, int *must_load) 931int t3_check_tpsram_version(struct adapter *adapter)
933{ 932{
934 int ret; 933 int ret;
935 u32 vers; 934 u32 vers;
@@ -938,7 +937,6 @@ int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
938 if (adapter->params.rev == T3_REV_A) 937 if (adapter->params.rev == T3_REV_A)
939 return 0; 938 return 0;
940 939
941 *must_load = 1;
942 940
943 ret = t3_get_tp_version(adapter, &vers); 941 ret = t3_get_tp_version(adapter, &vers);
944 if (ret) 942 if (ret)
@@ -949,13 +947,7 @@ int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
949 947
950 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR) 948 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
951 return 0; 949 return 0;
952
953 if (major != TP_VERSION_MAJOR)
954 CH_ERR(adapter, "found wrong TP version (%u.%u), "
955 "driver needs version %d.%d\n", major, minor,
956 TP_VERSION_MAJOR, TP_VERSION_MINOR);
957 else { 950 else {
958 *must_load = 0;
959 CH_ERR(adapter, "found wrong TP version (%u.%u), " 951 CH_ERR(adapter, "found wrong TP version (%u.%u), "
960 "driver compiled for version %d.%d\n", major, minor, 952 "driver compiled for version %d.%d\n", major, minor,
961 TP_VERSION_MAJOR, TP_VERSION_MINOR); 953 TP_VERSION_MAJOR, TP_VERSION_MINOR);
@@ -1012,18 +1004,16 @@ int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1012/** 1004/**
1013 * t3_check_fw_version - check if the FW is compatible with this driver 1005 * t3_check_fw_version - check if the FW is compatible with this driver
1014 * @adapter: the adapter 1006 * @adapter: the adapter
1015 * @must_load: set to 1 if loading a new FW image is required 1007 *
1016
1017 * Checks if an adapter's FW is compatible with the driver. Returns 0 1008 * Checks if an adapter's FW is compatible with the driver. Returns 0
1018 * if the versions are compatible, a negative error otherwise. 1009 * if the versions are compatible, a negative error otherwise.
1019 */ 1010 */
1020int t3_check_fw_version(struct adapter *adapter, int *must_load) 1011int t3_check_fw_version(struct adapter *adapter)
1021{ 1012{
1022 int ret; 1013 int ret;
1023 u32 vers; 1014 u32 vers;
1024 unsigned int type, major, minor; 1015 unsigned int type, major, minor;
1025 1016
1026 *must_load = 1;
1027 ret = t3_get_fw_version(adapter, &vers); 1017 ret = t3_get_fw_version(adapter, &vers);
1028 if (ret) 1018 if (ret)
1029 return ret; 1019 return ret;
@@ -1035,17 +1025,11 @@ int t3_check_fw_version(struct adapter *adapter, int *must_load)
1035 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR && 1025 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1036 minor == FW_VERSION_MINOR) 1026 minor == FW_VERSION_MINOR)
1037 return 0; 1027 return 0;
1038 1028 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1039 if (major != FW_VERSION_MAJOR)
1040 CH_ERR(adapter, "found wrong FW version(%u.%u), "
1041 "driver needs version %u.%u\n", major, minor,
1042 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1043 else if (minor < FW_VERSION_MINOR) {
1044 *must_load = 0;
1045 CH_WARN(adapter, "found old FW minor version(%u.%u), " 1029 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1046 "driver compiled for version %u.%u\n", major, minor, 1030 "driver compiled for version %u.%u\n", major, minor,
1047 FW_VERSION_MAJOR, FW_VERSION_MINOR); 1031 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1048 } else { 1032 else {
1049 CH_WARN(adapter, "found newer FW version(%u.%u), " 1033 CH_WARN(adapter, "found newer FW version(%u.%u), "
1050 "driver compiled for version %u.%u\n", major, minor, 1034 "driver compiled for version %u.%u\n", major, minor,
1051 FW_VERSION_MAJOR, FW_VERSION_MINOR); 1035 FW_VERSION_MAJOR, FW_VERSION_MINOR);
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index bb8698a86754..b1b25c37aa10 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -35,7 +35,7 @@
35#define DRV_DESC "Chelsio T3 Network Driver" 35#define DRV_DESC "Chelsio T3 Network Driver"
36#define DRV_NAME "cxgb3" 36#define DRV_NAME "cxgb3"
37/* Driver version */ 37/* Driver version */
38#define DRV_VERSION "1.1.0-ko" 38#define DRV_VERSION "1.1.1-ko"
39 39
40/* Firmware version */ 40/* Firmware version */
41#define FW_VERSION_MAJOR 7 41#define FW_VERSION_MAJOR 7
diff --git a/drivers/net/cxgb3/vsc8211.c b/drivers/net/cxgb3/vsc8211.c
index 33f956bd6b59..d07130971b8f 100644
--- a/drivers/net/cxgb3/vsc8211.c
+++ b/drivers/net/cxgb3/vsc8211.c
@@ -262,6 +262,7 @@ static int vsc8211_get_link_status_fiber(struct cphy *cphy, int *link_ok,
262 return 0; 262 return 0;
263} 263}
264 264
265#ifdef UNUSED
265/* 266/*
266 * Enable/disable auto MDI/MDI-X in forced link speed mode. 267 * Enable/disable auto MDI/MDI-X in forced link speed mode.
267 */ 268 */
@@ -301,6 +302,7 @@ int vsc8211_set_speed_duplex(struct cphy *phy, int speed, int duplex)
301 err = vsc8211_set_automdi(phy, 1); 302 err = vsc8211_set_automdi(phy, 1);
302 return err; 303 return err;
303} 304}
305#endif /* UNUSED */
304 306
305static int vsc8211_power_down(struct cphy *cphy, int enable) 307static int vsc8211_power_down(struct cphy *cphy, int enable)
306{ 308{
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index cb849b091f98..970f820ba814 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -369,7 +369,6 @@ static void de600_rx_intr(struct net_device *dev)
369 netif_rx(skb); 369 netif_rx(skb);
370 370
371 /* update stats */ 371 /* update stats */
372 dev->last_rx = jiffies;
373 dev->stats.rx_packets++; /* count all receives */ 372 dev->stats.rx_packets++; /* count all receives */
374 dev->stats.rx_bytes += size; /* count all received bytes */ 373 dev->stats.rx_bytes += size; /* count all received bytes */
375 374
@@ -384,7 +383,6 @@ static struct net_device * __init de600_probe(void)
384 int i; 383 int i;
385 struct net_device *dev; 384 struct net_device *dev;
386 int err; 385 int err;
387 DECLARE_MAC_BUF(mac);
388 386
389 dev = alloc_etherdev(0); 387 dev = alloc_etherdev(0);
390 if (!dev) 388 if (!dev)
@@ -439,7 +437,7 @@ static struct net_device * __init de600_probe(void)
439 goto out1; 437 goto out1;
440 } 438 }
441 439
442 printk(", Ethernet Address: %s\n", print_mac(mac, dev->dev_addr)); 440 printk(", Ethernet Address: %pM\n", dev->dev_addr);
443 441
444 dev->open = de600_open; 442 dev->open = de600_open;
445 dev->stop = de600_close; 443 dev->stop = de600_close;
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index d454e143483e..bdfa89403389 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -686,7 +686,6 @@ static int de620_rx_intr(struct net_device *dev)
686 PRINTK(("Read %d bytes\n", size)); 686 PRINTK(("Read %d bytes\n", size));
687 skb->protocol=eth_type_trans(skb,dev); 687 skb->protocol=eth_type_trans(skb,dev);
688 netif_rx(skb); /* deliver it "upstairs" */ 688 netif_rx(skb); /* deliver it "upstairs" */
689 dev->last_rx = jiffies;
690 /* count all receives */ 689 /* count all receives */
691 dev->stats.rx_packets++; 690 dev->stats.rx_packets++;
692 dev->stats.rx_bytes += size; 691 dev->stats.rx_bytes += size;
@@ -800,7 +799,6 @@ struct net_device * __init de620_probe(int unit)
800 struct net_device *dev; 799 struct net_device *dev;
801 int err = -ENOMEM; 800 int err = -ENOMEM;
802 int i; 801 int i;
803 DECLARE_MAC_BUF(mac);
804 802
805 dev = alloc_etherdev(0); 803 dev = alloc_etherdev(0);
806 if (!dev) 804 if (!dev)
@@ -853,7 +851,7 @@ struct net_device * __init de620_probe(int unit)
853 dev->broadcast[i] = 0xff; 851 dev->broadcast[i] = 0xff;
854 } 852 }
855 853
856 printk(", Ethernet Address: %s", print_mac(mac, dev->dev_addr)); 854 printk(", Ethernet Address: %pM", dev->dev_addr);
857 855
858 printk(" (%dk RAM,", 856 printk(" (%dk RAM,",
859 (nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64); 857 (nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64);
@@ -876,10 +874,7 @@ struct net_device * __init de620_probe(int unit)
876 if (de620_debug) { 874 if (de620_debug) {
877 printk("\nEEPROM contents:\n"); 875 printk("\nEEPROM contents:\n");
878 printk("RAM_Size = 0x%02X\n", nic_data.RAM_Size); 876 printk("RAM_Size = 0x%02X\n", nic_data.RAM_Size);
879 printk("NodeID = %02X:%02X:%02X:%02X:%02X:%02X\n", 877 printk("NodeID = %pM\n", nic_data.NodeID);
880 nic_data.NodeID[0], nic_data.NodeID[1],
881 nic_data.NodeID[2], nic_data.NodeID[3],
882 nic_data.NodeID[4], nic_data.NodeID[5]);
883 printk("Model = %d\n", nic_data.Model); 878 printk("Model = %d\n", nic_data.Model);
884 printk("Media = %d\n", nic_data.Media); 879 printk("Media = %d\n", nic_data.Media);
885 printk("SCR = 0x%02x\n", nic_data.SCR); 880 printk("SCR = 0x%02x\n", nic_data.SCR);
@@ -1008,20 +1003,3 @@ void cleanup_module(void)
1008} 1003}
1009#endif /* MODULE */ 1004#endif /* MODULE */
1010MODULE_LICENSE("GPL"); 1005MODULE_LICENSE("GPL");
1011
1012
1013/*
1014 * (add '-DMODULE' when compiling as loadable module)
1015 *
1016 * compile-command:
1017 * gcc -D__KERNEL__ -Wall -Wstrict-prototypes -O2 \
1018 * -fomit-frame-pointer -m486 \
1019 * -I/usr/src/linux/include -I../../net/inet -c de620.c
1020*/
1021/*
1022 * Local variables:
1023 * kernel-compile-command: "gcc -D__KERNEL__ -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
1024 * module-compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
1025 * compile-command: "gcc -D__KERNEL__ -DMODULE -Ilinux/include -I../../net/inet -Wall -Wstrict-prototypes -O2 -m486 -c de620.c"
1026 * End:
1027 */
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 3e3506411ac0..7ce3053530f9 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -622,7 +622,6 @@ static int lance_rx(struct net_device *dev)
622 622
623 skb->protocol = eth_type_trans(skb, dev); 623 skb->protocol = eth_type_trans(skb, dev);
624 netif_rx(skb); 624 netif_rx(skb);
625 dev->last_rx = jiffies;
626 dev->stats.rx_packets++; 625 dev->stats.rx_packets++;
627 } 626 }
628 627
@@ -1023,7 +1022,6 @@ static int __init dec_lance_probe(struct device *bdev, const int type)
1023 int i, ret; 1022 int i, ret;
1024 unsigned long esar_base; 1023 unsigned long esar_base;
1025 unsigned char *esar; 1024 unsigned char *esar;
1026 DECLARE_MAC_BUF(mac);
1027 1025
1028 if (dec_lance_debug && version_printed++ == 0) 1026 if (dec_lance_debug && version_printed++ == 0)
1029 printk(version); 1027 printk(version);
@@ -1035,7 +1033,7 @@ static int __init dec_lance_probe(struct device *bdev, const int type)
1035 dev = root_lance_dev; 1033 dev = root_lance_dev;
1036 while (dev) { 1034 while (dev) {
1037 i++; 1035 i++;
1038 lp = (struct lance_private *)dev->priv; 1036 lp = netdev_priv(dev);
1039 dev = lp->next; 1037 dev = lp->next;
1040 } 1038 }
1041 snprintf(name, sizeof(name), fmt, i); 1039 snprintf(name, sizeof(name), fmt, i);
@@ -1223,8 +1221,7 @@ static int __init dec_lance_probe(struct device *bdev, const int type)
1223 for (i = 0; i < 6; i++) 1221 for (i = 0; i < 6; i++)
1224 dev->dev_addr[i] = esar[i * 4]; 1222 dev->dev_addr[i] = esar[i * 4];
1225 1223
1226 printk(", addr = %s, irq = %d\n", 1224 printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
1227 print_mac(mac, dev->dev_addr), dev->irq);
1228 1225
1229 dev->open = &lance_open; 1226 dev->open = &lance_open;
1230 dev->stop = &lance_close; 1227 dev->stop = &lance_close;
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index c062aacf229c..6445cedd5868 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -477,6 +477,15 @@ static void dfx_get_bars(struct device *bdev,
477 } 477 }
478} 478}
479 479
480static const struct net_device_ops dfx_netdev_ops = {
481 .ndo_open = dfx_open,
482 .ndo_stop = dfx_close,
483 .ndo_start_xmit = dfx_xmt_queue_pkt,
484 .ndo_get_stats = dfx_ctl_get_stats,
485 .ndo_set_multicast_list = dfx_ctl_set_multicast_list,
486 .ndo_set_mac_address = dfx_ctl_set_mac_address,
487};
488
480/* 489/*
481 * ================ 490 * ================
482 * = dfx_register = 491 * = dfx_register =
@@ -511,7 +520,7 @@ static int __devinit dfx_register(struct device *bdev)
511 int dfx_bus_pci = DFX_BUS_PCI(bdev); 520 int dfx_bus_pci = DFX_BUS_PCI(bdev);
512 int dfx_bus_tc = DFX_BUS_TC(bdev); 521 int dfx_bus_tc = DFX_BUS_TC(bdev);
513 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; 522 int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
514 char *print_name = bdev->bus_id; 523 const char *print_name = dev_name(bdev);
515 struct net_device *dev; 524 struct net_device *dev;
516 DFX_board_t *bp; /* board pointer */ 525 DFX_board_t *bp; /* board pointer */
517 resource_size_t bar_start = 0; /* pointer to port */ 526 resource_size_t bar_start = 0; /* pointer to port */
@@ -573,13 +582,7 @@ static int __devinit dfx_register(struct device *bdev)
573 } 582 }
574 583
575 /* Initialize new device structure */ 584 /* Initialize new device structure */
576 585 dev->netdev_ops = &dfx_netdev_ops;
577 dev->get_stats = dfx_ctl_get_stats;
578 dev->open = dfx_open;
579 dev->stop = dfx_close;
580 dev->hard_start_xmit = dfx_xmt_queue_pkt;
581 dev->set_multicast_list = dfx_ctl_set_multicast_list;
582 dev->set_mac_address = dfx_ctl_set_mac_address;
583 586
584 if (dfx_bus_pci) 587 if (dfx_bus_pci)
585 pci_set_master(to_pci_dev(bdev)); 588 pci_set_master(to_pci_dev(bdev));
@@ -3103,7 +3106,6 @@ static void dfx_rcv_queue_process(
3103 netif_rx(skb); 3106 netif_rx(skb);
3104 3107
3105 /* Update the rcv counters */ 3108 /* Update the rcv counters */
3106 bp->dev->last_rx = jiffies;
3107 bp->rcv_total_frames++; 3109 bp->rcv_total_frames++;
3108 if (*(p_buff + RCV_BUFF_K_DA) & 0x01) 3110 if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
3109 bp->rcv_multicast_frames++; 3111 bp->rcv_multicast_frames++;
@@ -3741,10 +3743,3 @@ MODULE_AUTHOR("Lawrence V. Stefani");
3741MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver " 3743MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
3742 DRV_VERSION " " DRV_RELDATE); 3744 DRV_VERSION " " DRV_RELDATE);
3743MODULE_LICENSE("GPL"); 3745MODULE_LICENSE("GPL");
3744
3745
3746/*
3747 * Local variables:
3748 * kernel-compile-command: "gcc -D__KERNEL__ -I/root/linux/include -Wall -Wstrict-prototypes -O2 -pipe -fomit-frame-pointer -fno-strength-reduce -m486 -malign-loops=2 -malign-jumps=2 -malign-functions=2 -c defxx.c"
3749 * End:
3750 */
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index ace39ec0a367..e4cef491dc73 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -573,7 +573,6 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
573 s16 nicsr; 573 s16 nicsr;
574 u_long ioaddr; 574 u_long ioaddr;
575 u_long mem_start; 575 u_long mem_start;
576 DECLARE_MAC_BUF(mac);
577 576
578 /* 577 /*
579 * We are now supposed to enter this function with the 578 * We are now supposed to enter this function with the
@@ -601,7 +600,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
601 return -ENXIO; 600 return -ENXIO;
602 } 601 }
603 602
604 lp = (struct depca_private *) dev->priv; 603 lp = netdev_priv(dev);
605 mem_start = lp->mem_start; 604 mem_start = lp->mem_start;
606 605
607 if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown) 606 if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown)
@@ -633,7 +632,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
633 632
634 printk(", h/w address "); 633 printk(", h/w address ");
635 status = get_hw_addr(dev); 634 status = get_hw_addr(dev);
636 printk("%s", print_mac(mac, dev->dev_addr)); 635 printk("%pM", dev->dev_addr);
637 if (status != 0) { 636 if (status != 0) {
638 printk(" which has an Ethernet PROM CRC error.\n"); 637 printk(" which has an Ethernet PROM CRC error.\n");
639 return -ENXIO; 638 return -ENXIO;
@@ -821,7 +820,7 @@ out_priv:
821 820
822static int depca_open(struct net_device *dev) 821static int depca_open(struct net_device *dev)
823{ 822{
824 struct depca_private *lp = (struct depca_private *) dev->priv; 823 struct depca_private *lp = netdev_priv(dev);
825 u_long ioaddr = dev->base_addr; 824 u_long ioaddr = dev->base_addr;
826 s16 nicsr; 825 s16 nicsr;
827 int status = 0; 826 int status = 0;
@@ -866,7 +865,7 @@ static int depca_open(struct net_device *dev)
866/* Initialize the lance Rx and Tx descriptor rings. */ 865/* Initialize the lance Rx and Tx descriptor rings. */
867static void depca_init_ring(struct net_device *dev) 866static void depca_init_ring(struct net_device *dev)
868{ 867{
869 struct depca_private *lp = (struct depca_private *) dev->priv; 868 struct depca_private *lp = netdev_priv(dev);
870 u_int i; 869 u_int i;
871 u_long offset; 870 u_long offset;
872 871
@@ -924,7 +923,7 @@ static void depca_tx_timeout(struct net_device *dev)
924*/ 923*/
925static int depca_start_xmit(struct sk_buff *skb, struct net_device *dev) 924static int depca_start_xmit(struct sk_buff *skb, struct net_device *dev)
926{ 925{
927 struct depca_private *lp = (struct depca_private *) dev->priv; 926 struct depca_private *lp = netdev_priv(dev);
928 u_long ioaddr = dev->base_addr; 927 u_long ioaddr = dev->base_addr;
929 int status = 0; 928 int status = 0;
930 929
@@ -972,7 +971,7 @@ static irqreturn_t depca_interrupt(int irq, void *dev_id)
972 return IRQ_NONE; 971 return IRQ_NONE;
973 } 972 }
974 973
975 lp = (struct depca_private *) dev->priv; 974 lp = netdev_priv(dev);
976 ioaddr = dev->base_addr; 975 ioaddr = dev->base_addr;
977 976
978 spin_lock(&lp->lock); 977 spin_lock(&lp->lock);
@@ -1010,7 +1009,7 @@ static irqreturn_t depca_interrupt(int irq, void *dev_id)
1010/* Called with lp->lock held */ 1009/* Called with lp->lock held */
1011static int depca_rx(struct net_device *dev) 1010static int depca_rx(struct net_device *dev)
1012{ 1011{
1013 struct depca_private *lp = (struct depca_private *) dev->priv; 1012 struct depca_private *lp = netdev_priv(dev);
1014 int i, entry; 1013 int i, entry;
1015 s32 status; 1014 s32 status;
1016 1015
@@ -1057,7 +1056,6 @@ static int depca_rx(struct net_device *dev)
1057 /* 1056 /*
1058 ** Update stats 1057 ** Update stats
1059 */ 1058 */
1060 dev->last_rx = jiffies;
1061 dev->stats.rx_packets++; 1059 dev->stats.rx_packets++;
1062 dev->stats.rx_bytes += pkt_len; 1060 dev->stats.rx_bytes += pkt_len;
1063 for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) { 1061 for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) {
@@ -1108,7 +1106,7 @@ static int depca_rx(struct net_device *dev)
1108*/ 1106*/
1109static int depca_tx(struct net_device *dev) 1107static int depca_tx(struct net_device *dev)
1110{ 1108{
1111 struct depca_private *lp = (struct depca_private *) dev->priv; 1109 struct depca_private *lp = netdev_priv(dev);
1112 int entry; 1110 int entry;
1113 s32 status; 1111 s32 status;
1114 u_long ioaddr = dev->base_addr; 1112 u_long ioaddr = dev->base_addr;
@@ -1149,7 +1147,7 @@ static int depca_tx(struct net_device *dev)
1149 1147
1150static int depca_close(struct net_device *dev) 1148static int depca_close(struct net_device *dev)
1151{ 1149{
1152 struct depca_private *lp = (struct depca_private *) dev->priv; 1150 struct depca_private *lp = netdev_priv(dev);
1153 s16 nicsr; 1151 s16 nicsr;
1154 u_long ioaddr = dev->base_addr; 1152 u_long ioaddr = dev->base_addr;
1155 1153
@@ -1185,7 +1183,7 @@ static int depca_close(struct net_device *dev)
1185 1183
1186static void LoadCSRs(struct net_device *dev) 1184static void LoadCSRs(struct net_device *dev)
1187{ 1185{
1188 struct depca_private *lp = (struct depca_private *) dev->priv; 1186 struct depca_private *lp = netdev_priv(dev);
1189 u_long ioaddr = dev->base_addr; 1187 u_long ioaddr = dev->base_addr;
1190 1188
1191 outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */ 1189 outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */
@@ -1202,7 +1200,7 @@ static void LoadCSRs(struct net_device *dev)
1202 1200
1203static int InitRestartDepca(struct net_device *dev) 1201static int InitRestartDepca(struct net_device *dev)
1204{ 1202{
1205 struct depca_private *lp = (struct depca_private *) dev->priv; 1203 struct depca_private *lp = netdev_priv(dev);
1206 u_long ioaddr = dev->base_addr; 1204 u_long ioaddr = dev->base_addr;
1207 int i, status = 0; 1205 int i, status = 0;
1208 1206
@@ -1234,7 +1232,7 @@ static int InitRestartDepca(struct net_device *dev)
1234*/ 1232*/
1235static void set_multicast_list(struct net_device *dev) 1233static void set_multicast_list(struct net_device *dev)
1236{ 1234{
1237 struct depca_private *lp = (struct depca_private *) dev->priv; 1235 struct depca_private *lp = netdev_priv(dev);
1238 u_long ioaddr = dev->base_addr; 1236 u_long ioaddr = dev->base_addr;
1239 1237
1240 netif_stop_queue(dev); 1238 netif_stop_queue(dev);
@@ -1263,7 +1261,7 @@ static void set_multicast_list(struct net_device *dev)
1263*/ 1261*/
1264static void SetMulticastFilter(struct net_device *dev) 1262static void SetMulticastFilter(struct net_device *dev)
1265{ 1263{
1266 struct depca_private *lp = (struct depca_private *) dev->priv; 1264 struct depca_private *lp = netdev_priv(dev);
1267 struct dev_mc_list *dmi = dev->mc_list; 1265 struct dev_mc_list *dmi = dev->mc_list;
1268 char *addrs; 1266 char *addrs;
1269 int i, j, bit, byte; 1267 int i, j, bit, byte;
@@ -1431,7 +1429,7 @@ static int __init depca_mca_probe(struct device *device)
1431 1429
1432 dev->irq = irq; 1430 dev->irq = irq;
1433 dev->base_addr = iobase; 1431 dev->base_addr = iobase;
1434 lp = dev->priv; 1432 lp = netdev_priv(dev);
1435 lp->depca_bus = DEPCA_BUS_MCA; 1433 lp->depca_bus = DEPCA_BUS_MCA;
1436 lp->adapter = depca_mca_adapter_type[mdev->index]; 1434 lp->adapter = depca_mca_adapter_type[mdev->index];
1437 lp->mem_start = mem_start; 1435 lp->mem_start = mem_start;
@@ -1534,7 +1532,7 @@ static int __init depca_isa_probe (struct platform_device *device)
1534 dev->base_addr = ioaddr; 1532 dev->base_addr = ioaddr;
1535 dev->irq = irq; /* Use whatever value the user gave 1533 dev->irq = irq; /* Use whatever value the user gave
1536 * us, and 0 if he didn't. */ 1534 * us, and 0 if he didn't. */
1537 lp = dev->priv; 1535 lp = netdev_priv(dev);
1538 lp->depca_bus = DEPCA_BUS_ISA; 1536 lp->depca_bus = DEPCA_BUS_ISA;
1539 lp->adapter = adapter; 1537 lp->adapter = adapter;
1540 lp->mem_start = mem_start; 1538 lp->mem_start = mem_start;
@@ -1558,6 +1556,7 @@ static int __init depca_isa_probe (struct platform_device *device)
1558#ifdef CONFIG_EISA 1556#ifdef CONFIG_EISA
1559static int __init depca_eisa_probe (struct device *device) 1557static int __init depca_eisa_probe (struct device *device)
1560{ 1558{
1559 enum depca_type adapter = unknown;
1561 struct eisa_device *edev; 1560 struct eisa_device *edev;
1562 struct net_device *dev; 1561 struct net_device *dev;
1563 struct depca_private *lp; 1562 struct depca_private *lp;
@@ -1576,11 +1575,15 @@ static int __init depca_eisa_probe (struct device *device)
1576 * the EISA configuration structures (yet... :-), just rely on 1575 * the EISA configuration structures (yet... :-), just rely on
1577 * the ISA probing to sort it out... */ 1576 * the ISA probing to sort it out... */
1578 1577
1579 depca_shmem_probe (&mem_start); 1578 adapter = depca_shmem_probe (&mem_start);
1579 if (adapter == unknown) {
1580 status = -ENODEV;
1581 goto out_free;
1582 }
1580 1583
1581 dev->base_addr = ioaddr; 1584 dev->base_addr = ioaddr;
1582 dev->irq = irq; 1585 dev->irq = irq;
1583 lp = dev->priv; 1586 lp = netdev_priv(dev);
1584 lp->depca_bus = DEPCA_BUS_EISA; 1587 lp->depca_bus = DEPCA_BUS_EISA;
1585 lp->adapter = edev->id.driver_data; 1588 lp->adapter = edev->id.driver_data;
1586 lp->mem_start = mem_start; 1589 lp->mem_start = mem_start;
@@ -1605,7 +1608,7 @@ static int __devexit depca_device_remove (struct device *device)
1605 int bus; 1608 int bus;
1606 1609
1607 dev = device->driver_data; 1610 dev = device->driver_data;
1608 lp = dev->priv; 1611 lp = netdev_priv(dev);
1609 1612
1610 unregister_netdev (dev); 1613 unregister_netdev (dev);
1611 iounmap (lp->sh_mem); 1614 iounmap (lp->sh_mem);
@@ -1747,7 +1750,7 @@ static int __init DevicePresent(u_long ioaddr)
1747static int __init get_hw_addr(struct net_device *dev) 1750static int __init get_hw_addr(struct net_device *dev)
1748{ 1751{
1749 u_long ioaddr = dev->base_addr; 1752 u_long ioaddr = dev->base_addr;
1750 struct depca_private *lp = dev->priv; 1753 struct depca_private *lp = netdev_priv(dev);
1751 int i, k, tmp, status = 0; 1754 int i, k, tmp, status = 0;
1752 u_short j, x, chksum; 1755 u_short j, x, chksum;
1753 1756
@@ -1782,7 +1785,7 @@ static int __init get_hw_addr(struct net_device *dev)
1782*/ 1785*/
1783static int load_packet(struct net_device *dev, struct sk_buff *skb) 1786static int load_packet(struct net_device *dev, struct sk_buff *skb)
1784{ 1787{
1785 struct depca_private *lp = (struct depca_private *) dev->priv; 1788 struct depca_private *lp = netdev_priv(dev);
1786 int i, entry, end, len, status = 0; 1789 int i, entry, end, len, status = 0;
1787 1790
1788 entry = lp->tx_new; /* Ring around buffer number. */ 1791 entry = lp->tx_new; /* Ring around buffer number. */
@@ -1837,11 +1840,10 @@ static int load_packet(struct net_device *dev, struct sk_buff *skb)
1837 1840
1838static void depca_dbg_open(struct net_device *dev) 1841static void depca_dbg_open(struct net_device *dev)
1839{ 1842{
1840 struct depca_private *lp = (struct depca_private *) dev->priv; 1843 struct depca_private *lp = netdev_priv(dev);
1841 u_long ioaddr = dev->base_addr; 1844 u_long ioaddr = dev->base_addr;
1842 struct depca_init *p = &lp->init_block; 1845 struct depca_init *p = &lp->init_block;
1843 int i; 1846 int i;
1844 DECLARE_MAC_BUF(mac);
1845 1847
1846 if (depca_debug > 1) { 1848 if (depca_debug > 1) {
1847 /* Do not copy the shadow init block into shared memory */ 1849 /* Do not copy the shadow init block into shared memory */
@@ -1880,7 +1882,7 @@ static void depca_dbg_open(struct net_device *dev)
1880 printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base)); 1882 printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base));
1881 printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start); 1883 printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start);
1882 printk(" mode: 0x%4.4x\n", p->mode); 1884 printk(" mode: 0x%4.4x\n", p->mode);
1883 printk(" physical address: %s\n", print_mac(mac, p->phys_addr)); 1885 printk(" physical address: %pM\n", p->phys_addr);
1884 printk(" multicast hash table: "); 1886 printk(" multicast hash table: ");
1885 for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) { 1887 for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) {
1886 printk("%2.2x:", p->mcast_table[i]); 1888 printk("%2.2x:", p->mcast_table[i]);
@@ -1909,7 +1911,7 @@ static void depca_dbg_open(struct net_device *dev)
1909*/ 1911*/
1910static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1912static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1911{ 1913{
1912 struct depca_private *lp = (struct depca_private *) dev->priv; 1914 struct depca_private *lp = netdev_priv(dev);
1913 struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_ifru; 1915 struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_ifru;
1914 int i, status = 0; 1916 int i, status = 0;
1915 u_long ioaddr = dev->base_addr; 1917 u_long ioaddr = dev->base_addr;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index f8037110a522..c749e9fb47ef 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -85,6 +85,19 @@ static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
85 85
86static const struct ethtool_ops ethtool_ops; 86static const struct ethtool_ops ethtool_ops;
87 87
88static const struct net_device_ops netdev_ops = {
89 .ndo_open = rio_open,
90 .ndo_start_xmit = start_xmit,
91 .ndo_stop = rio_close,
92 .ndo_get_stats = get_stats,
93 .ndo_validate_addr = eth_validate_addr,
94 .ndo_set_mac_address = eth_mac_addr,
95 .ndo_set_multicast_list = set_multicast,
96 .ndo_do_ioctl = rio_ioctl,
97 .ndo_tx_timeout = rio_tx_timeout,
98 .ndo_change_mtu = change_mtu,
99};
100
88static int __devinit 101static int __devinit
89rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) 102rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
90{ 103{
@@ -97,7 +110,6 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
97 static int version_printed; 110 static int version_printed;
98 void *ring_space; 111 void *ring_space;
99 dma_addr_t ring_dma; 112 dma_addr_t ring_dma;
100 DECLARE_MAC_BUF(mac);
101 113
102 if (!version_printed++) 114 if (!version_printed++)
103 printk ("%s", version); 115 printk ("%s", version);
@@ -198,15 +210,8 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
198 else if (tx_coalesce > TX_RING_SIZE-1) 210 else if (tx_coalesce > TX_RING_SIZE-1)
199 tx_coalesce = TX_RING_SIZE - 1; 211 tx_coalesce = TX_RING_SIZE - 1;
200 } 212 }
201 dev->open = &rio_open; 213 dev->netdev_ops = &netdev_ops;
202 dev->hard_start_xmit = &start_xmit;
203 dev->stop = &rio_close;
204 dev->get_stats = &get_stats;
205 dev->set_multicast_list = &set_multicast;
206 dev->do_ioctl = &rio_ioctl;
207 dev->tx_timeout = &rio_tx_timeout;
208 dev->watchdog_timeo = TX_TIMEOUT; 214 dev->watchdog_timeo = TX_TIMEOUT;
209 dev->change_mtu = &change_mtu;
210 SET_ETHTOOL_OPS(dev, &ethtool_ops); 215 SET_ETHTOOL_OPS(dev, &ethtool_ops);
211#if 0 216#if 0
212 dev->features = NETIF_F_IP_CSUM; 217 dev->features = NETIF_F_IP_CSUM;
@@ -257,8 +262,8 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
257 262
258 card_idx++; 263 card_idx++;
259 264
260 printk (KERN_INFO "%s: %s, %s, IRQ %d\n", 265 printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
261 dev->name, np->name, print_mac(mac, dev->dev_addr), irq); 266 dev->name, np->name, dev->dev_addr, irq);
262 if (tx_coalesce > 1) 267 if (tx_coalesce > 1)
263 printk(KERN_INFO "tx_coalesce:\t%d packets\n", 268 printk(KERN_INFO "tx_coalesce:\t%d packets\n",
264 tx_coalesce); 269 tx_coalesce);
@@ -892,7 +897,6 @@ receive_packet (struct net_device *dev)
892 } 897 }
893#endif 898#endif
894 netif_rx (skb); 899 netif_rx (skb);
895 dev->last_rx = jiffies;
896 } 900 }
897 entry = (entry + 1) % RX_RING_SIZE; 901 entry = (entry + 1) % RX_RING_SIZE;
898 } 902 }
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 5a9083e3f443..bcf92917bbf3 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -137,7 +137,7 @@ typedef struct board_info {
137 137
138static inline board_info_t *to_dm9000_board(struct net_device *dev) 138static inline board_info_t *to_dm9000_board(struct net_device *dev)
139{ 139{
140 return dev->priv; 140 return netdev_priv(dev);
141} 141}
142 142
143/* DM9000 network board routine ---------------------------- */ 143/* DM9000 network board routine ---------------------------- */
@@ -626,7 +626,7 @@ static unsigned char dm9000_type_to_char(enum dm9000_type type)
626static void 626static void
627dm9000_hash_table(struct net_device *dev) 627dm9000_hash_table(struct net_device *dev)
628{ 628{
629 board_info_t *db = (board_info_t *) dev->priv; 629 board_info_t *db = netdev_priv(dev);
630 struct dev_mc_list *mcptr = dev->mc_list; 630 struct dev_mc_list *mcptr = dev->mc_list;
631 int mc_cnt = dev->mc_count; 631 int mc_cnt = dev->mc_count;
632 int i, oft; 632 int i, oft;
@@ -677,7 +677,7 @@ dm9000_hash_table(struct net_device *dev)
677static void 677static void
678dm9000_init_dm9000(struct net_device *dev) 678dm9000_init_dm9000(struct net_device *dev)
679{ 679{
680 board_info_t *db = dev->priv; 680 board_info_t *db = netdev_priv(dev);
681 unsigned int imr; 681 unsigned int imr;
682 682
683 dm9000_dbg(db, 1, "entering %s\n", __func__); 683 dm9000_dbg(db, 1, "entering %s\n", __func__);
@@ -723,7 +723,7 @@ dm9000_init_dm9000(struct net_device *dev)
723/* Our watchdog timed out. Called by the networking layer */ 723/* Our watchdog timed out. Called by the networking layer */
724static void dm9000_timeout(struct net_device *dev) 724static void dm9000_timeout(struct net_device *dev)
725{ 725{
726 board_info_t *db = (board_info_t *) dev->priv; 726 board_info_t *db = netdev_priv(dev);
727 u8 reg_save; 727 u8 reg_save;
728 unsigned long flags; 728 unsigned long flags;
729 729
@@ -751,7 +751,7 @@ static int
751dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev) 751dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
752{ 752{
753 unsigned long flags; 753 unsigned long flags;
754 board_info_t *db = dev->priv; 754 board_info_t *db = netdev_priv(dev);
755 755
756 dm9000_dbg(db, 3, "%s:\n", __func__); 756 dm9000_dbg(db, 3, "%s:\n", __func__);
757 757
@@ -831,7 +831,7 @@ struct dm9000_rxhdr {
831static void 831static void
832dm9000_rx(struct net_device *dev) 832dm9000_rx(struct net_device *dev)
833{ 833{
834 board_info_t *db = (board_info_t *) dev->priv; 834 board_info_t *db = netdev_priv(dev);
835 struct dm9000_rxhdr rxhdr; 835 struct dm9000_rxhdr rxhdr;
836 struct sk_buff *skb; 836 struct sk_buff *skb;
837 u8 rxbyte, *rdptr; 837 u8 rxbyte, *rdptr;
@@ -928,7 +928,7 @@ dm9000_rx(struct net_device *dev)
928static irqreturn_t dm9000_interrupt(int irq, void *dev_id) 928static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
929{ 929{
930 struct net_device *dev = dev_id; 930 struct net_device *dev = dev_id;
931 board_info_t *db = dev->priv; 931 board_info_t *db = netdev_priv(dev);
932 int int_status; 932 int int_status;
933 u8 reg_save; 933 u8 reg_save;
934 934
@@ -996,7 +996,7 @@ static void dm9000_poll_controller(struct net_device *dev)
996static int 996static int
997dm9000_open(struct net_device *dev) 997dm9000_open(struct net_device *dev)
998{ 998{
999 board_info_t *db = dev->priv; 999 board_info_t *db = netdev_priv(dev);
1000 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK; 1000 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1001 1001
1002 if (netif_msg_ifup(db)) 1002 if (netif_msg_ifup(db))
@@ -1046,7 +1046,7 @@ static void dm9000_msleep(board_info_t *db, unsigned int ms)
1046static int 1046static int
1047dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) 1047dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
1048{ 1048{
1049 board_info_t *db = (board_info_t *) dev->priv; 1049 board_info_t *db = netdev_priv(dev);
1050 unsigned long flags; 1050 unsigned long flags;
1051 unsigned int reg_save; 1051 unsigned int reg_save;
1052 int ret; 1052 int ret;
@@ -1093,7 +1093,7 @@ static void
1093dm9000_phy_write(struct net_device *dev, 1093dm9000_phy_write(struct net_device *dev,
1094 int phyaddr_unused, int reg, int value) 1094 int phyaddr_unused, int reg, int value)
1095{ 1095{
1096 board_info_t *db = (board_info_t *) dev->priv; 1096 board_info_t *db = netdev_priv(dev);
1097 unsigned long flags; 1097 unsigned long flags;
1098 unsigned long reg_save; 1098 unsigned long reg_save;
1099 1099
@@ -1134,7 +1134,7 @@ dm9000_phy_write(struct net_device *dev,
1134static void 1134static void
1135dm9000_shutdown(struct net_device *dev) 1135dm9000_shutdown(struct net_device *dev)
1136{ 1136{
1137 board_info_t *db = dev->priv; 1137 board_info_t *db = netdev_priv(dev);
1138 1138
1139 /* RESET device */ 1139 /* RESET device */
1140 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ 1140 dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
@@ -1150,7 +1150,7 @@ dm9000_shutdown(struct net_device *dev)
1150static int 1150static int
1151dm9000_stop(struct net_device *ndev) 1151dm9000_stop(struct net_device *ndev)
1152{ 1152{
1153 board_info_t *db = ndev->priv; 1153 board_info_t *db = netdev_priv(ndev);
1154 1154
1155 if (netif_msg_ifdown(db)) 1155 if (netif_msg_ifdown(db))
1156 dev_dbg(db->dev, "shutting down %s\n", ndev->name); 1156 dev_dbg(db->dev, "shutting down %s\n", ndev->name);
@@ -1197,7 +1197,7 @@ dm9000_probe(struct platform_device *pdev)
1197 dev_dbg(&pdev->dev, "dm9000_probe()\n"); 1197 dev_dbg(&pdev->dev, "dm9000_probe()\n");
1198 1198
1199 /* setup board info structure */ 1199 /* setup board info structure */
1200 db = ndev->priv; 1200 db = netdev_priv(ndev);
1201 memset(db, 0, sizeof(*db)); 1201 memset(db, 0, sizeof(*db));
1202 1202
1203 db->dev = &pdev->dev; 1203 db->dev = &pdev->dev;
@@ -1385,13 +1385,11 @@ dm9000_probe(struct platform_device *pdev)
1385 platform_set_drvdata(pdev, ndev); 1385 platform_set_drvdata(pdev, ndev);
1386 ret = register_netdev(ndev); 1386 ret = register_netdev(ndev);
1387 1387
1388 if (ret == 0) { 1388 if (ret == 0)
1389 DECLARE_MAC_BUF(mac); 1389 printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
1390 printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %s (%s)\n",
1391 ndev->name, dm9000_type_to_char(db->type), 1390 ndev->name, dm9000_type_to_char(db->type),
1392 db->io_addr, db->io_data, ndev->irq, 1391 db->io_addr, db->io_data, ndev->irq,
1393 print_mac(mac, ndev->dev_addr), mac_src); 1392 ndev->dev_addr, mac_src);
1394 }
1395 return 0; 1393 return 0;
1396 1394
1397out: 1395out:
@@ -1410,7 +1408,7 @@ dm9000_drv_suspend(struct platform_device *dev, pm_message_t state)
1410 board_info_t *db; 1408 board_info_t *db;
1411 1409
1412 if (ndev) { 1410 if (ndev) {
1413 db = (board_info_t *) ndev->priv; 1411 db = netdev_priv(ndev);
1414 db->in_suspend = 1; 1412 db->in_suspend = 1;
1415 1413
1416 if (netif_running(ndev)) { 1414 if (netif_running(ndev)) {
@@ -1425,7 +1423,7 @@ static int
1425dm9000_drv_resume(struct platform_device *dev) 1423dm9000_drv_resume(struct platform_device *dev)
1426{ 1424{
1427 struct net_device *ndev = platform_get_drvdata(dev); 1425 struct net_device *ndev = platform_get_drvdata(dev);
1428 board_info_t *db = (board_info_t *) ndev->priv; 1426 board_info_t *db = netdev_priv(ndev);
1429 1427
1430 if (ndev) { 1428 if (ndev) {
1431 1429
@@ -1449,7 +1447,7 @@ dm9000_drv_remove(struct platform_device *pdev)
1449 platform_set_drvdata(pdev, NULL); 1447 platform_set_drvdata(pdev, NULL);
1450 1448
1451 unregister_netdev(ndev); 1449 unregister_netdev(ndev);
1452 dm9000_release_board(pdev, (board_info_t *) ndev->priv); 1450 dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev));
1453 free_netdev(ndev); /* free device structure */ 1451 free_netdev(ndev); /* free device structure */
1454 1452
1455 dev_dbg(&pdev->dev, "released and freed device\n"); 1453 dev_dbg(&pdev->dev, "released and freed device\n");
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 84e14f397d9a..8ebd7d789405 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -57,18 +57,23 @@ static void set_multicast_list(struct net_device *dev)
57{ 57{
58} 58}
59 59
60static const struct net_device_ops dummy_netdev_ops = {
61 .ndo_start_xmit = dummy_xmit,
62 .ndo_validate_addr = eth_validate_addr,
63 .ndo_set_multicast_list = set_multicast_list,
64 .ndo_set_mac_address = dummy_set_address,
65};
66
60static void dummy_setup(struct net_device *dev) 67static void dummy_setup(struct net_device *dev)
61{ 68{
69 ether_setup(dev);
70
62 /* Initialize the device structure. */ 71 /* Initialize the device structure. */
63 dev->hard_start_xmit = dummy_xmit; 72 dev->netdev_ops = &dummy_netdev_ops;
64 dev->set_multicast_list = set_multicast_list;
65 dev->set_mac_address = dummy_set_address;
66 dev->destructor = free_netdev; 73 dev->destructor = free_netdev;
67 74
68 /* Fill in device structure with ethernet-generic values. */ 75 /* Fill in device structure with ethernet-generic values. */
69 ether_setup(dev);
70 dev->tx_queue_len = 0; 76 dev->tx_queue_len = 0;
71 dev->change_mtu = NULL;
72 dev->flags |= IFF_NOARP; 77 dev->flags |= IFF_NOARP;
73 dev->flags &= ~IFF_MULTICAST; 78 dev->flags &= ~IFF_MULTICAST;
74 random_ether_addr(dev->dev_addr); 79 random_ether_addr(dev->dev_addr);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index e8bfcce6b319..9f38b16ccbbd 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1580,11 +1580,13 @@ static void e100_watchdog(unsigned long data)
1580 mii_ethtool_gset(&nic->mii, &cmd); 1580 mii_ethtool_gset(&nic->mii, &cmd);
1581 1581
1582 if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { 1582 if(mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1583 DPRINTK(LINK, INFO, "link up, %sMbps, %s-duplex\n", 1583 printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n",
1584 cmd.speed == SPEED_100 ? "100" : "10", 1584 nic->netdev->name,
1585 cmd.duplex == DUPLEX_FULL ? "full" : "half"); 1585 cmd.speed == SPEED_100 ? "100" : "10",
1586 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1586 } else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { 1587 } else if(!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1587 DPRINTK(LINK, INFO, "link down\n"); 1588 printk(KERN_INFO "e100: %s NIC Link is Down\n",
1589 nic->netdev->name);
1588 } 1590 }
1589 1591
1590 mii_check_link(&nic->mii); 1592 mii_check_link(&nic->mii);
@@ -1880,7 +1882,6 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1880 } else { 1882 } else {
1881 dev->stats.rx_packets++; 1883 dev->stats.rx_packets++;
1882 dev->stats.rx_bytes += actual_size; 1884 dev->stats.rx_bytes += actual_size;
1883 nic->netdev->last_rx = jiffies;
1884 netif_receive_skb(skb); 1885 netif_receive_skb(skb);
1885 if(work_done) 1886 if(work_done)
1886 (*work_done)++; 1887 (*work_done)++;
@@ -2048,9 +2049,9 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
2048 if(stat_ack & stat_ack_rnr) 2049 if(stat_ack & stat_ack_rnr)
2049 nic->ru_running = RU_SUSPENDED; 2050 nic->ru_running = RU_SUSPENDED;
2050 2051
2051 if(likely(netif_rx_schedule_prep(netdev, &nic->napi))) { 2052 if(likely(netif_rx_schedule_prep(&nic->napi))) {
2052 e100_disable_irq(nic); 2053 e100_disable_irq(nic);
2053 __netif_rx_schedule(netdev, &nic->napi); 2054 __netif_rx_schedule(&nic->napi);
2054 } 2055 }
2055 2056
2056 return IRQ_HANDLED; 2057 return IRQ_HANDLED;
@@ -2059,7 +2060,6 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
2059static int e100_poll(struct napi_struct *napi, int budget) 2060static int e100_poll(struct napi_struct *napi, int budget)
2060{ 2061{
2061 struct nic *nic = container_of(napi, struct nic, napi); 2062 struct nic *nic = container_of(napi, struct nic, napi);
2062 struct net_device *netdev = nic->netdev;
2063 unsigned int work_done = 0; 2063 unsigned int work_done = 0;
2064 2064
2065 e100_rx_clean(nic, &work_done, budget); 2065 e100_rx_clean(nic, &work_done, budget);
@@ -2067,7 +2067,7 @@ static int e100_poll(struct napi_struct *napi, int budget)
2067 2067
2068 /* If budget not fully consumed, exit the polling mode */ 2068 /* If budget not fully consumed, exit the polling mode */
2069 if (work_done < budget) { 2069 if (work_done < budget) {
2070 netif_rx_complete(netdev, napi); 2070 netif_rx_complete(napi);
2071 e100_enable_irq(nic); 2071 e100_enable_irq(nic);
2072 } 2072 }
2073 2073
@@ -2322,7 +2322,8 @@ static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2322{ 2322{
2323 struct nic *nic = netdev_priv(netdev); 2323 struct nic *nic = netdev_priv(netdev);
2324 2324
2325 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0) 2325 if ((wol->wolopts && wol->wolopts != WAKE_MAGIC) ||
2326 !device_can_wakeup(&nic->pdev->dev))
2326 return -EOPNOTSUPP; 2327 return -EOPNOTSUPP;
2327 2328
2328 if(wol->wolopts) 2329 if(wol->wolopts)
@@ -2330,6 +2331,8 @@ static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2330 else 2331 else
2331 nic->flags &= ~wol_magic; 2332 nic->flags &= ~wol_magic;
2332 2333
2334 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2335
2333 e100_exec_cb(nic, NULL, e100_configure); 2336 e100_exec_cb(nic, NULL, e100_configure);
2334 2337
2335 return 0; 2338 return 0;
@@ -2610,13 +2613,27 @@ static int e100_close(struct net_device *netdev)
2610 return 0; 2613 return 0;
2611} 2614}
2612 2615
2616static const struct net_device_ops e100_netdev_ops = {
2617 .ndo_open = e100_open,
2618 .ndo_stop = e100_close,
2619 .ndo_start_xmit = e100_xmit_frame,
2620 .ndo_validate_addr = eth_validate_addr,
2621 .ndo_set_multicast_list = e100_set_multicast_list,
2622 .ndo_set_mac_address = e100_set_mac_address,
2623 .ndo_change_mtu = e100_change_mtu,
2624 .ndo_do_ioctl = e100_do_ioctl,
2625 .ndo_tx_timeout = e100_tx_timeout,
2626#ifdef CONFIG_NET_POLL_CONTROLLER
2627 .ndo_poll_controller = e100_netpoll,
2628#endif
2629};
2630
2613static int __devinit e100_probe(struct pci_dev *pdev, 2631static int __devinit e100_probe(struct pci_dev *pdev,
2614 const struct pci_device_id *ent) 2632 const struct pci_device_id *ent)
2615{ 2633{
2616 struct net_device *netdev; 2634 struct net_device *netdev;
2617 struct nic *nic; 2635 struct nic *nic;
2618 int err; 2636 int err;
2619 DECLARE_MAC_BUF(mac);
2620 2637
2621 if(!(netdev = alloc_etherdev(sizeof(struct nic)))) { 2638 if(!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2622 if(((1 << debug) - 1) & NETIF_MSG_PROBE) 2639 if(((1 << debug) - 1) & NETIF_MSG_PROBE)
@@ -2624,19 +2641,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2624 return -ENOMEM; 2641 return -ENOMEM;
2625 } 2642 }
2626 2643
2627 netdev->open = e100_open; 2644 netdev->netdev_ops = &e100_netdev_ops;
2628 netdev->stop = e100_close;
2629 netdev->hard_start_xmit = e100_xmit_frame;
2630 netdev->set_multicast_list = e100_set_multicast_list;
2631 netdev->set_mac_address = e100_set_mac_address;
2632 netdev->change_mtu = e100_change_mtu;
2633 netdev->do_ioctl = e100_do_ioctl;
2634 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops); 2645 SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
2635 netdev->tx_timeout = e100_tx_timeout;
2636 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD; 2646 netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
2637#ifdef CONFIG_NET_POLL_CONTROLLER
2638 netdev->poll_controller = e100_netpoll;
2639#endif
2640 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 2647 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2641 2648
2642 nic = netdev_priv(netdev); 2649 nic = netdev_priv(netdev);
@@ -2734,8 +2741,10 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2734 2741
2735 /* Wol magic packet can be enabled from eeprom */ 2742 /* Wol magic packet can be enabled from eeprom */
2736 if((nic->mac >= mac_82558_D101_A4) && 2743 if((nic->mac >= mac_82558_D101_A4) &&
2737 (nic->eeprom[eeprom_id] & eeprom_id_wol)) 2744 (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
2738 nic->flags |= wol_magic; 2745 nic->flags |= wol_magic;
2746 device_set_wakeup_enable(&pdev->dev, true);
2747 }
2739 2748
2740 /* ack any pending wake events, disable PME */ 2749 /* ack any pending wake events, disable PME */
2741 pci_pme_active(pdev, false); 2750 pci_pme_active(pdev, false);
@@ -2746,9 +2755,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2746 goto err_out_free; 2755 goto err_out_free;
2747 } 2756 }
2748 2757
2749 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %s\n", 2758 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
2750 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), 2759 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2751 pdev->irq, print_mac(mac, netdev->dev_addr)); 2760 pdev->irq, netdev->dev_addr);
2752 2761
2753 return 0; 2762 return 0;
2754 2763
@@ -2794,11 +2803,10 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
2794 pci_save_state(pdev); 2803 pci_save_state(pdev);
2795 2804
2796 if ((nic->flags & wol_magic) | e100_asf(nic)) { 2805 if ((nic->flags & wol_magic) | e100_asf(nic)) {
2797 pci_enable_wake(pdev, PCI_D3hot, 1); 2806 if (pci_enable_wake(pdev, PCI_D3cold, true))
2798 pci_enable_wake(pdev, PCI_D3cold, 1); 2807 pci_enable_wake(pdev, PCI_D3hot, true);
2799 } else { 2808 } else {
2800 pci_enable_wake(pdev, PCI_D3hot, 0); 2809 pci_enable_wake(pdev, PCI_D3hot, false);
2801 pci_enable_wake(pdev, PCI_D3cold, 0);
2802 } 2810 }
2803 2811
2804 pci_disable_device(pdev); 2812 pci_disable_device(pdev);
@@ -2843,7 +2851,7 @@ static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel
2843 struct nic *nic = netdev_priv(netdev); 2851 struct nic *nic = netdev_priv(netdev);
2844 2852
2845 /* Similar to calling e100_down(), but avoids adapter I/O. */ 2853 /* Similar to calling e100_down(), but avoids adapter I/O. */
2846 netdev->stop(netdev); 2854 e100_close(netdev);
2847 2855
2848 /* Detach; put netif into a state similar to hotplug unplug. */ 2856 /* Detach; put netif into a state similar to hotplug unplug. */
2849 napi_enable(&nic->napi); 2857 napi_enable(&nic->napi);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 62f62970f978..f5581de04757 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -284,7 +284,6 @@ struct e1000_adapter {
284 int cleaned_count); 284 int cleaned_count);
285 struct e1000_rx_ring *rx_ring; /* One per active queue */ 285 struct e1000_rx_ring *rx_ring; /* One per active queue */
286 struct napi_struct napi; 286 struct napi_struct napi;
287 struct net_device *polling_netdev; /* One per active queue */
288 287
289 int num_tx_queues; 288 int num_tx_queues;
290 int num_rx_queues; 289 int num_rx_queues;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 872799b746f5..26474c92193f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -888,6 +888,26 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
888 } 888 }
889} 889}
890 890
891static const struct net_device_ops e1000_netdev_ops = {
892 .ndo_open = e1000_open,
893 .ndo_stop = e1000_close,
894 .ndo_start_xmit = e1000_xmit_frame,
895 .ndo_get_stats = e1000_get_stats,
896 .ndo_set_rx_mode = e1000_set_rx_mode,
897 .ndo_set_mac_address = e1000_set_mac,
898 .ndo_tx_timeout = e1000_tx_timeout,
899 .ndo_change_mtu = e1000_change_mtu,
900 .ndo_do_ioctl = e1000_ioctl,
901 .ndo_validate_addr = eth_validate_addr,
902
903 .ndo_vlan_rx_register = e1000_vlan_rx_register,
904 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
905 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
906#ifdef CONFIG_NET_POLL_CONTROLLER
907 .ndo_poll_controller = e1000_netpoll,
908#endif
909};
910
891/** 911/**
892 * e1000_probe - Device Initialization Routine 912 * e1000_probe - Device Initialization Routine
893 * @pdev: PCI device information struct 913 * @pdev: PCI device information struct
@@ -912,7 +932,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
912 u16 eeprom_data = 0; 932 u16 eeprom_data = 0;
913 u16 eeprom_apme_mask = E1000_EEPROM_APME; 933 u16 eeprom_apme_mask = E1000_EEPROM_APME;
914 int bars, need_ioport; 934 int bars, need_ioport;
915 DECLARE_MAC_BUF(mac);
916 935
917 /* do not allocate ioport bars when not needed */ 936 /* do not allocate ioport bars when not needed */
918 need_ioport = e1000_is_need_ioport(pdev); 937 need_ioport = e1000_is_need_ioport(pdev);
@@ -967,8 +986,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
967 hw->back = adapter; 986 hw->back = adapter;
968 987
969 err = -EIO; 988 err = -EIO;
970 hw->hw_addr = ioremap(pci_resource_start(pdev, BAR_0), 989 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
971 pci_resource_len(pdev, BAR_0));
972 if (!hw->hw_addr) 990 if (!hw->hw_addr)
973 goto err_ioremap; 991 goto err_ioremap;
974 992
@@ -983,24 +1001,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
983 } 1001 }
984 } 1002 }
985 1003
986 netdev->open = &e1000_open; 1004 netdev->netdev_ops = &e1000_netdev_ops;
987 netdev->stop = &e1000_close;
988 netdev->hard_start_xmit = &e1000_xmit_frame;
989 netdev->get_stats = &e1000_get_stats;
990 netdev->set_rx_mode = &e1000_set_rx_mode;
991 netdev->set_mac_address = &e1000_set_mac;
992 netdev->change_mtu = &e1000_change_mtu;
993 netdev->do_ioctl = &e1000_ioctl;
994 e1000_set_ethtool_ops(netdev); 1005 e1000_set_ethtool_ops(netdev);
995 netdev->tx_timeout = &e1000_tx_timeout;
996 netdev->watchdog_timeo = 5 * HZ; 1006 netdev->watchdog_timeo = 5 * HZ;
997 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); 1007 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
998 netdev->vlan_rx_register = e1000_vlan_rx_register; 1008
999 netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
1000 netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
1001#ifdef CONFIG_NET_POLL_CONTROLLER
1002 netdev->poll_controller = e1000_netpoll;
1003#endif
1004 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 1009 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1005 1010
1006 adapter->bd_number = cards_found; 1011 adapter->bd_number = cards_found;
@@ -1016,9 +1021,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1016 * because it depends on mac_type */ 1021 * because it depends on mac_type */
1017 if ((hw->mac_type == e1000_ich8lan) && 1022 if ((hw->mac_type == e1000_ich8lan) &&
1018 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { 1023 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
1019 hw->flash_address = 1024 hw->flash_address = pci_ioremap_bar(pdev, 1);
1020 ioremap(pci_resource_start(pdev, 1),
1021 pci_resource_len(pdev, 1));
1022 if (!hw->flash_address) 1025 if (!hw->flash_address)
1023 goto err_flashmap; 1026 goto err_flashmap;
1024 } 1027 }
@@ -1195,7 +1198,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1195 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" : 1198 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
1196 "32-bit")); 1199 "32-bit"));
1197 1200
1198 printk("%s\n", print_mac(mac, netdev->dev_addr)); 1201 printk("%pM\n", netdev->dev_addr);
1199 1202
1200 if (hw->bus_type == e1000_bus_type_pci_express) { 1203 if (hw->bus_type == e1000_bus_type_pci_express) {
1201 DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no " 1204 DPRINTK(PROBE, WARNING, "This device (id %04x:%04x) will no "
@@ -1239,12 +1242,8 @@ err_eeprom:
1239 if (hw->flash_address) 1242 if (hw->flash_address)
1240 iounmap(hw->flash_address); 1243 iounmap(hw->flash_address);
1241err_flashmap: 1244err_flashmap:
1242 for (i = 0; i < adapter->num_rx_queues; i++)
1243 dev_put(&adapter->polling_netdev[i]);
1244
1245 kfree(adapter->tx_ring); 1245 kfree(adapter->tx_ring);
1246 kfree(adapter->rx_ring); 1246 kfree(adapter->rx_ring);
1247 kfree(adapter->polling_netdev);
1248err_sw_init: 1247err_sw_init:
1249 iounmap(hw->hw_addr); 1248 iounmap(hw->hw_addr);
1250err_ioremap: 1249err_ioremap:
@@ -1272,7 +1271,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
1272 struct net_device *netdev = pci_get_drvdata(pdev); 1271 struct net_device *netdev = pci_get_drvdata(pdev);
1273 struct e1000_adapter *adapter = netdev_priv(netdev); 1272 struct e1000_adapter *adapter = netdev_priv(netdev);
1274 struct e1000_hw *hw = &adapter->hw; 1273 struct e1000_hw *hw = &adapter->hw;
1275 int i;
1276 1274
1277 cancel_work_sync(&adapter->reset_task); 1275 cancel_work_sync(&adapter->reset_task);
1278 1276
@@ -1282,9 +1280,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
1282 * would have already happened in close and is redundant. */ 1280 * would have already happened in close and is redundant. */
1283 e1000_release_hw_control(adapter); 1281 e1000_release_hw_control(adapter);
1284 1282
1285 for (i = 0; i < adapter->num_rx_queues; i++)
1286 dev_put(&adapter->polling_netdev[i]);
1287
1288 unregister_netdev(netdev); 1283 unregister_netdev(netdev);
1289 1284
1290 if (!e1000_check_phy_reset_block(hw)) 1285 if (!e1000_check_phy_reset_block(hw))
@@ -1292,7 +1287,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
1292 1287
1293 kfree(adapter->tx_ring); 1288 kfree(adapter->tx_ring);
1294 kfree(adapter->rx_ring); 1289 kfree(adapter->rx_ring);
1295 kfree(adapter->polling_netdev);
1296 1290
1297 iounmap(hw->hw_addr); 1291 iounmap(hw->hw_addr);
1298 if (hw->flash_address) 1292 if (hw->flash_address)
@@ -1318,7 +1312,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1318 struct e1000_hw *hw = &adapter->hw; 1312 struct e1000_hw *hw = &adapter->hw;
1319 struct net_device *netdev = adapter->netdev; 1313 struct net_device *netdev = adapter->netdev;
1320 struct pci_dev *pdev = adapter->pdev; 1314 struct pci_dev *pdev = adapter->pdev;
1321 int i;
1322 1315
1323 /* PCI config space info */ 1316 /* PCI config space info */
1324 1317
@@ -1375,11 +1368,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1375 return -ENOMEM; 1368 return -ENOMEM;
1376 } 1369 }
1377 1370
1378 for (i = 0; i < adapter->num_rx_queues; i++) {
1379 adapter->polling_netdev[i].priv = adapter;
1380 dev_hold(&adapter->polling_netdev[i]);
1381 set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
1382 }
1383 spin_lock_init(&adapter->tx_queue_lock); 1371 spin_lock_init(&adapter->tx_queue_lock);
1384 1372
1385 /* Explicitly disable IRQ since the NIC can be in any state. */ 1373 /* Explicitly disable IRQ since the NIC can be in any state. */
@@ -1397,8 +1385,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1397 * @adapter: board private structure to initialize 1385 * @adapter: board private structure to initialize
1398 * 1386 *
1399 * We allocate one ring per queue at run-time since we don't know the 1387 * We allocate one ring per queue at run-time since we don't know the
1400 * number of queues at compile-time. The polling_netdev array is 1388 * number of queues at compile-time.
1401 * intended for Multiqueue, but should work fine with a single queue.
1402 **/ 1389 **/
1403 1390
1404static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) 1391static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
@@ -1415,15 +1402,6 @@ static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1415 return -ENOMEM; 1402 return -ENOMEM;
1416 } 1403 }
1417 1404
1418 adapter->polling_netdev = kcalloc(adapter->num_rx_queues,
1419 sizeof(struct net_device),
1420 GFP_KERNEL);
1421 if (!adapter->polling_netdev) {
1422 kfree(adapter->tx_ring);
1423 kfree(adapter->rx_ring);
1424 return -ENOMEM;
1425 }
1426
1427 return E1000_SUCCESS; 1405 return E1000_SUCCESS;
1428} 1406}
1429 1407
@@ -2521,10 +2499,11 @@ static void e1000_watchdog(unsigned long data)
2521 &adapter->link_duplex); 2499 &adapter->link_duplex);
2522 2500
2523 ctrl = er32(CTRL); 2501 ctrl = er32(CTRL);
2524 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, " 2502 printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, "
2525 "Flow Control: %s\n", 2503 "Flow Control: %s\n",
2526 adapter->link_speed, 2504 netdev->name,
2527 adapter->link_duplex == FULL_DUPLEX ? 2505 adapter->link_speed,
2506 adapter->link_duplex == FULL_DUPLEX ?
2528 "Full Duplex" : "Half Duplex", 2507 "Full Duplex" : "Half Duplex",
2529 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2508 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2530 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2509 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
@@ -2600,7 +2579,8 @@ static void e1000_watchdog(unsigned long data)
2600 if (netif_carrier_ok(netdev)) { 2579 if (netif_carrier_ok(netdev)) {
2601 adapter->link_speed = 0; 2580 adapter->link_speed = 0;
2602 adapter->link_duplex = 0; 2581 adapter->link_duplex = 0;
2603 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 2582 printk(KERN_INFO "e1000: %s NIC Link is Down\n",
2583 netdev->name);
2604 netif_carrier_off(netdev); 2584 netif_carrier_off(netdev);
2605 netif_stop_queue(netdev); 2585 netif_stop_queue(netdev);
2606 mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ)); 2586 mod_timer(&adapter->phy_info_timer, round_jiffies(jiffies + 2 * HZ));
@@ -3707,12 +3687,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
3707 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3687 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3708 } 3688 }
3709 3689
3710 if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { 3690 if (likely(netif_rx_schedule_prep(&adapter->napi))) {
3711 adapter->total_tx_bytes = 0; 3691 adapter->total_tx_bytes = 0;
3712 adapter->total_tx_packets = 0; 3692 adapter->total_tx_packets = 0;
3713 adapter->total_rx_bytes = 0; 3693 adapter->total_rx_bytes = 0;
3714 adapter->total_rx_packets = 0; 3694 adapter->total_rx_packets = 0;
3715 __netif_rx_schedule(netdev, &adapter->napi); 3695 __netif_rx_schedule(&adapter->napi);
3716 } else 3696 } else
3717 e1000_irq_enable(adapter); 3697 e1000_irq_enable(adapter);
3718 3698
@@ -3767,12 +3747,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
3767 ew32(IMC, ~0); 3747 ew32(IMC, ~0);
3768 E1000_WRITE_FLUSH(); 3748 E1000_WRITE_FLUSH();
3769 } 3749 }
3770 if (likely(netif_rx_schedule_prep(netdev, &adapter->napi))) { 3750 if (likely(netif_rx_schedule_prep(&adapter->napi))) {
3771 adapter->total_tx_bytes = 0; 3751 adapter->total_tx_bytes = 0;
3772 adapter->total_tx_packets = 0; 3752 adapter->total_tx_packets = 0;
3773 adapter->total_rx_bytes = 0; 3753 adapter->total_rx_bytes = 0;
3774 adapter->total_rx_packets = 0; 3754 adapter->total_rx_packets = 0;
3775 __netif_rx_schedule(netdev, &adapter->napi); 3755 __netif_rx_schedule(&adapter->napi);
3776 } else 3756 } else
3777 /* this really should not happen! if it does it is basically a 3757 /* this really should not happen! if it does it is basically a
3778 * bug, but not a hard error, so enable ints and continue */ 3758 * bug, but not a hard error, so enable ints and continue */
@@ -3791,8 +3771,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
3791 struct net_device *poll_dev = adapter->netdev; 3771 struct net_device *poll_dev = adapter->netdev;
3792 int tx_cleaned = 0, work_done = 0; 3772 int tx_cleaned = 0, work_done = 0;
3793 3773
3794 /* Must NOT use netdev_priv macro here. */ 3774 adapter = netdev_priv(poll_dev);
3795 adapter = poll_dev->priv;
3796 3775
3797 /* e1000_clean is called per-cpu. This lock protects 3776 /* e1000_clean is called per-cpu. This lock protects
3798 * tx_ring[0] from being cleaned by multiple cpus 3777 * tx_ring[0] from being cleaned by multiple cpus
@@ -3814,7 +3793,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
3814 if (work_done < budget) { 3793 if (work_done < budget) {
3815 if (likely(adapter->itr_setting & 3)) 3794 if (likely(adapter->itr_setting & 3))
3816 e1000_set_itr(adapter); 3795 e1000_set_itr(adapter);
3817 netif_rx_complete(poll_dev, napi); 3796 netif_rx_complete(napi);
3818 e1000_irq_enable(adapter); 3797 e1000_irq_enable(adapter);
3819 } 3798 }
3820 3799
@@ -4104,8 +4083,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4104 netif_receive_skb(skb); 4083 netif_receive_skb(skb);
4105 } 4084 }
4106 4085
4107 netdev->last_rx = jiffies;
4108
4109next_desc: 4086next_desc:
4110 rx_desc->status = 0; 4087 rx_desc->status = 0;
4111 4088
@@ -4789,7 +4766,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
4789 pci_channel_state_t state) 4766 pci_channel_state_t state)
4790{ 4767{
4791 struct net_device *netdev = pci_get_drvdata(pdev); 4768 struct net_device *netdev = pci_get_drvdata(pdev);
4792 struct e1000_adapter *adapter = netdev->priv; 4769 struct e1000_adapter *adapter = netdev_priv(netdev);
4793 4770
4794 netif_device_detach(netdev); 4771 netif_device_detach(netdev);
4795 4772
@@ -4811,7 +4788,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
4811static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) 4788static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4812{ 4789{
4813 struct net_device *netdev = pci_get_drvdata(pdev); 4790 struct net_device *netdev = pci_get_drvdata(pdev);
4814 struct e1000_adapter *adapter = netdev->priv; 4791 struct e1000_adapter *adapter = netdev_priv(netdev);
4815 struct e1000_hw *hw = &adapter->hw; 4792 struct e1000_hw *hw = &adapter->hw;
4816 int err; 4793 int err;
4817 4794
@@ -4845,7 +4822,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4845static void e1000_io_resume(struct pci_dev *pdev) 4822static void e1000_io_resume(struct pci_dev *pdev)
4846{ 4823{
4847 struct net_device *netdev = pci_get_drvdata(pdev); 4824 struct net_device *netdev = pci_get_drvdata(pdev);
4848 struct e1000_adapter *adapter = netdev->priv; 4825 struct e1000_adapter *adapter = netdev_priv(netdev);
4849 struct e1000_hw *hw = &adapter->hw; 4826 struct e1000_hw *hw = &adapter->hw;
4850 4827
4851 e1000_init_manageability(adapter); 4828 e1000_init_manageability(adapter);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index b2c910c52df9..cf43ee743b3c 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -28,6 +28,7 @@
28 28
29/* 29/*
30 * 82571EB Gigabit Ethernet Controller 30 * 82571EB Gigabit Ethernet Controller
31 * 82571EB Gigabit Ethernet Controller (Copper)
31 * 82571EB Gigabit Ethernet Controller (Fiber) 32 * 82571EB Gigabit Ethernet Controller (Fiber)
32 * 82571EB Dual Port Gigabit Mezzanine Adapter 33 * 82571EB Dual Port Gigabit Mezzanine Adapter
33 * 82571EB Quad Port Gigabit Mezzanine Adapter 34 * 82571EB Quad Port Gigabit Mezzanine Adapter
@@ -331,8 +332,9 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
331 332
332 case e1000_82573: 333 case e1000_82573:
333 if (pdev->device == E1000_DEV_ID_82573L) { 334 if (pdev->device == E1000_DEV_ID_82573L) {
334 e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, 335 if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1,
335 &eeprom_data); 336 &eeprom_data) < 0)
337 break;
336 if (eeprom_data & NVM_WORD1A_ASPM_MASK) 338 if (eeprom_data & NVM_WORD1A_ASPM_MASK)
337 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; 339 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
338 } 340 }
@@ -973,6 +975,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
973 ew32(CTRL_EXT, reg); 975 ew32(CTRL_EXT, reg);
974 } 976 }
975 977
978 if (hw->mac.type == e1000_82571) {
979 reg = er32(PBA_ECC);
980 reg |= E1000_PBA_ECC_CORR_EN;
981 ew32(PBA_ECC, reg);
982 }
983
976 /* PCI-Ex Control Register */ 984 /* PCI-Ex Control Register */
977 if (hw->mac.type == e1000_82574) { 985 if (hw->mac.type == e1000_82574) {
978 reg = er32(GCR); 986 reg = er32(GCR);
@@ -1111,8 +1119,8 @@ static s32 e1000_setup_link_82571(struct e1000_hw *hw)
1111 * set it to full. 1119 * set it to full.
1112 */ 1120 */
1113 if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) && 1121 if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574) &&
1114 hw->fc.type == e1000_fc_default) 1122 hw->fc.requested_mode == e1000_fc_default)
1115 hw->fc.type = e1000_fc_full; 1123 hw->fc.requested_mode = e1000_fc_full;
1116 1124
1117 return e1000e_setup_link(hw); 1125 return e1000e_setup_link(hw);
1118} 1126}
@@ -1387,6 +1395,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
1387 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, 1395 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
1388 .set_d3_lplu_state = e1000e_set_d3_lplu_state, 1396 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
1389 .write_phy_reg = e1000e_write_phy_reg_igp, 1397 .write_phy_reg = e1000e_write_phy_reg_igp,
1398 .cfg_on_link_up = NULL,
1390}; 1399};
1391 1400
1392static struct e1000_phy_operations e82_phy_ops_m88 = { 1401static struct e1000_phy_operations e82_phy_ops_m88 = {
@@ -1403,6 +1412,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
1403 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, 1412 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
1404 .set_d3_lplu_state = e1000e_set_d3_lplu_state, 1413 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
1405 .write_phy_reg = e1000e_write_phy_reg_m88, 1414 .write_phy_reg = e1000e_write_phy_reg_m88,
1415 .cfg_on_link_up = NULL,
1406}; 1416};
1407 1417
1408static struct e1000_phy_operations e82_phy_ops_bm = { 1418static struct e1000_phy_operations e82_phy_ops_bm = {
@@ -1419,6 +1429,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
1419 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, 1429 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
1420 .set_d3_lplu_state = e1000e_set_d3_lplu_state, 1430 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
1421 .write_phy_reg = e1000e_write_phy_reg_bm2, 1431 .write_phy_reg = e1000e_write_phy_reg_bm2,
1432 .cfg_on_link_up = NULL,
1422}; 1433};
1423 1434
1424static struct e1000_nvm_operations e82571_nvm_ops = { 1435static struct e1000_nvm_operations e82571_nvm_ops = {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 48f79ecb82a0..e6caf29d4252 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -372,6 +372,13 @@
372#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ 372#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
373#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ 373#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */
374 374
375/* PBA ECC Register */
376#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
377#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */
378#define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */
379#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
380#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */
381
375/* 382/*
376 * This defines the bits that are set in the Interrupt Mask 383 * This defines the bits that are set in the Interrupt Mask
377 * Set/Read Register. Each bit is documented below: 384 * Set/Read Register. Each bit is documented below:
@@ -565,6 +572,7 @@
565#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ 572#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */
566#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ 573#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */
567#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ 574#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
575#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
568 576
569#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */ 577#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */
570#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ 578#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c55fd6fdb91c..37bcb190eef8 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -193,6 +193,7 @@ struct e1000_adapter {
193 u16 mng_vlan_id; 193 u16 mng_vlan_id;
194 u16 link_speed; 194 u16 link_speed;
195 u16 link_duplex; 195 u16 link_duplex;
196 u16 eeprom_vers;
196 197
197 spinlock_t tx_queue_lock; /* prevent concurrent tail updates */ 198 spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
198 199
@@ -388,6 +389,7 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
388extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); 389extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
389extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); 390extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
390extern void e1000e_update_stats(struct e1000_adapter *adapter); 391extern void e1000e_update_stats(struct e1000_adapter *adapter);
392extern bool e1000_has_link(struct e1000_adapter *adapter);
391extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 393extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
392extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 394extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
393 395
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index da9c09c248ed..8964838c686b 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -112,6 +112,11 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
112static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); 112static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
113static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); 113static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
114static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); 114static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
115static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
116static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
117 u16 *data);
118static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
119 u16 data);
115 120
116/** 121/**
117 * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. 122 * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
@@ -275,8 +280,6 @@ static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
275 u16 mask; 280 u16 mask;
276 281
277 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; 282 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
278 mask |= E1000_SWFW_CSR_SM;
279
280 return e1000_acquire_swfw_sync_80003es2lan(hw, mask); 283 return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
281} 284}
282 285
@@ -292,7 +295,36 @@ static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
292 u16 mask; 295 u16 mask;
293 296
294 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; 297 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
295 mask |= E1000_SWFW_CSR_SM; 298 e1000_release_swfw_sync_80003es2lan(hw, mask);
299}
300
301/**
302 * e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register
303 * @hw: pointer to the HW structure
304 *
305 * Acquire the semaphore to access the Kumeran interface.
306 *
307 **/
308static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
309{
310 u16 mask;
311
312 mask = E1000_SWFW_CSR_SM;
313
314 return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
315}
316
317/**
318 * e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register
319 * @hw: pointer to the HW structure
320 *
321 * Release the semaphore used to access the Kumeran interface
322 **/
323static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
324{
325 u16 mask;
326
327 mask = E1000_SWFW_CSR_SM;
296 328
297 e1000_release_swfw_sync_80003es2lan(hw, mask); 329 e1000_release_swfw_sync_80003es2lan(hw, mask);
298} 330}
@@ -347,7 +379,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
347 u32 swmask = mask; 379 u32 swmask = mask;
348 u32 fwmask = mask << 16; 380 u32 fwmask = mask << 16;
349 s32 i = 0; 381 s32 i = 0;
350 s32 timeout = 200; 382 s32 timeout = 50;
351 383
352 while (i < timeout) { 384 while (i < timeout) {
353 if (e1000e_get_hw_semaphore(hw)) 385 if (e1000e_get_hw_semaphore(hw))
@@ -715,13 +747,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
715 ret_val = e1000e_get_speed_and_duplex_copper(hw, 747 ret_val = e1000e_get_speed_and_duplex_copper(hw,
716 speed, 748 speed,
717 duplex); 749 duplex);
718 if (ret_val) 750 hw->phy.ops.cfg_on_link_up(hw);
719 return ret_val;
720 if (*speed == SPEED_1000)
721 ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
722 else
723 ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw,
724 *duplex);
725 } else { 751 } else {
726 ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, 752 ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
727 speed, 753 speed,
@@ -763,8 +789,10 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
763 789
764 ctrl = er32(CTRL); 790 ctrl = er32(CTRL);
765 791
792 ret_val = e1000_acquire_phy_80003es2lan(hw);
766 hw_dbg(hw, "Issuing a global reset to MAC\n"); 793 hw_dbg(hw, "Issuing a global reset to MAC\n");
767 ew32(CTRL, ctrl | E1000_CTRL_RST); 794 ew32(CTRL, ctrl | E1000_CTRL_RST);
795 e1000_release_phy_80003es2lan(hw);
768 796
769 ret_val = e1000e_get_auto_rd_done(hw); 797 ret_val = e1000e_get_auto_rd_done(hw);
770 if (ret_val) 798 if (ret_val)
@@ -907,8 +935,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
907 struct e1000_phy_info *phy = &hw->phy; 935 struct e1000_phy_info *phy = &hw->phy;
908 s32 ret_val; 936 s32 ret_val;
909 u32 ctrl_ext; 937 u32 ctrl_ext;
910 u32 i = 0; 938 u16 data;
911 u16 data, data2;
912 939
913 ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); 940 ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
914 if (ret_val) 941 if (ret_val)
@@ -972,19 +999,20 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
972 } 999 }
973 1000
974 /* Bypass Rx and Tx FIFO's */ 1001 /* Bypass Rx and Tx FIFO's */
975 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, 1002 ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
1003 E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
976 E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | 1004 E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
977 E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); 1005 E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
978 if (ret_val) 1006 if (ret_val)
979 return ret_val; 1007 return ret_val;
980 1008
981 ret_val = e1000e_read_kmrn_reg(hw, 1009 ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
982 E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, 1010 E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
983 &data); 1011 &data);
984 if (ret_val) 1012 if (ret_val)
985 return ret_val; 1013 return ret_val;
986 data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; 1014 data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
987 ret_val = e1000e_write_kmrn_reg(hw, 1015 ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
988 E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, 1016 E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
989 data); 1017 data);
990 if (ret_val) 1018 if (ret_val)
@@ -1019,18 +1047,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
1019 if (ret_val) 1047 if (ret_val)
1020 return ret_val; 1048 return ret_val;
1021 1049
1022 do { 1050 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
1023 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 1051 if (ret_val)
1024 &data); 1052 return ret_val;
1025 if (ret_val)
1026 return ret_val;
1027
1028 ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL,
1029 &data2);
1030 if (ret_val)
1031 return ret_val;
1032 i++;
1033 } while ((data != data2) && (i < GG82563_MAX_KMRN_RETRY));
1034 1053
1035 data &= ~GG82563_KMCR_PASS_FALSE_CARRIER; 1054 data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
1036 ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data); 1055 ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
@@ -1077,23 +1096,27 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
1077 * iteration and increase the max iterations when 1096 * iteration and increase the max iterations when
1078 * polling the phy; this fixes erroneous timeouts at 10Mbps. 1097 * polling the phy; this fixes erroneous timeouts at 10Mbps.
1079 */ 1098 */
1080 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); 1099 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
1100 0xFFFF);
1081 if (ret_val) 1101 if (ret_val)
1082 return ret_val; 1102 return ret_val;
1083 ret_val = e1000e_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data); 1103 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
1104 &reg_data);
1084 if (ret_val) 1105 if (ret_val)
1085 return ret_val; 1106 return ret_val;
1086 reg_data |= 0x3F; 1107 reg_data |= 0x3F;
1087 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data); 1108 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
1109 reg_data);
1088 if (ret_val) 1110 if (ret_val)
1089 return ret_val; 1111 return ret_val;
1090 ret_val = e1000e_read_kmrn_reg(hw, 1112 ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
1091 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, 1113 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
1092 &reg_data); 1114 &reg_data);
1093 if (ret_val) 1115 if (ret_val)
1094 return ret_val; 1116 return ret_val;
1095 reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; 1117 reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
1096 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, 1118 ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
1119 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
1097 reg_data); 1120 reg_data);
1098 if (ret_val) 1121 if (ret_val)
1099 return ret_val; 1122 return ret_val;
@@ -1108,6 +1131,35 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
1108} 1131}
1109 1132
1110/** 1133/**
1134 * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
1135 * @hw: pointer to the HW structure
1136 * @duplex: current duplex setting
1137 *
1138 * Configure the KMRN interface by applying last minute quirks for
1139 * 10/100 operation.
1140 **/
1141static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
1142{
1143 s32 ret_val = 0;
1144 u16 speed;
1145 u16 duplex;
1146
1147 if (hw->phy.media_type == e1000_media_type_copper) {
1148 ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
1149 &duplex);
1150 if (ret_val)
1151 return ret_val;
1152
1153 if (speed == SPEED_1000)
1154 ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
1155 else
1156 ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
1157 }
1158
1159 return ret_val;
1160}
1161
1162/**
1111 * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation 1163 * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
1112 * @hw: pointer to the HW structure 1164 * @hw: pointer to the HW structure
1113 * @duplex: current duplex setting 1165 * @duplex: current duplex setting
@@ -1123,8 +1175,9 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
1123 u16 reg_data, reg_data2; 1175 u16 reg_data, reg_data2;
1124 1176
1125 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; 1177 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
1126 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1178 ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
1127 reg_data); 1179 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
1180 reg_data);
1128 if (ret_val) 1181 if (ret_val)
1129 return ret_val; 1182 return ret_val;
1130 1183
@@ -1170,8 +1223,9 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
1170 u32 i = 0; 1223 u32 i = 0;
1171 1224
1172 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; 1225 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
1173 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1226 ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
1174 reg_data); 1227 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
1228 reg_data);
1175 if (ret_val) 1229 if (ret_val)
1176 return ret_val; 1230 return ret_val;
1177 1231
@@ -1199,6 +1253,71 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
1199} 1253}
1200 1254
1201/** 1255/**
1256 * e1000_read_kmrn_reg_80003es2lan - Read kumeran register
1257 * @hw: pointer to the HW structure
1258 * @offset: register offset to be read
1259 * @data: pointer to the read data
1260 *
1261 * Acquire semaphore, then read the PHY register at offset
1262 * using the kumeran interface. The information retrieved is stored in data.
1263 * Release the semaphore before exiting.
1264 **/
1265static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1266 u16 *data)
1267{
1268 u32 kmrnctrlsta;
1269 s32 ret_val = 0;
1270
1271 ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
1272 if (ret_val)
1273 return ret_val;
1274
1275 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
1276 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
1277 ew32(KMRNCTRLSTA, kmrnctrlsta);
1278
1279 udelay(2);
1280
1281 kmrnctrlsta = er32(KMRNCTRLSTA);
1282 *data = (u16)kmrnctrlsta;
1283
1284 e1000_release_mac_csr_80003es2lan(hw);
1285
1286 return ret_val;
1287}
1288
1289/**
1290 * e1000_write_kmrn_reg_80003es2lan - Write kumeran register
1291 * @hw: pointer to the HW structure
1292 * @offset: register offset to write to
1293 * @data: data to write at register offset
1294 *
1295 * Acquire semaphore, then write the data to PHY register
1296 * at the offset using the kumeran interface. Release semaphore
1297 * before exiting.
1298 **/
1299static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1300 u16 data)
1301{
1302 u32 kmrnctrlsta;
1303 s32 ret_val = 0;
1304
1305 ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
1306 if (ret_val)
1307 return ret_val;
1308
1309 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
1310 E1000_KMRNCTRLSTA_OFFSET) | data;
1311 ew32(KMRNCTRLSTA, kmrnctrlsta);
1312
1313 udelay(2);
1314
1315 e1000_release_mac_csr_80003es2lan(hw);
1316
1317 return ret_val;
1318}
1319
1320/**
1202 * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters 1321 * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
1203 * @hw: pointer to the HW structure 1322 * @hw: pointer to the HW structure
1204 * 1323 *
@@ -1276,6 +1395,7 @@ static struct e1000_phy_operations es2_phy_ops = {
1276 .set_d0_lplu_state = NULL, 1395 .set_d0_lplu_state = NULL,
1277 .set_d3_lplu_state = e1000e_set_d3_lplu_state, 1396 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
1278 .write_phy_reg = e1000_write_phy_reg_gg82563_80003es2lan, 1397 .write_phy_reg = e1000_write_phy_reg_gg82563_80003es2lan,
1398 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
1279}; 1399};
1280 1400
1281static struct e1000_nvm_operations es2_nvm_ops = { 1401static struct e1000_nvm_operations es2_nvm_ops = {
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 62421ce96311..e48956d924b0 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -173,11 +173,8 @@ static int e1000_get_settings(struct net_device *netdev,
173static u32 e1000_get_link(struct net_device *netdev) 173static u32 e1000_get_link(struct net_device *netdev)
174{ 174{
175 struct e1000_adapter *adapter = netdev_priv(netdev); 175 struct e1000_adapter *adapter = netdev_priv(netdev);
176 struct e1000_hw *hw = &adapter->hw; 176
177 u32 status; 177 return e1000_has_link(adapter);
178
179 status = er32(STATUS);
180 return (status & E1000_STATUS_LU) ? 1 : 0;
181} 178}
182 179
183static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) 180static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
@@ -249,7 +246,7 @@ static int e1000_set_settings(struct net_device *netdev,
249 ADVERTISED_Autoneg; 246 ADVERTISED_Autoneg;
250 ecmd->advertising = hw->phy.autoneg_advertised; 247 ecmd->advertising = hw->phy.autoneg_advertised;
251 if (adapter->fc_autoneg) 248 if (adapter->fc_autoneg)
252 hw->fc.original_type = e1000_fc_default; 249 hw->fc.requested_mode = e1000_fc_default;
253 } else { 250 } else {
254 if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { 251 if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
255 clear_bit(__E1000_RESETTING, &adapter->state); 252 clear_bit(__E1000_RESETTING, &adapter->state);
@@ -279,11 +276,11 @@ static void e1000_get_pauseparam(struct net_device *netdev,
279 pause->autoneg = 276 pause->autoneg =
280 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); 277 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
281 278
282 if (hw->fc.type == e1000_fc_rx_pause) { 279 if (hw->fc.current_mode == e1000_fc_rx_pause) {
283 pause->rx_pause = 1; 280 pause->rx_pause = 1;
284 } else if (hw->fc.type == e1000_fc_tx_pause) { 281 } else if (hw->fc.current_mode == e1000_fc_tx_pause) {
285 pause->tx_pause = 1; 282 pause->tx_pause = 1;
286 } else if (hw->fc.type == e1000_fc_full) { 283 } else if (hw->fc.current_mode == e1000_fc_full) {
287 pause->rx_pause = 1; 284 pause->rx_pause = 1;
288 pause->tx_pause = 1; 285 pause->tx_pause = 1;
289 } 286 }
@@ -301,19 +298,8 @@ static int e1000_set_pauseparam(struct net_device *netdev,
301 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 298 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
302 msleep(1); 299 msleep(1);
303 300
304 if (pause->rx_pause && pause->tx_pause)
305 hw->fc.type = e1000_fc_full;
306 else if (pause->rx_pause && !pause->tx_pause)
307 hw->fc.type = e1000_fc_rx_pause;
308 else if (!pause->rx_pause && pause->tx_pause)
309 hw->fc.type = e1000_fc_tx_pause;
310 else if (!pause->rx_pause && !pause->tx_pause)
311 hw->fc.type = e1000_fc_none;
312
313 hw->fc.original_type = hw->fc.type;
314
315 if (adapter->fc_autoneg == AUTONEG_ENABLE) { 301 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
316 hw->fc.type = e1000_fc_default; 302 hw->fc.requested_mode = e1000_fc_default;
317 if (netif_running(adapter->netdev)) { 303 if (netif_running(adapter->netdev)) {
318 e1000e_down(adapter); 304 e1000e_down(adapter);
319 e1000e_up(adapter); 305 e1000e_up(adapter);
@@ -321,6 +307,17 @@ static int e1000_set_pauseparam(struct net_device *netdev,
321 e1000e_reset(adapter); 307 e1000e_reset(adapter);
322 } 308 }
323 } else { 309 } else {
310 if (pause->rx_pause && pause->tx_pause)
311 hw->fc.requested_mode = e1000_fc_full;
312 else if (pause->rx_pause && !pause->tx_pause)
313 hw->fc.requested_mode = e1000_fc_rx_pause;
314 else if (!pause->rx_pause && pause->tx_pause)
315 hw->fc.requested_mode = e1000_fc_tx_pause;
316 else if (!pause->rx_pause && !pause->tx_pause)
317 hw->fc.requested_mode = e1000_fc_none;
318
319 hw->fc.current_mode = hw->fc.requested_mode;
320
324 retval = ((hw->phy.media_type == e1000_media_type_fiber) ? 321 retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
325 hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw)); 322 hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw));
326 } 323 }
@@ -495,18 +492,19 @@ static int e1000_get_eeprom(struct net_device *netdev,
495 for (i = 0; i < last_word - first_word + 1; i++) { 492 for (i = 0; i < last_word - first_word + 1; i++) {
496 ret_val = e1000_read_nvm(hw, first_word + i, 1, 493 ret_val = e1000_read_nvm(hw, first_word + i, 1,
497 &eeprom_buff[i]); 494 &eeprom_buff[i]);
498 if (ret_val) { 495 if (ret_val)
499 /* a read error occurred, throw away the
500 * result */
501 memset(eeprom_buff, 0xff, sizeof(eeprom_buff));
502 break; 496 break;
503 }
504 } 497 }
505 } 498 }
506 499
507 /* Device's eeprom is always little-endian, word addressable */ 500 if (ret_val) {
508 for (i = 0; i < last_word - first_word + 1; i++) 501 /* a read error occurred, throw away the result */
509 le16_to_cpus(&eeprom_buff[i]); 502 memset(eeprom_buff, 0xff, sizeof(eeprom_buff));
503 } else {
504 /* Device's eeprom is always little-endian, word addressable */
505 for (i = 0; i < last_word - first_word + 1; i++)
506 le16_to_cpus(&eeprom_buff[i]);
507 }
510 508
511 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); 509 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
512 kfree(eeprom_buff); 510 kfree(eeprom_buff);
@@ -558,6 +556,9 @@ static int e1000_set_eeprom(struct net_device *netdev,
558 ret_val = e1000_read_nvm(hw, last_word, 1, 556 ret_val = e1000_read_nvm(hw, last_word, 1,
559 &eeprom_buff[last_word - first_word]); 557 &eeprom_buff[last_word - first_word]);
560 558
559 if (ret_val)
560 goto out;
561
561 /* Device's eeprom is always little-endian, word addressable */ 562 /* Device's eeprom is always little-endian, word addressable */
562 for (i = 0; i < last_word - first_word + 1; i++) 563 for (i = 0; i < last_word - first_word + 1; i++)
563 le16_to_cpus(&eeprom_buff[i]); 564 le16_to_cpus(&eeprom_buff[i]);
@@ -570,15 +571,18 @@ static int e1000_set_eeprom(struct net_device *netdev,
570 ret_val = e1000_write_nvm(hw, first_word, 571 ret_val = e1000_write_nvm(hw, first_word,
571 last_word - first_word + 1, eeprom_buff); 572 last_word - first_word + 1, eeprom_buff);
572 573
574 if (ret_val)
575 goto out;
576
573 /* 577 /*
574 * Update the checksum over the first part of the EEPROM if needed 578 * Update the checksum over the first part of the EEPROM if needed
575 * and flush shadow RAM for 82573 controllers 579 * and flush shadow RAM for applicable controllers
576 */ 580 */
577 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) || 581 if ((first_word <= NVM_CHECKSUM_REG) ||
578 (hw->mac.type == e1000_82574) || 582 (hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82573))
579 (hw->mac.type == e1000_82573))) 583 ret_val = e1000e_update_nvm_checksum(hw);
580 e1000e_update_nvm_checksum(hw);
581 584
585out:
582 kfree(eeprom_buff); 586 kfree(eeprom_buff);
583 return ret_val; 587 return ret_val;
584} 588}
@@ -588,7 +592,6 @@ static void e1000_get_drvinfo(struct net_device *netdev,
588{ 592{
589 struct e1000_adapter *adapter = netdev_priv(netdev); 593 struct e1000_adapter *adapter = netdev_priv(netdev);
590 char firmware_version[32]; 594 char firmware_version[32];
591 u16 eeprom_data;
592 595
593 strncpy(drvinfo->driver, e1000e_driver_name, 32); 596 strncpy(drvinfo->driver, e1000e_driver_name, 32);
594 strncpy(drvinfo->version, e1000e_driver_version, 32); 597 strncpy(drvinfo->version, e1000e_driver_version, 32);
@@ -597,11 +600,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
597 * EEPROM image version # is reported as firmware version # for 600 * EEPROM image version # is reported as firmware version # for
598 * PCI-E controllers 601 * PCI-E controllers
599 */ 602 */
600 e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data);
601 sprintf(firmware_version, "%d.%d-%d", 603 sprintf(firmware_version, "%d.%d-%d",
602 (eeprom_data & 0xF000) >> 12, 604 (adapter->eeprom_vers & 0xF000) >> 12,
603 (eeprom_data & 0x0FF0) >> 4, 605 (adapter->eeprom_vers & 0x0FF0) >> 4,
604 eeprom_data & 0x000F); 606 (adapter->eeprom_vers & 0x000F));
605 607
606 strncpy(drvinfo->fw_version, firmware_version, 32); 608 strncpy(drvinfo->fw_version, firmware_version, 32);
607 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 609 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
@@ -865,7 +867,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
865 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 867 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
866 if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { 868 if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
867 *data = 1; 869 *data = 1;
868 break; 870 return *data;
869 } 871 }
870 checksum += temp; 872 checksum += temp;
871 } 873 }
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index f66ed37a7f76..f25e961c6b3b 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -87,6 +87,7 @@ enum e1e_registers {
87 E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ 87 E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
88 E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */ 88 E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
89 E1000_FLOP = 0x0103C, /* FLASH Opcode Register */ 89 E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
90 E1000_PBA_ECC = 0x01100, /* PBA ECC Register */
90 E1000_ERT = 0x02008, /* Early Rx Threshold - RW */ 91 E1000_ERT = 0x02008, /* Early Rx Threshold - RW */
91 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ 92 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
92 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ 93 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
@@ -436,7 +437,7 @@ enum e1000_rev_polarity{
436 e1000_rev_polarity_undefined = 0xFF 437 e1000_rev_polarity_undefined = 0xFF
437}; 438};
438 439
439enum e1000_fc_type { 440enum e1000_fc_mode {
440 e1000_fc_none = 0, 441 e1000_fc_none = 0,
441 e1000_fc_rx_pause, 442 e1000_fc_rx_pause,
442 e1000_fc_tx_pause, 443 e1000_fc_tx_pause,
@@ -738,6 +739,7 @@ struct e1000_phy_operations {
738 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); 739 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
739 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); 740 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
740 s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); 741 s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
742 s32 (*cfg_on_link_up)(struct e1000_hw *);
741}; 743};
742 744
743/* Function pointers for the NVM. */ 745/* Function pointers for the NVM. */
@@ -848,8 +850,8 @@ struct e1000_fc_info {
848 u16 pause_time; /* Flow control pause timer */ 850 u16 pause_time; /* Flow control pause timer */
849 bool send_xon; /* Flow control send XON */ 851 bool send_xon; /* Flow control send XON */
850 bool strict_ieee; /* Strict IEEE mode */ 852 bool strict_ieee; /* Strict IEEE mode */
851 enum e1000_fc_type type; /* Type of flow control */ 853 enum e1000_fc_mode current_mode; /* FC mode in effect */
852 enum e1000_fc_type original_type; 854 enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
853}; 855};
854 856
855struct e1000_dev_spec_82571 { 857struct e1000_dev_spec_82571 {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index d115a6d30f29..f2a5963b5a95 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -27,6 +27,7 @@
27*******************************************************************************/ 27*******************************************************************************/
28 28
29/* 29/*
30 * 82562G 10/100 Network Connection
30 * 82562G-2 10/100 Network Connection 31 * 82562G-2 10/100 Network Connection
31 * 82562GT 10/100 Network Connection 32 * 82562GT 10/100 Network Connection
32 * 82562GT-2 10/100 Network Connection 33 * 82562GT-2 10/100 Network Connection
@@ -40,6 +41,7 @@
40 * 82566MM Gigabit Network Connection 41 * 82566MM Gigabit Network Connection
41 * 82567LM Gigabit Network Connection 42 * 82567LM Gigabit Network Connection
42 * 82567LF Gigabit Network Connection 43 * 82567LF Gigabit Network Connection
44 * 82567V Gigabit Network Connection
43 * 82567LM-2 Gigabit Network Connection 45 * 82567LM-2 Gigabit Network Connection
44 * 82567LF-2 Gigabit Network Connection 46 * 82567LF-2 Gigabit Network Connection
45 * 82567V-2 Gigabit Network Connection 47 * 82567V-2 Gigabit Network Connection
@@ -92,6 +94,8 @@
92 94
93#define E1000_ICH_NVM_SIG_WORD 0x13 95#define E1000_ICH_NVM_SIG_WORD 0x13
94#define E1000_ICH_NVM_SIG_MASK 0xC000 96#define E1000_ICH_NVM_SIG_MASK 0xC000
97#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
98#define E1000_ICH_NVM_SIG_VALUE 0x80
95 99
96#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 100#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
97 101
@@ -956,45 +960,62 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
956 * @bank: pointer to the variable that returns the active bank 960 * @bank: pointer to the variable that returns the active bank
957 * 961 *
958 * Reads signature byte from the NVM using the flash access registers. 962 * Reads signature byte from the NVM using the flash access registers.
963 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
959 **/ 964 **/
960static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) 965static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
961{ 966{
967 u32 eecd;
962 struct e1000_nvm_info *nvm = &hw->nvm; 968 struct e1000_nvm_info *nvm = &hw->nvm;
963 /* flash bank size is in words */
964 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); 969 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
965 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; 970 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
966 u8 bank_high_byte = 0; 971 u8 sig_byte = 0;
972 s32 ret_val = 0;
967 973
968 if (hw->mac.type != e1000_ich10lan) { 974 switch (hw->mac.type) {
969 if (er32(EECD) & E1000_EECD_SEC1VAL) 975 case e1000_ich8lan:
970 *bank = 1; 976 case e1000_ich9lan:
971 else 977 eecd = er32(EECD);
972 *bank = 0; 978 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
973 } else { 979 E1000_EECD_SEC1VAL_VALID_MASK) {
974 /* 980 if (eecd & E1000_EECD_SEC1VAL)
975 * Make sure the signature for bank 0 is valid, 981 *bank = 1;
976 * if not check for bank1 982 else
977 */ 983 *bank = 0;
978 e1000_read_flash_byte_ich8lan(hw, act_offset, &bank_high_byte); 984
979 if ((bank_high_byte & 0xC0) == 0x80) { 985 return 0;
986 }
987 hw_dbg(hw, "Unable to determine valid NVM bank via EEC - "
988 "reading flash signature\n");
989 /* fall-thru */
990 default:
991 /* set bank to 0 in case flash read fails */
992 *bank = 0;
993
994 /* Check bank 0 */
995 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
996 &sig_byte);
997 if (ret_val)
998 return ret_val;
999 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
1000 E1000_ICH_NVM_SIG_VALUE) {
980 *bank = 0; 1001 *bank = 0;
981 } else { 1002 return 0;
982 /* 1003 }
983 * find if segment 1 is valid by verifying
984 * bit 15:14 = 10b in word 0x13
985 */
986 e1000_read_flash_byte_ich8lan(hw,
987 act_offset + bank1_offset,
988 &bank_high_byte);
989 1004
990 /* bank1 has a valid signature equivalent to SEC1V */ 1005 /* Check bank 1 */
991 if ((bank_high_byte & 0xC0) == 0x80) { 1006 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
992 *bank = 1; 1007 bank1_offset,
993 } else { 1008 &sig_byte);
994 hw_dbg(hw, "ERROR: EEPROM not present\n"); 1009 if (ret_val)
995 return -E1000_ERR_NVM; 1010 return ret_val;
996 } 1011 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
1012 E1000_ICH_NVM_SIG_VALUE) {
1013 *bank = 1;
1014 return 0;
997 } 1015 }
1016
1017 hw_dbg(hw, "ERROR: No valid NVM bank present\n");
1018 return -E1000_ERR_NVM;
998 } 1019 }
999 1020
1000 return 0; 1021 return 0;
@@ -1027,11 +1048,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1027 1048
1028 ret_val = e1000_acquire_swflag_ich8lan(hw); 1049 ret_val = e1000_acquire_swflag_ich8lan(hw);
1029 if (ret_val) 1050 if (ret_val)
1030 return ret_val; 1051 goto out;
1031 1052
1032 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 1053 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
1033 if (ret_val) 1054 if (ret_val)
1034 return ret_val; 1055 goto release;
1035 1056
1036 act_offset = (bank) ? nvm->flash_bank_size : 0; 1057 act_offset = (bank) ? nvm->flash_bank_size : 0;
1037 act_offset += offset; 1058 act_offset += offset;
@@ -1050,8 +1071,13 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1050 } 1071 }
1051 } 1072 }
1052 1073
1074release:
1053 e1000_release_swflag_ich8lan(hw); 1075 e1000_release_swflag_ich8lan(hw);
1054 1076
1077out:
1078 if (ret_val)
1079 hw_dbg(hw, "NVM read error: %d\n", ret_val);
1080
1055 return ret_val; 1081 return ret_val;
1056} 1082}
1057 1083
@@ -1340,14 +1366,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1340 1366
1341 ret_val = e1000e_update_nvm_checksum_generic(hw); 1367 ret_val = e1000e_update_nvm_checksum_generic(hw);
1342 if (ret_val) 1368 if (ret_val)
1343 return ret_val; 1369 goto out;
1344 1370
1345 if (nvm->type != e1000_nvm_flash_sw) 1371 if (nvm->type != e1000_nvm_flash_sw)
1346 return ret_val; 1372 goto out;
1347 1373
1348 ret_val = e1000_acquire_swflag_ich8lan(hw); 1374 ret_val = e1000_acquire_swflag_ich8lan(hw);
1349 if (ret_val) 1375 if (ret_val)
1350 return ret_val; 1376 goto out;
1351 1377
1352 /* 1378 /*
1353 * We're writing to the opposite bank so if we're on bank 1, 1379 * We're writing to the opposite bank so if we're on bank 1,
@@ -1355,17 +1381,27 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1355 * is going to be written 1381 * is going to be written
1356 */ 1382 */
1357 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 1383 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
1358 if (ret_val) 1384 if (ret_val) {
1359 return ret_val; 1385 e1000_release_swflag_ich8lan(hw);
1386 goto out;
1387 }
1360 1388
1361 if (bank == 0) { 1389 if (bank == 0) {
1362 new_bank_offset = nvm->flash_bank_size; 1390 new_bank_offset = nvm->flash_bank_size;
1363 old_bank_offset = 0; 1391 old_bank_offset = 0;
1364 e1000_erase_flash_bank_ich8lan(hw, 1); 1392 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
1393 if (ret_val) {
1394 e1000_release_swflag_ich8lan(hw);
1395 goto out;
1396 }
1365 } else { 1397 } else {
1366 old_bank_offset = nvm->flash_bank_size; 1398 old_bank_offset = nvm->flash_bank_size;
1367 new_bank_offset = 0; 1399 new_bank_offset = 0;
1368 e1000_erase_flash_bank_ich8lan(hw, 0); 1400 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
1401 if (ret_val) {
1402 e1000_release_swflag_ich8lan(hw);
1403 goto out;
1404 }
1369 } 1405 }
1370 1406
1371 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { 1407 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
@@ -1377,9 +1413,11 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1377 if (dev_spec->shadow_ram[i].modified) { 1413 if (dev_spec->shadow_ram[i].modified) {
1378 data = dev_spec->shadow_ram[i].value; 1414 data = dev_spec->shadow_ram[i].value;
1379 } else { 1415 } else {
1380 e1000_read_flash_word_ich8lan(hw, 1416 ret_val = e1000_read_flash_word_ich8lan(hw, i +
1381 i + old_bank_offset, 1417 old_bank_offset,
1382 &data); 1418 &data);
1419 if (ret_val)
1420 break;
1383 } 1421 }
1384 1422
1385 /* 1423 /*
@@ -1420,7 +1458,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1420 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ 1458 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
1421 hw_dbg(hw, "Flash commit failed.\n"); 1459 hw_dbg(hw, "Flash commit failed.\n");
1422 e1000_release_swflag_ich8lan(hw); 1460 e1000_release_swflag_ich8lan(hw);
1423 return ret_val; 1461 goto out;
1424 } 1462 }
1425 1463
1426 /* 1464 /*
@@ -1430,14 +1468,18 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1430 * and we need to change bit 14 to 0b 1468 * and we need to change bit 14 to 0b
1431 */ 1469 */
1432 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 1470 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
1433 e1000_read_flash_word_ich8lan(hw, act_offset, &data); 1471 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
1472 if (ret_val) {
1473 e1000_release_swflag_ich8lan(hw);
1474 goto out;
1475 }
1434 data &= 0xBFFF; 1476 data &= 0xBFFF;
1435 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 1477 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
1436 act_offset * 2 + 1, 1478 act_offset * 2 + 1,
1437 (u8)(data >> 8)); 1479 (u8)(data >> 8));
1438 if (ret_val) { 1480 if (ret_val) {
1439 e1000_release_swflag_ich8lan(hw); 1481 e1000_release_swflag_ich8lan(hw);
1440 return ret_val; 1482 goto out;
1441 } 1483 }
1442 1484
1443 /* 1485 /*
@@ -1450,7 +1492,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1450 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); 1492 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
1451 if (ret_val) { 1493 if (ret_val) {
1452 e1000_release_swflag_ich8lan(hw); 1494 e1000_release_swflag_ich8lan(hw);
1453 return ret_val; 1495 goto out;
1454 } 1496 }
1455 1497
1456 /* Great! Everything worked, we can now clear the cached entries. */ 1498 /* Great! Everything worked, we can now clear the cached entries. */
@@ -1468,6 +1510,10 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1468 e1000e_reload_nvm(hw); 1510 e1000e_reload_nvm(hw);
1469 msleep(10); 1511 msleep(10);
1470 1512
1513out:
1514 if (ret_val)
1515 hw_dbg(hw, "NVM update error: %d\n", ret_val);
1516
1471 return ret_val; 1517 return ret_val;
1472} 1518}
1473 1519
@@ -1894,7 +1940,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1894 } 1940 }
1895 ret_val = e1000_acquire_swflag_ich8lan(hw); 1941 ret_val = e1000_acquire_swflag_ich8lan(hw);
1896 /* Whether or not the swflag was acquired, we need to reset the part */ 1942 /* Whether or not the swflag was acquired, we need to reset the part */
1897 hw_dbg(hw, "Issuing a global reset to ich8lan"); 1943 hw_dbg(hw, "Issuing a global reset to ich8lan\n");
1898 ew32(CTRL, (ctrl | E1000_CTRL_RST)); 1944 ew32(CTRL, (ctrl | E1000_CTRL_RST));
1899 msleep(20); 1945 msleep(20);
1900 1946
@@ -2074,12 +2120,17 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
2074 * the default flow control setting, so we explicitly 2120 * the default flow control setting, so we explicitly
2075 * set it to full. 2121 * set it to full.
2076 */ 2122 */
2077 if (hw->fc.type == e1000_fc_default) 2123 if (hw->fc.requested_mode == e1000_fc_default)
2078 hw->fc.type = e1000_fc_full; 2124 hw->fc.requested_mode = e1000_fc_full;
2079 2125
2080 hw->fc.original_type = hw->fc.type; 2126 /*
2127 * Save off the requested flow control mode for use later. Depending
2128 * on the link partner's capabilities, we may or may not use this mode.
2129 */
2130 hw->fc.current_mode = hw->fc.requested_mode;
2081 2131
2082 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type); 2132 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n",
2133 hw->fc.current_mode);
2083 2134
2084 /* Continue to configure the copper link. */ 2135 /* Continue to configure the copper link. */
2085 ret_val = e1000_setup_copper_link_ich8lan(hw); 2136 ret_val = e1000_setup_copper_link_ich8lan(hw);
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 089578f6855a..66741104ffd1 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -575,20 +575,42 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
575 */ 575 */
576 /* SYNCH bit and IV bit are sticky. */ 576 /* SYNCH bit and IV bit are sticky. */
577 udelay(10); 577 udelay(10);
578 if (E1000_RXCW_SYNCH & er32(RXCW)) { 578 rxcw = er32(RXCW);
579 if (rxcw & E1000_RXCW_SYNCH) {
579 if (!(rxcw & E1000_RXCW_IV)) { 580 if (!(rxcw & E1000_RXCW_IV)) {
580 mac->serdes_has_link = 1; 581 mac->serdes_has_link = true;
581 hw_dbg(hw, "SERDES: Link is up.\n"); 582 hw_dbg(hw, "SERDES: Link up - forced.\n");
582 } 583 }
583 } else { 584 } else {
584 mac->serdes_has_link = 0; 585 mac->serdes_has_link = false;
585 hw_dbg(hw, "SERDES: Link is down.\n"); 586 hw_dbg(hw, "SERDES: Link down - force failed.\n");
586 } 587 }
587 } 588 }
588 589
589 if (E1000_TXCW_ANE & er32(TXCW)) { 590 if (E1000_TXCW_ANE & er32(TXCW)) {
590 status = er32(STATUS); 591 status = er32(STATUS);
591 mac->serdes_has_link = (status & E1000_STATUS_LU); 592 if (status & E1000_STATUS_LU) {
593 /* SYNCH bit and IV bit are sticky, so reread rxcw. */
594 udelay(10);
595 rxcw = er32(RXCW);
596 if (rxcw & E1000_RXCW_SYNCH) {
597 if (!(rxcw & E1000_RXCW_IV)) {
598 mac->serdes_has_link = true;
599 hw_dbg(hw, "SERDES: Link up - autoneg "
600 "completed sucessfully.\n");
601 } else {
602 mac->serdes_has_link = false;
603 hw_dbg(hw, "SERDES: Link down - invalid"
604 "codewords detected in autoneg.\n");
605 }
606 } else {
607 mac->serdes_has_link = false;
608 hw_dbg(hw, "SERDES: Link down - no sync.\n");
609 }
610 } else {
611 mac->serdes_has_link = false;
612 hw_dbg(hw, "SERDES: Link down - autoneg failed\n");
613 }
592 } 614 }
593 615
594 return 0; 616 return 0;
@@ -623,12 +645,12 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
623 } 645 }
624 646
625 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) 647 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
626 hw->fc.type = e1000_fc_none; 648 hw->fc.requested_mode = e1000_fc_none;
627 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 649 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
628 NVM_WORD0F_ASM_DIR) 650 NVM_WORD0F_ASM_DIR)
629 hw->fc.type = e1000_fc_tx_pause; 651 hw->fc.requested_mode = e1000_fc_tx_pause;
630 else 652 else
631 hw->fc.type = e1000_fc_full; 653 hw->fc.requested_mode = e1000_fc_full;
632 654
633 return 0; 655 return 0;
634} 656}
@@ -656,23 +678,23 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
656 return 0; 678 return 0;
657 679
658 /* 680 /*
659 * If flow control is set to default, set flow control based on 681 * If requested flow control is set to default, set flow control
660 * the EEPROM flow control settings. 682 * based on the EEPROM flow control settings.
661 */ 683 */
662 if (hw->fc.type == e1000_fc_default) { 684 if (hw->fc.requested_mode == e1000_fc_default) {
663 ret_val = e1000_set_default_fc_generic(hw); 685 ret_val = e1000_set_default_fc_generic(hw);
664 if (ret_val) 686 if (ret_val)
665 return ret_val; 687 return ret_val;
666 } 688 }
667 689
668 /* 690 /*
669 * We want to save off the original Flow Control configuration just 691 * Save off the requested flow control mode for use later. Depending
670 * in case we get disconnected and then reconnected into a different 692 * on the link partner's capabilities, we may or may not use this mode.
671 * hub or switch with different Flow Control capabilities.
672 */ 693 */
673 hw->fc.original_type = hw->fc.type; 694 hw->fc.current_mode = hw->fc.requested_mode;
674 695
675 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type); 696 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n",
697 hw->fc.current_mode);
676 698
677 /* Call the necessary media_type subroutine to configure the link. */ 699 /* Call the necessary media_type subroutine to configure the link. */
678 ret_val = mac->ops.setup_physical_interface(hw); 700 ret_val = mac->ops.setup_physical_interface(hw);
@@ -724,7 +746,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
724 * do not support receiving pause frames). 746 * do not support receiving pause frames).
725 * 3: Both Rx and Tx flow control (symmetric) are enabled. 747 * 3: Both Rx and Tx flow control (symmetric) are enabled.
726 */ 748 */
727 switch (hw->fc.type) { 749 switch (hw->fc.current_mode) {
728 case e1000_fc_none: 750 case e1000_fc_none:
729 /* Flow control completely disabled by a software over-ride. */ 751 /* Flow control completely disabled by a software over-ride. */
730 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); 752 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
@@ -906,7 +928,7 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
906 * ability to transmit pause frames is not enabled, then these 928 * ability to transmit pause frames is not enabled, then these
907 * registers will be set to 0. 929 * registers will be set to 0.
908 */ 930 */
909 if (hw->fc.type & e1000_fc_tx_pause) { 931 if (hw->fc.current_mode & e1000_fc_tx_pause) {
910 /* 932 /*
911 * We need to set up the Receive Threshold high and low water 933 * We need to set up the Receive Threshold high and low water
912 * marks as well as (optionally) enabling the transmission of 934 * marks as well as (optionally) enabling the transmission of
@@ -945,7 +967,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
945 * receive flow control. 967 * receive flow control.
946 * 968 *
947 * The "Case" statement below enables/disable flow control 969 * The "Case" statement below enables/disable flow control
948 * according to the "hw->fc.type" parameter. 970 * according to the "hw->fc.current_mode" parameter.
949 * 971 *
950 * The possible values of the "fc" parameter are: 972 * The possible values of the "fc" parameter are:
951 * 0: Flow control is completely disabled 973 * 0: Flow control is completely disabled
@@ -956,9 +978,9 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
956 * 3: Both Rx and Tx flow control (symmetric) is enabled. 978 * 3: Both Rx and Tx flow control (symmetric) is enabled.
957 * other: No other values should be possible at this point. 979 * other: No other values should be possible at this point.
958 */ 980 */
959 hw_dbg(hw, "hw->fc.type = %u\n", hw->fc.type); 981 hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode);
960 982
961 switch (hw->fc.type) { 983 switch (hw->fc.current_mode) {
962 case e1000_fc_none: 984 case e1000_fc_none:
963 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); 985 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
964 break; 986 break;
@@ -1102,11 +1124,11 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1102 * ONLY. Hence, we must now check to see if we need to 1124 * ONLY. Hence, we must now check to see if we need to
1103 * turn OFF the TRANSMISSION of PAUSE frames. 1125 * turn OFF the TRANSMISSION of PAUSE frames.
1104 */ 1126 */
1105 if (hw->fc.original_type == e1000_fc_full) { 1127 if (hw->fc.requested_mode == e1000_fc_full) {
1106 hw->fc.type = e1000_fc_full; 1128 hw->fc.current_mode = e1000_fc_full;
1107 hw_dbg(hw, "Flow Control = FULL.\r\n"); 1129 hw_dbg(hw, "Flow Control = FULL.\r\n");
1108 } else { 1130 } else {
1109 hw->fc.type = e1000_fc_rx_pause; 1131 hw->fc.current_mode = e1000_fc_rx_pause;
1110 hw_dbg(hw, "Flow Control = " 1132 hw_dbg(hw, "Flow Control = "
1111 "RX PAUSE frames only.\r\n"); 1133 "RX PAUSE frames only.\r\n");
1112 } 1134 }
@@ -1124,7 +1146,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1124 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && 1146 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1125 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 1147 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1126 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 1148 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1127 hw->fc.type = e1000_fc_tx_pause; 1149 hw->fc.current_mode = e1000_fc_tx_pause;
1128 hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n"); 1150 hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n");
1129 } 1151 }
1130 /* 1152 /*
@@ -1140,14 +1162,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1140 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && 1162 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1141 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 1163 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1142 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 1164 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1143 hw->fc.type = e1000_fc_rx_pause; 1165 hw->fc.current_mode = e1000_fc_rx_pause;
1144 hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n"); 1166 hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n");
1145 } else { 1167 } else {
1146 /* 1168 /*
1147 * Per the IEEE spec, at this point flow control 1169 * Per the IEEE spec, at this point flow control
1148 * should be disabled. 1170 * should be disabled.
1149 */ 1171 */
1150 hw->fc.type = e1000_fc_none; 1172 hw->fc.current_mode = e1000_fc_none;
1151 hw_dbg(hw, "Flow Control = NONE.\r\n"); 1173 hw_dbg(hw, "Flow Control = NONE.\r\n");
1152 } 1174 }
1153 1175
@@ -1163,7 +1185,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1163 } 1185 }
1164 1186
1165 if (duplex == HALF_DUPLEX) 1187 if (duplex == HALF_DUPLEX)
1166 hw->fc.type = e1000_fc_none; 1188 hw->fc.current_mode = e1000_fc_none;
1167 1189
1168 /* 1190 /*
1169 * Now we call a subroutine to actually force the MAC 1191 * Now we call a subroutine to actually force the MAC
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 122539a0e1fe..d4639facd1bd 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -102,9 +102,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
102 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 102 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
103 le16_to_cpu(vlan)); 103 le16_to_cpu(vlan));
104 else 104 else
105 netif_receive_skb(skb); 105 napi_gro_receive(&adapter->napi, skb);
106
107 netdev->last_rx = jiffies;
108} 106}
109 107
110/** 108/**
@@ -1181,12 +1179,12 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
1181 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1179 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1182 } 1180 }
1183 1181
1184 if (netif_rx_schedule_prep(netdev, &adapter->napi)) { 1182 if (netif_rx_schedule_prep(&adapter->napi)) {
1185 adapter->total_tx_bytes = 0; 1183 adapter->total_tx_bytes = 0;
1186 adapter->total_tx_packets = 0; 1184 adapter->total_tx_packets = 0;
1187 adapter->total_rx_bytes = 0; 1185 adapter->total_rx_bytes = 0;
1188 adapter->total_rx_packets = 0; 1186 adapter->total_rx_packets = 0;
1189 __netif_rx_schedule(netdev, &adapter->napi); 1187 __netif_rx_schedule(&adapter->napi);
1190 } 1188 }
1191 1189
1192 return IRQ_HANDLED; 1190 return IRQ_HANDLED;
@@ -1248,12 +1246,12 @@ static irqreturn_t e1000_intr(int irq, void *data)
1248 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1246 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1249 } 1247 }
1250 1248
1251 if (netif_rx_schedule_prep(netdev, &adapter->napi)) { 1249 if (netif_rx_schedule_prep(&adapter->napi)) {
1252 adapter->total_tx_bytes = 0; 1250 adapter->total_tx_bytes = 0;
1253 adapter->total_tx_packets = 0; 1251 adapter->total_tx_packets = 0;
1254 adapter->total_rx_bytes = 0; 1252 adapter->total_rx_bytes = 0;
1255 adapter->total_rx_packets = 0; 1253 adapter->total_rx_packets = 0;
1256 __netif_rx_schedule(netdev, &adapter->napi); 1254 __netif_rx_schedule(&adapter->napi);
1257 } 1255 }
1258 1256
1259 return IRQ_HANDLED; 1257 return IRQ_HANDLED;
@@ -1322,10 +1320,10 @@ static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
1322 adapter->rx_ring->set_itr = 0; 1320 adapter->rx_ring->set_itr = 0;
1323 } 1321 }
1324 1322
1325 if (netif_rx_schedule_prep(netdev, &adapter->napi)) { 1323 if (netif_rx_schedule_prep(&adapter->napi)) {
1326 adapter->total_rx_bytes = 0; 1324 adapter->total_rx_bytes = 0;
1327 adapter->total_rx_packets = 0; 1325 adapter->total_rx_packets = 0;
1328 __netif_rx_schedule(netdev, &adapter->napi); 1326 __netif_rx_schedule(&adapter->napi);
1329 } 1327 }
1330 return IRQ_HANDLED; 1328 return IRQ_HANDLED;
1331} 1329}
@@ -1480,7 +1478,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1480 int err = 0, vector = 0; 1478 int err = 0, vector = 0;
1481 1479
1482 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1480 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1483 sprintf(adapter->rx_ring->name, "%s-rx0", netdev->name); 1481 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
1484 else 1482 else
1485 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 1483 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1486 err = request_irq(adapter->msix_entries[vector].vector, 1484 err = request_irq(adapter->msix_entries[vector].vector,
@@ -1493,7 +1491,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1493 vector++; 1491 vector++;
1494 1492
1495 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1493 if (strlen(netdev->name) < (IFNAMSIZ - 5))
1496 sprintf(adapter->tx_ring->name, "%s-tx0", netdev->name); 1494 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
1497 else 1495 else
1498 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 1496 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1499 err = request_irq(adapter->msix_entries[vector].vector, 1497 err = request_irq(adapter->msix_entries[vector].vector,
@@ -2003,8 +2001,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
2003 struct net_device *poll_dev = adapter->netdev; 2001 struct net_device *poll_dev = adapter->netdev;
2004 int tx_cleaned = 0, work_done = 0; 2002 int tx_cleaned = 0, work_done = 0;
2005 2003
2006 /* Must NOT use netdev_priv macro here. */ 2004 adapter = netdev_priv(poll_dev);
2007 adapter = poll_dev->priv;
2008 2005
2009 if (adapter->msix_entries && 2006 if (adapter->msix_entries &&
2010 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2007 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
@@ -2031,7 +2028,7 @@ clean_rx:
2031 if (work_done < budget) { 2028 if (work_done < budget) {
2032 if (adapter->itr_setting & 3) 2029 if (adapter->itr_setting & 3)
2033 e1000_set_itr(adapter); 2030 e1000_set_itr(adapter);
2034 netif_rx_complete(poll_dev, napi); 2031 netif_rx_complete(napi);
2035 if (adapter->msix_entries) 2032 if (adapter->msix_entries)
2036 ew32(IMS, adapter->rx_ring->ims_val); 2033 ew32(IMS, adapter->rx_ring->ims_val);
2037 else 2034 else
@@ -2787,7 +2784,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
2787 else 2784 else
2788 fc->pause_time = E1000_FC_PAUSE_TIME; 2785 fc->pause_time = E1000_FC_PAUSE_TIME;
2789 fc->send_xon = 1; 2786 fc->send_xon = 1;
2790 fc->type = fc->original_type; 2787 fc->current_mode = fc->requested_mode;
2791 2788
2792 /* Allow time for pending master requests to run */ 2789 /* Allow time for pending master requests to run */
2793 mac->ops.reset_hw(hw); 2790 mac->ops.reset_hw(hw);
@@ -3410,7 +3407,10 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
3410 struct e1000_hw *hw = &adapter->hw; 3407 struct e1000_hw *hw = &adapter->hw;
3411 u32 ctrl = er32(CTRL); 3408 u32 ctrl = er32(CTRL);
3412 3409
3413 e_info("Link is Up %d Mbps %s, Flow Control: %s\n", 3410 /* Link status message must follow this format for user tools */
3411 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
3412 "Flow Control: %s\n",
3413 adapter->netdev->name,
3414 adapter->link_speed, 3414 adapter->link_speed,
3415 (adapter->link_duplex == FULL_DUPLEX) ? 3415 (adapter->link_duplex == FULL_DUPLEX) ?
3416 "Full Duplex" : "Half Duplex", 3416 "Full Duplex" : "Half Duplex",
@@ -3420,7 +3420,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
3420 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); 3420 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
3421} 3421}
3422 3422
3423static bool e1000_has_link(struct e1000_adapter *adapter) 3423bool e1000_has_link(struct e1000_adapter *adapter)
3424{ 3424{
3425 struct e1000_hw *hw = &adapter->hw; 3425 struct e1000_hw *hw = &adapter->hw;
3426 bool link_active = 0; 3426 bool link_active = 0;
@@ -3495,6 +3495,7 @@ static void e1000_watchdog_task(struct work_struct *work)
3495 struct e1000_adapter, watchdog_task); 3495 struct e1000_adapter, watchdog_task);
3496 struct net_device *netdev = adapter->netdev; 3496 struct net_device *netdev = adapter->netdev;
3497 struct e1000_mac_info *mac = &adapter->hw.mac; 3497 struct e1000_mac_info *mac = &adapter->hw.mac;
3498 struct e1000_phy_info *phy = &adapter->hw.phy;
3498 struct e1000_ring *tx_ring = adapter->tx_ring; 3499 struct e1000_ring *tx_ring = adapter->tx_ring;
3499 struct e1000_hw *hw = &adapter->hw; 3500 struct e1000_hw *hw = &adapter->hw;
3500 u32 link, tctl; 3501 u32 link, tctl;
@@ -3601,6 +3602,13 @@ static void e1000_watchdog_task(struct work_struct *work)
3601 tctl |= E1000_TCTL_EN; 3602 tctl |= E1000_TCTL_EN;
3602 ew32(TCTL, tctl); 3603 ew32(TCTL, tctl);
3603 3604
3605 /*
3606 * Perform any post-link-up configuration before
3607 * reporting link up.
3608 */
3609 if (phy->ops.cfg_on_link_up)
3610 phy->ops.cfg_on_link_up(hw);
3611
3604 netif_carrier_on(netdev); 3612 netif_carrier_on(netdev);
3605 netif_tx_wake_all_queues(netdev); 3613 netif_tx_wake_all_queues(netdev);
3606 3614
@@ -3612,7 +3620,9 @@ static void e1000_watchdog_task(struct work_struct *work)
3612 if (netif_carrier_ok(netdev)) { 3620 if (netif_carrier_ok(netdev)) {
3613 adapter->link_speed = 0; 3621 adapter->link_speed = 0;
3614 adapter->link_duplex = 0; 3622 adapter->link_duplex = 0;
3615 e_info("Link is Down\n"); 3623 /* Link status message must follow this format */
3624 printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
3625 adapter->netdev->name);
3616 netif_carrier_off(netdev); 3626 netif_carrier_off(netdev);
3617 netif_tx_stop_all_queues(netdev); 3627 netif_tx_stop_all_queues(netdev);
3618 if (!test_bit(__E1000_DOWN, &adapter->state)) 3628 if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -4464,7 +4474,27 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4464 4474
4465 pci_disable_device(pdev); 4475 pci_disable_device(pdev);
4466 4476
4467 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4477 /*
4478 * The pci-e switch on some quad port adapters will report a
4479 * correctable error when the MAC transitions from D0 to D3. To
4480 * prevent this we need to mask off the correctable errors on the
4481 * downstream port of the pci-e switch.
4482 */
4483 if (adapter->flags & FLAG_IS_QUAD_PORT) {
4484 struct pci_dev *us_dev = pdev->bus->self;
4485 int pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
4486 u16 devctl;
4487
4488 pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
4489 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
4490 (devctl & ~PCI_EXP_DEVCTL_CERE));
4491
4492 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4493
4494 pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
4495 } else {
4496 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4497 }
4468 4498
4469 return 0; 4499 return 0;
4470} 4500}
@@ -4669,14 +4699,12 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
4669 u32 pba_num; 4699 u32 pba_num;
4670 4700
4671 /* print bus type/speed/width info */ 4701 /* print bus type/speed/width info */
4672 e_info("(PCI Express:2.5GB/s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n", 4702 e_info("(PCI Express:2.5GB/s:%s) %pM\n",
4673 /* bus width */ 4703 /* bus width */
4674 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 4704 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
4675 "Width x1"), 4705 "Width x1"),
4676 /* MAC address */ 4706 /* MAC address */
4677 netdev->dev_addr[0], netdev->dev_addr[1], 4707 netdev->dev_addr);
4678 netdev->dev_addr[2], netdev->dev_addr[3],
4679 netdev->dev_addr[4], netdev->dev_addr[5]);
4680 e_info("Intel(R) PRO/%s Network Connection\n", 4708 e_info("Intel(R) PRO/%s Network Connection\n",
4681 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); 4709 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
4682 e1000e_read_pba_num(hw, &pba_num); 4710 e1000e_read_pba_num(hw, &pba_num);
@@ -4694,20 +4722,40 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
4694 return; 4722 return;
4695 4723
4696 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); 4724 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
4697 if (!(le16_to_cpu(buf) & (1 << 0))) { 4725 if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
4698 /* Deep Smart Power Down (DSPD) */ 4726 /* Deep Smart Power Down (DSPD) */
4699 dev_warn(&adapter->pdev->dev, 4727 dev_warn(&adapter->pdev->dev,
4700 "Warning: detected DSPD enabled in EEPROM\n"); 4728 "Warning: detected DSPD enabled in EEPROM\n");
4701 } 4729 }
4702 4730
4703 ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf); 4731 ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
4704 if (le16_to_cpu(buf) & (3 << 2)) { 4732 if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
4705 /* ASPM enable */ 4733 /* ASPM enable */
4706 dev_warn(&adapter->pdev->dev, 4734 dev_warn(&adapter->pdev->dev,
4707 "Warning: detected ASPM enabled in EEPROM\n"); 4735 "Warning: detected ASPM enabled in EEPROM\n");
4708 } 4736 }
4709} 4737}
4710 4738
4739static const struct net_device_ops e1000e_netdev_ops = {
4740 .ndo_open = e1000_open,
4741 .ndo_stop = e1000_close,
4742 .ndo_start_xmit = e1000_xmit_frame,
4743 .ndo_get_stats = e1000_get_stats,
4744 .ndo_set_multicast_list = e1000_set_multi,
4745 .ndo_set_mac_address = e1000_set_mac,
4746 .ndo_change_mtu = e1000_change_mtu,
4747 .ndo_do_ioctl = e1000_ioctl,
4748 .ndo_tx_timeout = e1000_tx_timeout,
4749 .ndo_validate_addr = eth_validate_addr,
4750
4751 .ndo_vlan_rx_register = e1000_vlan_rx_register,
4752 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
4753 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
4754#ifdef CONFIG_NET_POLL_CONTROLLER
4755 .ndo_poll_controller = e1000_netpoll,
4756#endif
4757};
4758
4711/** 4759/**
4712 * e1000_probe - Device Initialization Routine 4760 * e1000_probe - Device Initialization Routine
4713 * @pdev: PCI device information struct 4761 * @pdev: PCI device information struct
@@ -4766,7 +4814,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4766 goto err_pci_reg; 4814 goto err_pci_reg;
4767 4815
4768 pci_set_master(pdev); 4816 pci_set_master(pdev);
4769 pci_save_state(pdev); 4817 /* PCI config space info */
4818 err = pci_save_state(pdev);
4819 if (err)
4820 goto err_alloc_etherdev;
4770 4821
4771 err = -ENOMEM; 4822 err = -ENOMEM;
4772 netdev = alloc_etherdev(sizeof(struct e1000_adapter)); 4823 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
@@ -4806,24 +4857,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4806 } 4857 }
4807 4858
4808 /* construct the net_device struct */ 4859 /* construct the net_device struct */
4809 netdev->open = &e1000_open; 4860 netdev->netdev_ops = &e1000e_netdev_ops;
4810 netdev->stop = &e1000_close;
4811 netdev->hard_start_xmit = &e1000_xmit_frame;
4812 netdev->get_stats = &e1000_get_stats;
4813 netdev->set_multicast_list = &e1000_set_multi;
4814 netdev->set_mac_address = &e1000_set_mac;
4815 netdev->change_mtu = &e1000_change_mtu;
4816 netdev->do_ioctl = &e1000_ioctl;
4817 e1000e_set_ethtool_ops(netdev); 4861 e1000e_set_ethtool_ops(netdev);
4818 netdev->tx_timeout = &e1000_tx_timeout;
4819 netdev->watchdog_timeo = 5 * HZ; 4862 netdev->watchdog_timeo = 5 * HZ;
4820 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); 4863 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
4821 netdev->vlan_rx_register = e1000_vlan_rx_register;
4822 netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
4823 netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
4824#ifdef CONFIG_NET_POLL_CONTROLLER
4825 netdev->poll_controller = e1000_netpoll;
4826#endif
4827 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 4864 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
4828 4865
4829 netdev->mem_start = mmio_start; 4866 netdev->mem_start = mmio_start;
@@ -4924,10 +4961,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4924 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); 4961 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
4925 4962
4926 if (!is_valid_ether_addr(netdev->perm_addr)) { 4963 if (!is_valid_ether_addr(netdev->perm_addr)) {
4927 e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n", 4964 e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
4928 netdev->perm_addr[0], netdev->perm_addr[1],
4929 netdev->perm_addr[2], netdev->perm_addr[3],
4930 netdev->perm_addr[4], netdev->perm_addr[5]);
4931 err = -EIO; 4965 err = -EIO;
4932 goto err_eeprom; 4966 goto err_eeprom;
4933 } 4967 }
@@ -4948,8 +4982,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4948 /* Initialize link parameters. User can change them with ethtool */ 4982 /* Initialize link parameters. User can change them with ethtool */
4949 adapter->hw.mac.autoneg = 1; 4983 adapter->hw.mac.autoneg = 1;
4950 adapter->fc_autoneg = 1; 4984 adapter->fc_autoneg = 1;
4951 adapter->hw.fc.original_type = e1000_fc_default; 4985 adapter->hw.fc.requested_mode = e1000_fc_default;
4952 adapter->hw.fc.type = e1000_fc_default; 4986 adapter->hw.fc.current_mode = e1000_fc_default;
4953 adapter->hw.phy.autoneg_advertised = 0x2f; 4987 adapter->hw.phy.autoneg_advertised = 0x2f;
4954 4988
4955 /* ring size defaults */ 4989 /* ring size defaults */
@@ -4990,6 +5024,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4990 adapter->wol = adapter->eeprom_wol; 5024 adapter->wol = adapter->eeprom_wol;
4991 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 5025 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
4992 5026
5027 /* save off EEPROM version number */
5028 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
5029
4993 /* reset the hardware with the new settings */ 5030 /* reset the hardware with the new settings */
4994 e1000e_reset(adapter); 5031 e1000e_reset(adapter);
4995 5032
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 6cd333ae61d0..dc4a9cba6a73 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -744,7 +744,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
744 * other: No software override. The flow control configuration 744 * other: No software override. The flow control configuration
745 * in the EEPROM is used. 745 * in the EEPROM is used.
746 */ 746 */
747 switch (hw->fc.type) { 747 switch (hw->fc.current_mode) {
748 case e1000_fc_none: 748 case e1000_fc_none:
749 /* 749 /*
750 * Flow control (Rx & Tx) is completely disabled by a 750 * Flow control (Rx & Tx) is completely disabled by a
@@ -1030,14 +1030,14 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1030 1030
1031 e1000e_phy_force_speed_duplex_setup(hw, &phy_data); 1031 e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
1032 1032
1033 /* Reset the phy to commit changes. */
1034 phy_data |= MII_CR_RESET;
1035
1036 ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data); 1033 ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
1037 if (ret_val) 1034 if (ret_val)
1038 return ret_val; 1035 return ret_val;
1039 1036
1040 udelay(1); 1037 /* Reset the phy to commit changes. */
1038 ret_val = e1000e_commit_phy(hw);
1039 if (ret_val)
1040 return ret_val;
1041 1041
1042 if (phy->autoneg_wait_to_complete) { 1042 if (phy->autoneg_wait_to_complete) {
1043 hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n"); 1043 hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n");
@@ -1114,7 +1114,7 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
1114 u32 ctrl; 1114 u32 ctrl;
1115 1115
1116 /* Turn off flow control when forcing speed/duplex */ 1116 /* Turn off flow control when forcing speed/duplex */
1117 hw->fc.type = e1000_fc_none; 1117 hw->fc.current_mode = e1000_fc_none;
1118 1118
1119 /* Force speed/duplex on the mac */ 1119 /* Force speed/duplex on the mac */
1120 ctrl = er32(CTRL); 1120 ctrl = er32(CTRL);
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index 6390f51ea6fb..20eb05cddb83 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -107,7 +107,7 @@ static void e21_block_output(struct net_device *dev, int count,
107 const unsigned char *buf, int start_page); 107 const unsigned char *buf, int start_page);
108static void e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, 108static void e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
109 int ring_page); 109 int ring_page);
110 110static int e21_open(struct net_device *dev);
111static int e21_close(struct net_device *dev); 111static int e21_close(struct net_device *dev);
112 112
113 113
@@ -160,6 +160,21 @@ out:
160} 160}
161#endif 161#endif
162 162
163static const struct net_device_ops e21_netdev_ops = {
164 .ndo_open = e21_open,
165 .ndo_stop = e21_close,
166
167 .ndo_start_xmit = ei_start_xmit,
168 .ndo_tx_timeout = ei_tx_timeout,
169 .ndo_get_stats = ei_get_stats,
170 .ndo_set_multicast_list = ei_set_multicast_list,
171 .ndo_validate_addr = eth_validate_addr,
172 .ndo_change_mtu = eth_change_mtu,
173#ifdef CONFIG_NET_POLL_CONTROLLER
174 .ndo_poll_controller = ei_poll,
175#endif
176};
177
163static int __init e21_probe1(struct net_device *dev, int ioaddr) 178static int __init e21_probe1(struct net_device *dev, int ioaddr)
164{ 179{
165 int i, status, retval; 180 int i, status, retval;
@@ -265,11 +280,8 @@ static int __init e21_probe1(struct net_device *dev, int ioaddr)
265 ei_status.block_input = &e21_block_input; 280 ei_status.block_input = &e21_block_input;
266 ei_status.block_output = &e21_block_output; 281 ei_status.block_output = &e21_block_output;
267 ei_status.get_8390_hdr = &e21_get_8390_hdr; 282 ei_status.get_8390_hdr = &e21_get_8390_hdr;
268 dev->open = &e21_open; 283
269 dev->stop = &e21_close; 284 dev->netdev_ops = &e21_netdev_ops;
270#ifdef CONFIG_NET_POLL_CONTROLLER
271 dev->poll_controller = ei_poll;
272#endif
273 NS8390_init(dev, 0); 285 NS8390_init(dev, 0);
274 286
275 retval = register_netdev(dev); 287 retval = register_netdev(dev);
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 1f11350e16cf..e187c88ae145 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -605,7 +605,7 @@ out:
605 605
606static void __init printEEPROMInfo(struct net_device *dev) 606static void __init printEEPROMInfo(struct net_device *dev)
607{ 607{
608 struct eepro_local *lp = (struct eepro_local *)dev->priv; 608 struct eepro_local *lp = netdev_priv(dev);
609 int ioaddr = dev->base_addr; 609 int ioaddr = dev->base_addr;
610 unsigned short Word; 610 unsigned short Word;
611 int i,j; 611 int i,j;
@@ -690,7 +690,6 @@ static void __init eepro_print_info (struct net_device *dev)
690 struct eepro_local * lp = netdev_priv(dev); 690 struct eepro_local * lp = netdev_priv(dev);
691 int i; 691 int i;
692 const char * ifmap[] = {"AUI", "10Base2", "10BaseT"}; 692 const char * ifmap[] = {"AUI", "10Base2", "10BaseT"};
693 DECLARE_MAC_BUF(mac);
694 693
695 i = inb(dev->base_addr + ID_REG); 694 i = inb(dev->base_addr + ID_REG);
696 printk(KERN_DEBUG " id: %#x ",i); 695 printk(KERN_DEBUG " id: %#x ",i);
@@ -715,7 +714,7 @@ static void __init eepro_print_info (struct net_device *dev)
715 break; 714 break;
716 } 715 }
717 716
718 printk(" %s", print_mac(mac, dev->dev_addr)); 717 printk(" %pM", dev->dev_addr);
719 718
720 if (net_debug > 3) 719 if (net_debug > 3)
721 printk(KERN_DEBUG ", %dK RCV buffer", 720 printk(KERN_DEBUG ", %dK RCV buffer",
@@ -1396,7 +1395,7 @@ set_multicast_list(struct net_device *dev)
1396#define eeprom_delay() { udelay(40); } 1395#define eeprom_delay() { udelay(40); }
1397#define EE_READ_CMD (6 << 6) 1396#define EE_READ_CMD (6 << 6)
1398 1397
1399int 1398static int
1400read_eeprom(int ioaddr, int location, struct net_device *dev) 1399read_eeprom(int ioaddr, int location, struct net_device *dev)
1401{ 1400{
1402 int i; 1401 int i;
@@ -1581,7 +1580,6 @@ eepro_rx(struct net_device *dev)
1581 1580
1582 skb->protocol = eth_type_trans(skb,dev); 1581 skb->protocol = eth_type_trans(skb,dev);
1583 netif_rx(skb); 1582 netif_rx(skb);
1584 dev->last_rx = jiffies;
1585 dev->stats.rx_packets++; 1583 dev->stats.rx_packets++;
1586 } 1584 }
1587 1585
@@ -1676,7 +1674,7 @@ eepro_transmit_interrupt(struct net_device *dev)
1676static int eepro_ethtool_get_settings(struct net_device *dev, 1674static int eepro_ethtool_get_settings(struct net_device *dev,
1677 struct ethtool_cmd *cmd) 1675 struct ethtool_cmd *cmd)
1678{ 1676{
1679 struct eepro_local *lp = (struct eepro_local *)dev->priv; 1677 struct eepro_local *lp = netdev_priv(dev);
1680 1678
1681 cmd->supported = SUPPORTED_10baseT_Half | 1679 cmd->supported = SUPPORTED_10baseT_Half |
1682 SUPPORTED_10baseT_Full | 1680 SUPPORTED_10baseT_Full |
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
deleted file mode 100644
index e3e26c595fa3..000000000000
--- a/drivers/net/eepro100.c
+++ /dev/null
@@ -1,2401 +0,0 @@
1/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
2/*
3 Written 1996-1999 by Donald Becker.
4
5 The driver also contains updates by different kernel developers
6 (see incomplete list below).
7 Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
8 Please use this email address and linux-kernel mailing list for bug reports.
9
10 This software may be used and distributed according to the terms
11 of the GNU General Public License, incorporated herein by reference.
12
13 This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
14 It should work with all i82557/558/559 boards.
15
16 Version history:
17 1998 Apr - 2000 Feb Andrey V. Savochkin <saw@saw.sw.com.sg>
18 Serious fixes for multicast filter list setting, TX timeout routine;
19 RX ring refilling logic; other stuff
20 2000 Feb Jeff Garzik <jgarzik@pobox.com>
21 Convert to new PCI driver interface
22 2000 Mar 24 Dragan Stancevic <visitor@valinux.com>
23 Disabled FC and ER, to avoid lockups when when we get FCP interrupts.
24 2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
25 PCI DMA API fixes, adding pci_dma_sync_single calls where neccesary
26 2000 Aug 31 David Mosberger <davidm@hpl.hp.com>
27 rx_align support: enables rx DMA without causing unaligned accesses.
28*/
29
30static const char * const version =
31"eepro100.c:v1.09j-t 9/29/99 Donald Becker\n"
32"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
33
34/* A few user-configurable values that apply to all boards.
35 First set is undocumented and spelled per Intel recommendations. */
36
37static int congenb /* = 0 */; /* Enable congestion control in the DP83840. */
38static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
39static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
40/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
41static int txdmacount = 128;
42static int rxdmacount /* = 0 */;
43
44#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
45 defined(__arm__)
46 /* align rx buffers to 2 bytes so that IP header is aligned */
47# define rx_align(skb) skb_reserve((skb), 2)
48# define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed))
49#else
50# define rx_align(skb)
51# define RxFD_ALIGNMENT
52#endif
53
54/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
55 Lower values use more memory, but are faster. */
56static int rx_copybreak = 200;
57
58/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
59static int max_interrupt_work = 20;
60
61/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
62static int multicast_filter_limit = 64;
63
64/* 'options' is used to pass a transceiver override or full-duplex flag
65 e.g. "options=16" for FD, "options=32" for 100mbps-only. */
66static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
67static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
68
69/* A few values that may be tweaked. */
70/* The ring sizes should be a power of two for efficiency. */
71#define TX_RING_SIZE 64
72#define RX_RING_SIZE 64
73/* How much slots multicast filter setup may take.
74 Do not descrease without changing set_rx_mode() implementaion. */
75#define TX_MULTICAST_SIZE 2
76#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
77/* Actual number of TX packets queued, must be
78 <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
79#define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
80/* Hysteresis marking queue as no longer full. */
81#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
82
83/* Operational parameters that usually are not changed. */
84
85/* Time in jiffies before concluding the transmitter is hung. */
86#define TX_TIMEOUT (2*HZ)
87/* Size of an pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
88#define PKT_BUF_SZ 1536
89
90#include <linux/module.h>
91
92#include <linux/kernel.h>
93#include <linux/string.h>
94#include <linux/errno.h>
95#include <linux/ioport.h>
96#include <linux/slab.h>
97#include <linux/interrupt.h>
98#include <linux/timer.h>
99#include <linux/pci.h>
100#include <linux/spinlock.h>
101#include <linux/init.h>
102#include <linux/mii.h>
103#include <linux/delay.h>
104#include <linux/bitops.h>
105
106#include <asm/io.h>
107#include <asm/uaccess.h>
108#include <asm/irq.h>
109
110#include <linux/netdevice.h>
111#include <linux/etherdevice.h>
112#include <linux/rtnetlink.h>
113#include <linux/skbuff.h>
114#include <linux/ethtool.h>
115
116static int use_io;
117static int debug = -1;
118#define DEBUG_DEFAULT (NETIF_MSG_DRV | \
119 NETIF_MSG_HW | \
120 NETIF_MSG_RX_ERR | \
121 NETIF_MSG_TX_ERR)
122#define DEBUG ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
123
124
125MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
126MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
127MODULE_LICENSE("GPL");
128module_param(use_io, int, 0);
129module_param(debug, int, 0);
130module_param_array(options, int, NULL, 0);
131module_param_array(full_duplex, int, NULL, 0);
132module_param(congenb, int, 0);
133module_param(txfifo, int, 0);
134module_param(rxfifo, int, 0);
135module_param(txdmacount, int, 0);
136module_param(rxdmacount, int, 0);
137module_param(rx_copybreak, int, 0);
138module_param(max_interrupt_work, int, 0);
139module_param(multicast_filter_limit, int, 0);
140MODULE_PARM_DESC(debug, "debug level (0-6)");
141MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
142MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
143MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
144MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
145MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
146MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
147MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
148MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
149MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
150MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
151
152#define RUN_AT(x) (jiffies + (x))
153
154#define netdevice_start(dev)
155#define netdevice_stop(dev)
156#define netif_set_tx_timeout(dev, tf, tm) \
157 do { \
158 (dev)->tx_timeout = (tf); \
159 (dev)->watchdog_timeo = (tm); \
160 } while(0)
161
162
163
164/*
165 Theory of Operation
166
167I. Board Compatibility
168
169This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
170single-chip fast Ethernet controller for PCI, as used on the Intel
171EtherExpress Pro 100 adapter.
172
173II. Board-specific settings
174
175PCI bus devices are configured by the system at boot time, so no jumpers
176need to be set on the board. The system BIOS should be set to assign the
177PCI INTA signal to an otherwise unused system IRQ line. While it's
178possible to share PCI interrupt lines, it negatively impacts performance and
179only recent kernels support it.
180
181III. Driver operation
182
183IIIA. General
184The Speedo3 is very similar to other Intel network chips, that is to say
185"apparently designed on a different planet". This chips retains the complex
186Rx and Tx descriptors and multiple buffers pointers as previous chips, but
187also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
188Tx mode, but in a simplified lower-overhead manner: it associates only a
189single buffer descriptor with each frame descriptor.
190
191Despite the extra space overhead in each receive skbuff, the driver must use
192the simplified Rx buffer mode to assure that only a single data buffer is
193associated with each RxFD. The driver implements this by reserving space
194for the Rx descriptor at the head of each Rx skbuff.
195
196The Speedo-3 has receive and command unit base addresses that are added to
197almost all descriptor pointers. The driver sets these to zero, so that all
198pointer fields are absolute addresses.
199
200The System Control Block (SCB) of some previous Intel chips exists on the
201chip in both PCI I/O and memory space. This driver uses the I/O space
202registers, but might switch to memory mapped mode to better support non-x86
203processors.
204
205IIIB. Transmit structure
206
207The driver must use the complex Tx command+descriptor mode in order to
208have a indirect pointer to the skbuff data section. Each Tx command block
209(TxCB) is associated with two immediately appended Tx Buffer Descriptor
210(TxBD). A fixed ring of these TxCB+TxBD pairs are kept as part of the
211speedo_private data structure for each adapter instance.
212
213The newer i82558 explicitly supports this structure, and can read the two
214TxBDs in the same PCI burst as the TxCB.
215
216This ring structure is used for all normal transmit packets, but the
217transmit packet descriptors aren't long enough for most non-Tx commands such
218as CmdConfigure. This is complicated by the possibility that the chip has
219already loaded the link address in the previous descriptor. So for these
220commands we convert the next free descriptor on the ring to a NoOp, and point
221that descriptor's link to the complex command.
222
223An additional complexity of these non-transmit commands are that they may be
224added asynchronous to the normal transmit queue, so we disable interrupts
225whenever the Tx descriptor ring is manipulated.
226
227A notable aspect of these special configure commands is that they do
228work with the normal Tx ring entry scavenge method. The Tx ring scavenge
229is done at interrupt time using the 'dirty_tx' index, and checking for the
230command-complete bit. While the setup frames may have the NoOp command on the
231Tx ring marked as complete, but not have completed the setup command, this
232is not a problem. The tx_ring entry can be still safely reused, as the
233tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
234
235Commands may have bits set e.g. CmdSuspend in the command word to either
236suspend or stop the transmit/command unit. This driver always flags the last
237command with CmdSuspend, erases the CmdSuspend in the previous command, and
238then issues a CU_RESUME.
239Note: Watch out for the potential race condition here: imagine
240 erasing the previous suspend
241 the chip processes the previous command
242 the chip processes the final command, and suspends
243 doing the CU_RESUME
244 the chip processes the next-yet-valid post-final-command.
245So blindly sending a CU_RESUME is only safe if we do it immediately after
246after erasing the previous CmdSuspend, without the possibility of an
247intervening delay. Thus the resume command is always within the
248interrupts-disabled region. This is a timing dependence, but handling this
249condition in a timing-independent way would considerably complicate the code.
250
251Note: In previous generation Intel chips, restarting the command unit was a
252notoriously slow process. This is presumably no longer true.
253
254IIIC. Receive structure
255
256Because of the bus-master support on the Speedo3 this driver uses the new
257SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
258This scheme allocates full-sized skbuffs as receive buffers. The value
259SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
260trade-off the memory wasted by passing the full-sized skbuff to the queue
261layer for all frames vs. the copying cost of copying a frame to a
262correctly-sized skbuff.
263
264For small frames the copying cost is negligible (esp. considering that we
265are pre-loading the cache with immediately useful header information), so we
266allocate a new, minimally-sized skbuff. For large frames the copying cost
267is non-trivial, and the larger copy might flush the cache of useful data, so
268we pass up the skbuff the packet was received into.
269
270IV. Notes
271
272Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
273that stated that I could disclose the information. But I still resent
274having to sign an Intel NDA when I'm helping Intel sell their own product!
275
276*/
277
278static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);
279
280/* Offsets to the various registers.
281 All accesses need not be longword aligned. */
282enum speedo_offsets {
283 SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
284 SCBIntmask = 3,
285 SCBPointer = 4, /* General purpose pointer. */
286 SCBPort = 8, /* Misc. commands and operands. */
287 SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
288 SCBCtrlMDI = 16, /* MDI interface control. */
289 SCBEarlyRx = 20, /* Early receive byte count. */
290};
/* Commands that can be put in a command list entry.  The opcode occupies
   bits 16-18 of the descriptor cmd_status word; CmdSuspend/CmdIntr are
   modifier bits that can be OR-ed onto any opcode. */
enum commands {
	CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
	CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
	CmdDump = 0x60000, CmdDiagnose = 0x70000,
	CmdSuspend = 0x40000000,	/* Suspend after completion. */
	CmdIntr = 0x20000000,		/* Interrupt after completion. */
	CmdTxFlex = 0x00080000,		/* Use "Flexible mode" for CmdTx command. */
};
/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
   status bits.  Previous driver versions used separate 16 bit fields for
   commands and statuses.  --SAW
 */
#if defined(__alpha__)
/* Use an atomic bit operation so the read-modify-write cannot race with
   the chip updating the status half of cmd_status. */
# define clear_suspend(cmd)  clear_bit(30, &(cmd)->cmd_status);
#else
/* A 16-bit store to the upper half of the little-endian 32-bit word leaves
   the chip-written status bits alone; bit 14 of the high __le16 is bit 30
   of the whole word. */
# define clear_suspend(cmd)  ((__le16 *)&(cmd)->cmd_status)[1] &= ~cpu_to_le16(1<<14)
#endif
309
/* Bits for the SCBCmd register: the high byte carries interrupt masks,
   the low byte Command Unit (CU*) and Receive Unit (Rx*) commands. */
enum SCBCmdBits {
	SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
	SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
	SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
	/* The rest are Rx and Tx commands. */
	CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
	CUCmdBase=0x0060,	/* CU Base address (set to zero) . */
	CUDumpStats=0x0070, /* Dump then reset stats counters. */
	RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
	RxResumeNoResources=0x0007,
};
321
/* Chip-global operations written to the SCBPort register. */
enum SCBPort_cmds {
	PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
};
325
/* The Speedo3 Rx and Tx frame/buffer descriptors. */
struct descriptor {			    /* A generic descriptor. */
	volatile __le32 cmd_status;	/* All command and status fields. */
	__le32 link;				    /* struct descriptor *  */
	unsigned char params[0];	/* Command-dependent trailing data, e.g. the
				   station address for CmdIASetup (see speedo_resume()). */
};
332
/* The Speedo3 Rx and Tx buffer descriptors. */
struct RxFD {					/* Receive frame descriptor. */
	volatile __le32 status;		/* Completion/status bits; see enum RxFD_bits. */
	__le32 link;				/* struct RxFD * */
	__le32 rx_buf_addr;			/* void * (set to 0xffffffff in speedo_rx_soft_reset()). */
	__le32 count;
} RxFD_ALIGNMENT;			/* NOTE(review): RxFD_ALIGNMENT is presumably an
					   arch-specific alignment attribute macro defined
					   earlier in this file -- confirm. */
340
/* Selected elements of the Tx/RxFD.status word.  StatusComplete shares the
   bit position of RxComplete, but applies to Tx descriptors. */
enum RxFD_bits {
	RxComplete=0x8000, RxOK=0x2000,
	RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
	RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
	TxUnderrun=0x1000, StatusComplete=0x8000,
};
348
/* Byte length of the CmdConfigure parameter block. */
#define CONFIG_DATA_SIZE 22
struct TxFD {					/* Transmit frame descriptor set. */
	__le32 status;				/* Command/status word; see enum commands. */
	__le32 link;				/* void * (bus address of the next descriptor). */
	__le32 tx_desc_addr;			/* Always points to the tx_buf_addr element. */
	__le32 count;				/* # of TBD (=1), Tx start thresh., etc. */
	/* This constitutes two "TBD" entries -- we only use one. */
#define TX_DESCR_BUF_OFFSET 16			/* Byte offset of tx_buf_addr0 within the TxFD. */
	__le32 tx_buf_addr0;			/* void *, frame to be transmitted.  */
	__le32 tx_buf_size0;			/* Length of Tx frame. */
	__le32 tx_buf_addr1;			/* void *, frame to be transmitted.  */
	__le32 tx_buf_size1;			/* Length of Tx frame. */
	/* the structure must have space for at least CONFIG_DATA_SIZE starting
	 * from tx_desc_addr field */
};
364
/* Multicast filter setting block.  --SAW */
struct speedo_mc_block {
	struct speedo_mc_block *next;	/* Singly linked list; head/tail live in speedo_private. */
	unsigned int tx;		/* NOTE(review): presumably the Tx ring position when
					   the setup frame was queued -- confirm in set_rx_mode(). */
	dma_addr_t frame_dma;		/* Bus address of 'frame' below. */
	unsigned int len;
	struct descriptor frame __attribute__ ((__aligned__(16)));
};
373
/* Elements of the dump_statistics block. This block must be lword aligned.
   The chip fills these counters after a CUStatsAddr/CUDumpStats sequence;
   done_marker is cleared before a dump is requested (see speedo_resume()). */
struct speedo_stats {
	__le32 tx_good_frames;
	__le32 tx_coll16_errs;
	__le32 tx_late_colls;
	__le32 tx_underruns;
	__le32 tx_lost_carrier;
	__le32 tx_deferred;
	__le32 tx_one_colls;
	__le32 tx_multi_colls;
	__le32 tx_total_colls;
	__le32 rx_good_frames;
	__le32 rx_crc_errs;
	__le32 rx_align_errs;
	__le32 rx_resource_errs;
	__le32 rx_overrun_errs;
	__le32 rx_colls_errs;
	__le32 rx_runt_errs;
	__le32 done_marker;
};
394
/* Flag bits for speedo_private.rx_ring_state (RX ring status). */
enum Rx_ring_state_bits {
	RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
};
398
/* Do not change the position (alignment) of the first few elements!
   The later elements are grouped for cache locality.

   Unfortunately, all the positions have been shifted since there.
   A new re-alignment is required.  2000/03/06  SAW */
struct speedo_private {
	void __iomem *regs;			/* Mapped chip registers; set early for mdio_{read,write}. */
	struct TxFD	*tx_ring;		/* Commands (usually CmdTxPacket). */
	struct RxFD *rx_ringp[RX_RING_SIZE];	/* Rx descriptor, used as ring. */
	/* The addresses of a Tx/Rx-in-place packets/buffers. */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	/* Mapped addresses of the rings. */
	dma_addr_t tx_ring_dma;
#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
	dma_addr_t rx_ring_dma[RX_RING_SIZE];
	struct descriptor *last_cmd;		/* Last command sent. */
	unsigned int cur_tx, dirty_tx;		/* The ring entries to be free()ed. */
	spinlock_t lock;			/* Group with Tx control cache line. */
	u32 tx_threshold;			/* The value for txdesc.count. */
	struct RxFD *last_rxf;			/* Last filled RX buffer. */
	dma_addr_t last_rxf_dma;		/* Bus address of last_rxf. */
	unsigned int cur_rx, dirty_rx;		/* The next free ring entry */
	long last_rx_time;			/* Last Rx, in jiffies, to handle Rx hang. */
	struct net_device_stats stats;
	struct speedo_stats *lstats;		/* Statistics dump area; tail of the tx_ring DMA block. */
	dma_addr_t lstats_dma;
	int chip_id;
	struct pci_dev *pdev;			/* Owning PCI device. */
	struct timer_list timer;		/* Media selection timer. */
	struct speedo_mc_block *mc_setup_head;	/* Multicast setup frame list head. */
	struct speedo_mc_block *mc_setup_tail;	/* Multicast setup frame list tail. */
	long in_interrupt;			/* Word-aligned dev->interrupt */
	unsigned char acpi_pwr;			/* PCI power state saved at probe time. */
	signed char rx_mode;			/* Current PROMISC/ALLMULTI setting. */
	unsigned int tx_full:1;			/* The Tx queue is full. */
	unsigned int flow_ctrl:1;		/* Use 802.3x flow control. */
	unsigned int rx_bug:1;			/* Work around receiver hang errata. */
	unsigned char default_port:8;		/* Last dev->if_port value. */
	unsigned char rx_ring_state;		/* RX ring status flags. */
	unsigned short phy[2];			/* PHY media interfaces available. */
	unsigned short partner;			/* Link partner caps. */
	struct mii_if_info mii_if;		/* MII API hooks, info */
	u32 msg_enable;				/* debug message level */
};
444
/* The parameters for a CmdConfigure operation.
   There are so many options that it would be difficult to document each bit.
   We mostly use the default or recommended settings.
   Byte 0 is the parameter-block length (22 = CONFIG_DATA_SIZE). */
static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
	22, 0x08, 0, 0,  0, 0, 0x32, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0,
	0xf2, 0x48,   0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
	0x3f, 0x05, };
/* Variant used for i82558-class chips (differing bytes per the name;
   exact bit meanings per the i8255x configure-command documentation). */
static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
	22, 0x08, 0, 1,  0, 0, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
	0, 0x2E, 0,  0x60, 0x08, 0x88,
	0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
	0x31, 0x05, };
458
/* PHY media interface chips, indexed by the PHY type field of EEPROM
   word 6/7 (see speedo_found1()). */
static const char * const phys[] = {
	"None", "i82553-A/B", "i82553-C", "i82503",
	"DP83840", "80c240",  "80c24", "i82555",
	"unknown-8", "unknown-9", "DP83840A", "unknown-11",
	"unknown-12", "unknown-13", "unknown-14", "unknown-15", };
enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
					 S80C24, I82555, DP83840A=10, };
/* Indexed like phys[]; flags which PHY types use the MII interface
   (per the name -- confirm against the 8255x documentation). */
static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
/* Serial EEPROM opcode for a read access. */
#define EE_READ_CMD		(6)
469
470static int eepro100_init_one(struct pci_dev *pdev,
471 const struct pci_device_id *ent);
472
473static int do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len);
474static int mdio_read(struct net_device *dev, int phy_id, int location);
475static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
476static int speedo_open(struct net_device *dev);
477static void speedo_resume(struct net_device *dev);
478static void speedo_timer(unsigned long data);
479static void speedo_init_rx_ring(struct net_device *dev);
480static void speedo_tx_timeout(struct net_device *dev);
481static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
482static void speedo_refill_rx_buffers(struct net_device *dev, int force);
483static int speedo_rx(struct net_device *dev);
484static void speedo_tx_buffer_gc(struct net_device *dev);
485static irqreturn_t speedo_interrupt(int irq, void *dev_instance);
486static int speedo_close(struct net_device *dev);
487static struct net_device_stats *speedo_get_stats(struct net_device *dev);
488static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
489static void set_rx_mode(struct net_device *dev);
490static void speedo_show_state(struct net_device *dev);
491static const struct ethtool_ops ethtool_ops;
492
493
494
#ifdef honor_default_port
/* Optional driver feature to allow forcing the transceiver setting.
   Not recommended.
   MII BMCR values indexed by dev->default_port (used in speedo_open()). */
static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
			   0x2000, 0x2100, 0x0400, 0x3100};
#endif
501
502/* How to wait for the command unit to accept a command.
503 Typically this takes 0 ticks. */
504static inline unsigned char wait_for_cmd_done(struct net_device *dev,
505 struct speedo_private *sp)
506{
507 int wait = 1000;
508 void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
509 unsigned char r;
510
511 do {
512 udelay(1);
513 r = ioread8(cmd_ioaddr);
514 } while(r && --wait >= 0);
515
516 if (wait < 0)
517 printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
518 return r;
519}
520
521static int __devinit eepro100_init_one (struct pci_dev *pdev,
522 const struct pci_device_id *ent)
523{
524 void __iomem *ioaddr;
525 int irq, pci_bar;
526 int acpi_idle_state = 0, pm;
527 static int cards_found /* = 0 */;
528 unsigned long pci_base;
529
530#ifndef MODULE
531 /* when built-in, we only print version if device is found */
532 static int did_version;
533 if (did_version++ == 0)
534 printk(version);
535#endif
536
537 /* save power state before pci_enable_device overwrites it */
538 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
539 if (pm) {
540 u16 pwr_command;
541 pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
542 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
543 }
544
545 if (pci_enable_device(pdev))
546 goto err_out_free_mmio_region;
547
548 pci_set_master(pdev);
549
550 if (!request_region(pci_resource_start(pdev, 1),
551 pci_resource_len(pdev, 1), "eepro100")) {
552 dev_err(&pdev->dev, "eepro100: cannot reserve I/O ports\n");
553 goto err_out_none;
554 }
555 if (!request_mem_region(pci_resource_start(pdev, 0),
556 pci_resource_len(pdev, 0), "eepro100")) {
557 dev_err(&pdev->dev, "eepro100: cannot reserve MMIO region\n");
558 goto err_out_free_pio_region;
559 }
560
561 irq = pdev->irq;
562 pci_bar = use_io ? 1 : 0;
563 pci_base = pci_resource_start(pdev, pci_bar);
564 if (DEBUG & NETIF_MSG_PROBE)
565 printk("Found Intel i82557 PCI Speedo at %#lx, IRQ %d.\n",
566 pci_base, irq);
567
568 ioaddr = pci_iomap(pdev, pci_bar, 0);
569 if (!ioaddr) {
570 dev_err(&pdev->dev, "eepro100: cannot remap IO\n");
571 goto err_out_free_mmio_region;
572 }
573
574 if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
575 cards_found++;
576 else
577 goto err_out_iounmap;
578
579 return 0;
580
581err_out_iounmap: ;
582 pci_iounmap(pdev, ioaddr);
583err_out_free_mmio_region:
584 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
585err_out_free_pio_region:
586 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
587err_out_none:
588 return -ENODEV;
589}
590
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void poll_speedo (struct net_device *dev)
{
	const int irq = dev->irq;

	/* disable_irq is heavy-handed, but the lockless design leaves no
	   other way to exclude the interrupt handler. */
	disable_irq(irq);
	speedo_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
607
608static int __devinit speedo_found1(struct pci_dev *pdev,
609 void __iomem *ioaddr, int card_idx, int acpi_idle_state)
610{
611 struct net_device *dev;
612 struct speedo_private *sp;
613 const char *product;
614 int i, option;
615 u16 eeprom[0x100];
616 int size;
617 void *tx_ring_space;
618 dma_addr_t tx_ring_dma;
619 DECLARE_MAC_BUF(mac);
620
621 size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
622 tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
623 if (tx_ring_space == NULL)
624 return -1;
625
626 dev = alloc_etherdev(sizeof(struct speedo_private));
627 if (dev == NULL) {
628 printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
629 pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
630 return -1;
631 }
632
633 SET_NETDEV_DEV(dev, &pdev->dev);
634
635 if (dev->mem_start > 0)
636 option = dev->mem_start;
637 else if (card_idx >= 0 && options[card_idx] >= 0)
638 option = options[card_idx];
639 else
640 option = 0;
641
642 rtnl_lock();
643 if (dev_alloc_name(dev, dev->name) < 0)
644 goto err_free_unlock;
645
646 /* Read the station address EEPROM before doing the reset.
647 Nominally his should even be done before accepting the device, but
648 then we wouldn't have a device name with which to report the error.
649 The size test is for 6 bit vs. 8 bit address serial EEPROMs.
650 */
651 {
652 void __iomem *iobase;
653 int read_cmd, ee_size;
654 u16 sum;
655 int j;
656
657 /* Use IO only to avoid postponed writes and satisfy EEPROM timing
658 requirements. */
659 iobase = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
660 if (!iobase)
661 goto err_free_unlock;
662 if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
663 == 0xffe0000) {
664 ee_size = 0x100;
665 read_cmd = EE_READ_CMD << 24;
666 } else {
667 ee_size = 0x40;
668 read_cmd = EE_READ_CMD << 22;
669 }
670
671 for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
672 u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
673 eeprom[i] = value;
674 sum += value;
675 if (i < 3) {
676 dev->dev_addr[j++] = value;
677 dev->dev_addr[j++] = value >> 8;
678 }
679 }
680 if (sum != 0xBABA)
681 printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
682 "check settings before activating this device!\n",
683 dev->name, sum);
684 /* Don't unregister_netdev(dev); as the EEPro may actually be
685 usable, especially if the MAC address is set later.
686 On the other hand, it may be unusable if MDI data is corrupted. */
687
688 pci_iounmap(pdev, iobase);
689 }
690
691 /* Reset the chip: stop Tx and Rx processes and clear counters.
692 This takes less than 10usec and will easily finish before the next
693 action. */
694 iowrite32(PortReset, ioaddr + SCBPort);
695 ioread32(ioaddr + SCBPort);
696 udelay(10);
697
698 if (eeprom[3] & 0x0100)
699 product = "OEM i82557/i82558 10/100 Ethernet";
700 else
701 product = pci_name(pdev);
702
703 printk(KERN_INFO "%s: %s, %s, IRQ %d.\n", dev->name, product,
704 print_mac(mac, dev->dev_addr), pdev->irq);
705
706 sp = netdev_priv(dev);
707
708 /* we must initialize this early, for mdio_{read,write} */
709 sp->regs = ioaddr;
710
711#if 1 || defined(kernel_bloat)
712 /* OK, this is pure kernel bloat. I don't like it when other drivers
713 waste non-pageable kernel space to emit similar messages, but I need
714 them for bug reports. */
715 {
716 const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
717 /* The self-test results must be paragraph aligned. */
718 volatile s32 *self_test_results;
719 int boguscnt = 16000; /* Timeout for set-test. */
720 if ((eeprom[3] & 0x03) != 0x03)
721 printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
722 " work-around.\n");
723 printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
724 " connectors present:",
725 eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
726 for (i = 0; i < 4; i++)
727 if (eeprom[5] & (1<<i))
728 printk(connectors[i]);
729 printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
730 phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
731 if (eeprom[7] & 0x0700)
732 printk(KERN_INFO " Secondary interface chip %s.\n",
733 phys[(eeprom[7]>>8)&7]);
734 if (((eeprom[6]>>8) & 0x3f) == DP83840
735 || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
736 int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
737 if (congenb)
738 mdi_reg23 |= 0x0100;
739 printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
740 mdi_reg23);
741 mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
742 }
743 if ((option >= 0) && (option & 0x70)) {
744 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
745 (option & 0x20 ? 100 : 10),
746 (option & 0x10 ? "full" : "half"));
747 mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
748 ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
749 ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
750 }
751
752 /* Perform a system self-test. */
753 self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
754 self_test_results[0] = 0;
755 self_test_results[1] = -1;
756 iowrite32(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
757 do {
758 udelay(10);
759 } while (self_test_results[1] == -1 && --boguscnt >= 0);
760
761 if (boguscnt < 0) { /* Test optimized out. */
762 printk(KERN_ERR "Self test failed, status %8.8x:\n"
763 KERN_ERR " Failure to initialize the i82557.\n"
764 KERN_ERR " Verify that the card is a bus-master"
765 " capable slot.\n",
766 self_test_results[1]);
767 } else
768 printk(KERN_INFO " General self-test: %s.\n"
769 KERN_INFO " Serial sub-system self-test: %s.\n"
770 KERN_INFO " Internal registers self-test: %s.\n"
771 KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
772 self_test_results[1] & 0x1000 ? "failed" : "passed",
773 self_test_results[1] & 0x0020 ? "failed" : "passed",
774 self_test_results[1] & 0x0008 ? "failed" : "passed",
775 self_test_results[1] & 0x0004 ? "failed" : "passed",
776 self_test_results[0]);
777 }
778#endif /* kernel_bloat */
779
780 iowrite32(PortReset, ioaddr + SCBPort);
781 ioread32(ioaddr + SCBPort);
782 udelay(10);
783
784 /* Return the chip to its original power state. */
785 pci_set_power_state(pdev, acpi_idle_state);
786
787 pci_set_drvdata (pdev, dev);
788 SET_NETDEV_DEV(dev, &pdev->dev);
789
790 dev->irq = pdev->irq;
791
792 sp->pdev = pdev;
793 sp->msg_enable = DEBUG;
794 sp->acpi_pwr = acpi_idle_state;
795 sp->tx_ring = tx_ring_space;
796 sp->tx_ring_dma = tx_ring_dma;
797 sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
798 sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
799 init_timer(&sp->timer); /* used in ioctl() */
800 spin_lock_init(&sp->lock);
801
802 sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
803 if (card_idx >= 0) {
804 if (full_duplex[card_idx] >= 0)
805 sp->mii_if.full_duplex = full_duplex[card_idx];
806 }
807 sp->default_port = option >= 0 ? (option & 0x0f) : 0;
808
809 sp->phy[0] = eeprom[6];
810 sp->phy[1] = eeprom[7];
811
812 sp->mii_if.phy_id = eeprom[6] & 0x1f;
813 sp->mii_if.phy_id_mask = 0x1f;
814 sp->mii_if.reg_num_mask = 0x1f;
815 sp->mii_if.dev = dev;
816 sp->mii_if.mdio_read = mdio_read;
817 sp->mii_if.mdio_write = mdio_write;
818
819 sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
820 if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
821 || (pdev->device == 0x2449) || (pdev->device == 0x2459)
822 || (pdev->device == 0x245D)) {
823 sp->chip_id = 1;
824 }
825
826 if (sp->rx_bug)
827 printk(KERN_INFO " Receiver lock-up workaround activated.\n");
828
829 /* The Speedo-specific entries in the device structure. */
830 dev->open = &speedo_open;
831 dev->hard_start_xmit = &speedo_start_xmit;
832 netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
833 dev->stop = &speedo_close;
834 dev->get_stats = &speedo_get_stats;
835 dev->set_multicast_list = &set_rx_mode;
836 dev->do_ioctl = &speedo_ioctl;
837 SET_ETHTOOL_OPS(dev, &ethtool_ops);
838#ifdef CONFIG_NET_POLL_CONTROLLER
839 dev->poll_controller = &poll_speedo;
840#endif
841
842 if (register_netdevice(dev))
843 goto err_free_unlock;
844 rtnl_unlock();
845
846 return 0;
847
848 err_free_unlock:
849 rtnl_unlock();
850 free_netdev(dev);
851 return -1;
852}
853
/* Issue an SCB command that the chip may be slow to accept (e.g. RxAddrLoad
   or CUCmdBase; see speedo_resume()).  Waits for the command byte to drain
   before and after writing 'cmd', logging an error if the chip never
   accepts it.  The first post-write loop busy-polls; the second adds a 1us
   delay per poll to keep the common case cheap. */
static void do_slow_command(struct net_device *dev, struct speedo_private *sp, int cmd)
{
	void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
	int wait = 0;
	/* Drain any previously issued command (up to ~200 polls). */
	do
		if (ioread8(cmd_ioaddr) == 0) break;
	while(++wait <= 200);
	/* NOTE(review): the loop allows 200 polls but the warning fires above
	   100 -- possibly intentional ("accepted, but slowly"); confirm. */
	if (wait > 100)
		printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
		       ioread8(cmd_ioaddr), wait);

	iowrite8(cmd, cmd_ioaddr);

	for (wait = 0; wait <= 100; wait++)
		if (ioread8(cmd_ioaddr) == 0) return;
	for (; wait <= 20000; wait++)
		if (ioread8(cmd_ioaddr) == 0) return;
		else udelay(1);
	printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
	       " Current status %8.8x.\n",
	       cmd, wait, ioread32(sp->regs + SCBStatus));
}
876
/* Serial EEPROM section.
   A "bit" grungy, but we work our way through bit-by-bit :->. */
/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x01	/* EEPROM shift clock. */
#define EE_CS		0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x04	/* EEPROM chip data in. */
#define EE_DATA_READ	0x08	/* EEPROM chip data out. */
#define EE_ENB		(0x4800 | EE_CS)	/* Base control word, chip selected. */
#define EE_WRITE_0	0x4802	/* EE_ENB with data bit 0. */
#define EE_WRITE_1	0x4806	/* EE_ENB | EE_DATA_WRITE: data bit 1. */
#define EE_OFFSET	SCBeeprom	/* Register offset of the EEPROM control word. */
888
/* The fixes for the code were kindly provided by Dragan Stancevic
   <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
   access timing.
   The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
   interval for serial EEPROM. However, it looks like that there is an
   additional requirement dictating larger udelay's in the code below.
   2000/05/24  SAW */

/* Clock a command out to the serial EEPROM and return the bits read back.
   'cmd' holds the opcode/address left-justified so that bit 'cmd_len' is
   shifted out first; each clock also shifts one EE_DATA_READ bit into the
   result.  Chip select is dropped on exit to terminate the access.
   The iowrite/udelay ordering is timing-critical -- do not reorder. */
static int __devinit do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len)
{
	unsigned retval = 0;
	void __iomem *ee_addr = ioaddr + SCBeeprom;

	iowrite16(EE_ENB, ee_addr); udelay(2);
	iowrite16(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);

	/* Shift the command bits out. */
	do {
		short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
		iowrite16(dataval, ee_addr); udelay(2);
		iowrite16(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
		retval = (retval << 1) | ((ioread16(ee_addr) & EE_DATA_READ) ? 1 : 0);
	} while (--cmd_len >= 0);
	iowrite16(EE_ENB, ee_addr); udelay(2);

	/* Terminate the EEPROM access. */
	iowrite16(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}
917
918static int mdio_read(struct net_device *dev, int phy_id, int location)
919{
920 struct speedo_private *sp = netdev_priv(dev);
921 void __iomem *ioaddr = sp->regs;
922 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
923 iowrite32(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
924 do {
925 val = ioread32(ioaddr + SCBCtrlMDI);
926 if (--boguscnt < 0) {
927 printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
928 break;
929 }
930 } while (! (val & 0x10000000));
931 return val & 0xffff;
932}
933
934static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
935{
936 struct speedo_private *sp = netdev_priv(dev);
937 void __iomem *ioaddr = sp->regs;
938 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
939 iowrite32(0x04000000 | (location<<16) | (phy_id<<21) | value,
940 ioaddr + SCBCtrlMDI);
941 do {
942 val = ioread32(ioaddr + SCBCtrlMDI);
943 if (--boguscnt < 0) {
944 printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
945 break;
946 }
947 } while (! (val & 0x10000000));
948}
949
/* net_device open hook: bring the interface up.
   Powers the chip to D0, resets the Tx bookkeeping, installs the shared
   interrupt handler, builds the Rx ring, programs the hardware via
   speedo_resume(), and starts the media-monitor timer.
   Returns 0 on success or the request_irq() error code. */
static int
speedo_open(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int retval;

	if (netif_msg_ifup(sp))
		printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);

	pci_set_power_state(sp->pdev, PCI_D0);

	/* Set up the Tx queue early.. */
	sp->cur_tx = 0;
	sp->dirty_tx = 0;
	sp->last_cmd = NULL;
	sp->tx_full = 0;
	sp->in_interrupt = 0;

	/* .. we can safely take handler calls during init. */
	retval = request_irq(dev->irq, &speedo_interrupt, IRQF_SHARED, dev->name, dev);
	if (retval) {
		return retval;
	}

	dev->if_port = sp->default_port;

#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
	/* Retrigger negotiation to reset previous errors. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f ;
		/* Use 0x3300 for restarting NWay, other values to force xcvr:
		   0x0000 10-HD
		   0x0100 10-FD
		   0x2000 100-HD
		   0x2100 100-FD
		*/
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
#endif
	}
#endif

	speedo_init_rx_ring(dev);

	/* Fire up the hardware. */
	iowrite16(SCBMaskAll, ioaddr + SCBCmd);
	speedo_resume(dev);

	netdevice_start(dev);
	netif_start_queue(dev);

	/* Setup the chip and configure the multicast list. */
	sp->mc_setup_head = NULL;
	sp->mc_setup_tail = NULL;
	sp->flow_ctrl = sp->partner = 0;
	sp->rx_mode = -1;			/* Invalid -> always reset the mode. */
	set_rx_mode(dev);
	/* phy[0] bit 15 set appears to mean "no MII transceiver" -- the same
	   test guards every mdio access in this driver. */
	if ((sp->phy[0] & 0x8000) == 0)
		sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);

	mii_check_link(&sp->mii_if);

	if (netif_msg_ifup(sp)) {
		printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
			   dev->name, ioread16(ioaddr + SCBStatus));
	}

	/* Set the timer.  The timer serves a dual purpose:
	   1) to monitor the media interface (e.g. link beat) and perhaps switch
	   to an alternate media type
	   2) to monitor Rx activity, and restart the Rx process if the receiver
	   hangs. */
	sp->timer.expires = RUN_AT((24*HZ)/10);	/* 2.4 sec. */
	sp->timer.data = (unsigned long)dev;
	sp->timer.function = &speedo_timer;	/* timer handler */
	add_timer(&sp->timer);

	/* No need to wait for the command unit to accept here. */
	if ((sp->phy[0] & 0x8000) == 0)
		mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);

	return 0;
}
1036
/* Start the chip hardware after a full reset.
   Re-loads the CU/RU base pointers, the statistics dump area and the Rx
   ring address, queues an IASetup command carrying our station address on
   the Tx ring, and finally starts the command unit with interrupts
   unmasked.  The register write ordering here is hardware-mandated. */
static void speedo_resume(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;

	/* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
	sp->tx_threshold = 0x01208000;

	/* Set the segment registers to '0'. */
	if (wait_for_cmd_done(dev, sp) != 0) {
		iowrite32(PortPartialReset, ioaddr + SCBPort);
		udelay(10);
	}

	iowrite32(0, ioaddr + SCBPointer);
	ioread32(ioaddr + SCBPointer);		/* Flush to PCI. */
	udelay(10);			/* Bogus, but it avoids the bug. */

	/* Note: these next two operations can take a while. */
	do_slow_command(dev, sp, RxAddrLoad);
	do_slow_command(dev, sp, CUCmdBase);

	/* Load the statistics block and rx ring addresses. */
	iowrite32(sp->lstats_dma, ioaddr + SCBPointer);
	ioread32(ioaddr + SCBPointer);		/* Flush to PCI */

	iowrite8(CUStatsAddr, ioaddr + SCBCmd);
	/* Cleared so a later dump completion can be detected. */
	sp->lstats->done_marker = 0;
	wait_for_cmd_done(dev, sp);

	if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
		if (netif_msg_rx_err(sp))
			printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
					dev->name);
	} else {
		iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
			 ioaddr + SCBPointer);
		ioread32(ioaddr + SCBPointer);		/* Flush to PCI */
	}

	/* Note: RxStart should complete instantly. */
	do_slow_command(dev, sp, RxStart);
	do_slow_command(dev, sp, CUDumpStats);

	/* Fill the first command with our physical address. */
	{
		struct descriptor *ias_cmd;

		ias_cmd =
			(struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
		/* Avoid a bug(?!) here by marking the command already completed. */
		ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
		ias_cmd->link =
			cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
		memcpy(ias_cmd->params, dev->dev_addr, 6);
		if (sp->last_cmd)
			clear_suspend(sp->last_cmd);
		sp->last_cmd = ias_cmd;
	}

	/* Start the chip's Tx process and unmask interrupts. */
	iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
		 ioaddr + SCBPointer);
	/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
	   remain masked --Dragan */
	iowrite16(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
}
1105
/*
 * Sometimes the receiver stops making progress.  This routine knows how to
 * get it going again, without losing packets or being otherwise nasty like
 * a chip reset would be.  Previously the driver had a whole sequence
 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
 * do another, etc.  But those things don't really matter.  Separate logic
 * in the ISR provides for allocating buffers--the other half of operation
 * is just making sure the receiver is active.  speedo_rx_soft_reset does that.
 * This problem with the old, more involved algorithm is shown up under
 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
 */
static void
speedo_rx_soft_reset(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct RxFD *rfd;
	void __iomem *ioaddr;

	ioaddr = sp->regs;
	/* Make sure the chip has drained the previous command first. */
	if (wait_for_cmd_done(dev, sp) != 0) {
		printk("%s: previous command stalled\n", dev->name);
		return;
	}
	/*
	* Put the hardware into a known state.
	*/
	iowrite8(RxAbort, ioaddr + SCBCmd);

	rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];

	/* NOTE(review): 0xffffffff presumably marks the buffer address
	   invalid so the restarted receiver re-fetches it -- confirm
	   against the 8255x documentation. */
	rfd->rx_buf_addr = cpu_to_le32(0xffffffff);

	/* Wait for RxAbort to be accepted before restarting the receiver. */
	if (wait_for_cmd_done(dev, sp) != 0) {
		printk("%s: RxAbort command stalled\n", dev->name);
		return;
	}
	iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
		ioaddr + SCBPointer);
	iowrite8(RxStart, ioaddr + SCBCmd);
}
1146
1147
/* Media monitoring and control. */
/* Periodic timer callback: tracks link-partner / flow-control changes via
   MII, and re-issues set_rx_mode() when a mode reload is pending or the
   receiver appears hung (rx_bug workaround).  Re-arms itself every 2s. */
static void speedo_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int phy_num = sp->phy[0] & 0x1f;

	/* We have MII and lost link beat. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int partner = mdio_read(dev, phy_num, MII_LPA);
		if (partner != sp->partner) {
			/* 0x0400: pause (802.3x flow control) capability bit. */
			int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
			if (netif_msg_link(sp)) {
				printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
				printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
					   dev->name, sp->partner, partner, sp->mii_if.advertising);
			}
			sp->partner = partner;
			if (flow_ctrl != sp->flow_ctrl) {
				sp->flow_ctrl = flow_ctrl;
				sp->rx_mode = -1;	/* Trigger a reload. */
			}
		}
	}
	mii_check_link(&sp->mii_if);
	if (netif_msg_timer(sp)) {
		printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
			   dev->name, ioread16(ioaddr + SCBStatus));
	}
	/* NOTE(review): the raw jiffies comparison below would be wrap-safe
	   written with time_after(); confirm before changing. */
	if (sp->rx_mode < 0  ||
		(sp->rx_bug  && jiffies - sp->last_rx_time > 2*HZ)) {
		/* We haven't received a packet in a Long Time.  We might have been
		   bitten by the receiver hang bug.  This can be cleared by sending
		   a set multicast list command. */
		if (netif_msg_timer(sp))
			printk(KERN_DEBUG "%s: Sending a multicast list set command"
				   " from a timer routine,"
				   " m=%d, j=%ld, l=%ld.\n",
				   dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
		set_rx_mode(dev);
	}
	/* We must continue to monitor the media. */
	sp->timer.expires = RUN_AT(2*HZ);	/* 2.0 sec. */
	add_timer(&sp->timer);
}
1194
/* Debug helper: dump the Tx/Rx ring indices and per-descriptor status
   words to the kernel log.  Emits output only when the netif_msg_pktdata
   message level is enabled. */
static void speedo_show_state(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int i;

	if (netif_msg_pktdata(sp)) {
		printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n",
		    dev->name, sp->cur_tx, sp->dirty_tx);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
			    i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
			    i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
			    i, sp->tx_ring[i].status);

		printk(KERN_DEBUG "%s: Printing Rx ring"
		    " (next to receive into %u, dirty index %u).\n",
		    dev->name, sp->cur_rx, sp->dirty_rx);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
			    sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
			    i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
			    i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
			    i, (sp->rx_ringp[i] != NULL) ?
			    (unsigned)sp->rx_ringp[i]->status : 0);
	}

#if 0
	{
		void __iomem *ioaddr = sp->regs;
		int phy_num = sp->phy[0] & 0x1f;
		for (i = 0; i < 16; i++) {
			/* FIXME: what does it mean?  --SAW */
			if (i == 6) i = 21;
			printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
				   dev->name, phy_num, i, mdio_read(dev, phy_num, i));
		}
	}
#endif

}
1235
1236/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1237static void
1238speedo_init_rx_ring(struct net_device *dev)
1239{
1240 struct speedo_private *sp = netdev_priv(dev);
1241 struct RxFD *rxf, *last_rxf = NULL;
1242 dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
1243 int i;
1244
1245 sp->cur_rx = 0;
1246
1247 for (i = 0; i < RX_RING_SIZE; i++) {
1248 struct sk_buff *skb;
1249 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1250 if (skb)
1251 rx_align(skb); /* Align IP on 16 byte boundary */
1252 sp->rx_skbuff[i] = skb;
1253 if (skb == NULL)
1254 break; /* OK. Just initially short of Rx bufs. */
1255 skb->dev = dev; /* Mark as being used by this device. */
1256 rxf = (struct RxFD *)skb->data;
1257 sp->rx_ringp[i] = rxf;
1258 sp->rx_ring_dma[i] =
1259 pci_map_single(sp->pdev, rxf,
1260 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
1261 skb_reserve(skb, sizeof(struct RxFD));
1262 if (last_rxf) {
1263 last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
1264 pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
1265 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1266 }
1267 last_rxf = rxf;
1268 last_rxf_dma = sp->rx_ring_dma[i];
1269 rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
1270 rxf->link = 0; /* None yet. */
1271 /* This field unused by i82557. */
1272 rxf->rx_buf_addr = cpu_to_le32(0xffffffff);
1273 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1274 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
1275 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1276 }
1277 sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1278 /* Mark the last entry as end-of-list. */
1279 last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
1280 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
1281 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1282 sp->last_rxf = last_rxf;
1283 sp->last_rxf_dma = last_rxf_dma;
1284}
1285
/*
 * Discard everything still queued on the Tx ring -- each dropped skb is
 * counted as a tx_error and its DMA mapping released -- then free all
 * pending multicast setup frames, clear tx_full and restart the queue.
 * Used by the Tx timeout path after the chip has been reset, so nothing
 * left in the ring will ever complete.
 */
static void speedo_purge_tx(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry;

	while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
		entry = sp->dirty_tx % TX_RING_SIZE;
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_errors++;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = NULL;
		}
		sp->dirty_tx++;
	}
	/* Drop the whole queued multicast-setup frame list. */
	while (sp->mc_setup_head != NULL) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	sp->tx_full = 0;
	netif_wake_queue(dev);
}
1317
/*
 * Reset the MII transceiver while preserving its control and
 * advertisement registers across the reset.  Only applies when an MII
 * PHY is present (phy[0] bit 15 clear; when set the AUI port is used --
 * see set_rx_mode()).  The exact write sequence is an empirical
 * workaround (see comment below); the intermediate magic values are
 * taken on faith from the original author.
 */
static void reset_mii(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);

	/* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f;
		/* Save state to restore after the reset. */
		int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
		int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
		mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
		mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
		mdio_write(dev, phy_addr, MII_BMCR, 0x8000);	/* BMCR reset bit */
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_read(dev, phy_addr, MII_BMCR);
		/* Restore the saved control and advertisement registers. */
		mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
		mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
#endif
	}
}
1340
1341static void speedo_tx_timeout(struct net_device *dev)
1342{
1343 struct speedo_private *sp = netdev_priv(dev);
1344 void __iomem *ioaddr = sp->regs;
1345 int status = ioread16(ioaddr + SCBStatus);
1346 unsigned long flags;
1347
1348 if (netif_msg_tx_err(sp)) {
1349 printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
1350 " %4.4x at %d/%d command %8.8x.\n",
1351 dev->name, status, ioread16(ioaddr + SCBCmd),
1352 sp->dirty_tx, sp->cur_tx,
1353 sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
1354
1355 }
1356 speedo_show_state(dev);
1357#if 0
1358 if ((status & 0x00C0) != 0x0080
1359 && (status & 0x003C) == 0x0010) {
1360 /* Only the command unit has stopped. */
1361 printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
1362 dev->name);
1363 iowrite32(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE]),
1364 ioaddr + SCBPointer);
1365 iowrite16(CUStart, ioaddr + SCBCmd);
1366 reset_mii(dev);
1367 } else {
1368#else
1369 {
1370#endif
1371 del_timer_sync(&sp->timer);
1372 /* Reset the Tx and Rx units. */
1373 iowrite32(PortReset, ioaddr + SCBPort);
1374 /* We may get spurious interrupts here. But I don't think that they
1375 may do much harm. 1999/12/09 SAW */
1376 udelay(10);
1377 /* Disable interrupts. */
1378 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1379 synchronize_irq(dev->irq);
1380 speedo_tx_buffer_gc(dev);
1381 /* Free as much as possible.
1382 It helps to recover from a hang because of out-of-memory.
1383 It also simplifies speedo_resume() in case TX ring is full or
1384 close-to-be full. */
1385 speedo_purge_tx(dev);
1386 speedo_refill_rx_buffers(dev, 1);
1387 spin_lock_irqsave(&sp->lock, flags);
1388 speedo_resume(dev);
1389 sp->rx_mode = -1;
1390 dev->trans_start = jiffies;
1391 spin_unlock_irqrestore(&sp->lock, flags);
1392 set_rx_mode(dev); /* it takes the spinlock itself --SAW */
1393 /* Reset MII transceiver. Do it before starting the timer to serialize
1394 mdio_xxx operations. Yes, it's a paranoya :-) 2000/05/09 SAW */
1395 reset_mii(dev);
1396 sp->timer.expires = RUN_AT(2*HZ);
1397 add_timer(&sp->timer);
1398 }
1399 return;
1400}
1401
/*
 * Queue one packet for transmission.
 *
 * Builds a flexible-mode Tx command (CmdSuspend | CmdTx | CmdTxFlex) in
 * the next ring slot, links it behind the previous command, clears the
 * suspend bit on that previous command and issues CUResume so the
 * command unit advances into the new descriptor.  Returns 0 on success
 * or 1 with the queue stopped when the ring is full (pre-NETDEV_TX_*
 * return convention).  sp->lock serializes against the interrupt
 * handler and set_rx_mode().
 */
static int
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int entry;

	/* Prevent interrupts from changing the Tx ring from underneath us. */
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);

	/* Check if there are enough space. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
		netif_stop_queue(dev);
		sp->tx_full = 1;
		spin_unlock_irqrestore(&sp->lock, flags);
		return 1;
	}

	/* Calculate the Tx descriptor entry. */
	entry = sp->cur_tx++ % TX_RING_SIZE;

	sp->tx_skbuff[entry] = skb;
	sp->tx_ring[entry].status =
		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
	/* Request a Tx interrupt only on every TX_RING_SIZE/4-th descriptor
	   to bound the interrupt rate. */
	if (!(entry & ((TX_RING_SIZE>>2)-1)))
		sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
	sp->tx_ring[entry].link =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
	sp->tx_ring[entry].tx_desc_addr =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
	/* The data region is always in one buffer descriptor. */
	sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
	sp->tx_ring[entry].tx_buf_addr0 =
		cpu_to_le32(pci_map_single(sp->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE));
	sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);

	/* workaround for hardware bug on 10 mbit half duplex */

	if ((sp->partner == 0) && (sp->chip_id == 1)) {
		wait_for_cmd_done(dev, sp);
		/* NOTE(review): a zero command byte written to SCBCmd --
		   presumably a benign poke required by the erratum; confirm
		   against the i8255x errata. */
		iowrite8(0 , ioaddr + SCBCmd);
		udelay(1);
	}

	/* Trigger the command unit resume. */
	wait_for_cmd_done(dev, sp);
	clear_suspend(sp->last_cmd);
	/* We want the time window between clearing suspend flag on the previous
	   command and resuming CU to be as small as possible.
	   Interrupts in between are very undesired.  --SAW */
	iowrite8(CUResume, ioaddr + SCBCmd);
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

	/* Leave room for set_rx_mode(). If there is no more space than reserved
	   for multicast filter mark the ring as full. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		netif_stop_queue(dev);
		sp->tx_full = 1;
	}

	spin_unlock_irqrestore(&sp->lock, flags);

	dev->trans_start = jiffies;

	return 0;
}
1472
1473static void speedo_tx_buffer_gc(struct net_device *dev)
1474{
1475 unsigned int dirty_tx;
1476 struct speedo_private *sp = netdev_priv(dev);
1477
1478 dirty_tx = sp->dirty_tx;
1479 while ((int)(sp->cur_tx - dirty_tx) > 0) {
1480 int entry = dirty_tx % TX_RING_SIZE;
1481 int status = le32_to_cpu(sp->tx_ring[entry].status);
1482
1483 if (netif_msg_tx_done(sp))
1484 printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1485 entry, status);
1486 if ((status & StatusComplete) == 0)
1487 break; /* It still hasn't been processed. */
1488 if (status & TxUnderrun)
1489 if (sp->tx_threshold < 0x01e08000) {
1490 if (netif_msg_tx_err(sp))
1491 printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
1492 dev->name);
1493 sp->tx_threshold += 0x00040000;
1494 }
1495 /* Free the original skb. */
1496 if (sp->tx_skbuff[entry]) {
1497 sp->stats.tx_packets++; /* Count only user packets. */
1498 sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1499 pci_unmap_single(sp->pdev,
1500 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1501 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1502 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1503 sp->tx_skbuff[entry] = NULL;
1504 }
1505 dirty_tx++;
1506 }
1507
1508 if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
1509 printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1510 " full=%d.\n",
1511 dirty_tx, sp->cur_tx, sp->tx_full);
1512 dirty_tx += TX_RING_SIZE;
1513 }
1514
1515 while (sp->mc_setup_head != NULL
1516 && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
1517 struct speedo_mc_block *t;
1518 if (netif_msg_tx_err(sp))
1519 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1520 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1521 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1522 t = sp->mc_setup_head->next;
1523 kfree(sp->mc_setup_head);
1524 sp->mc_setup_head = t;
1525 }
1526 if (sp->mc_setup_head == NULL)
1527 sp->mc_setup_tail = NULL;
1528
1529 sp->dirty_tx = dirty_tx;
1530}
1531
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.
   It loops acknowledging and servicing status bits (bounded by
   max_interrupt_work): receives packets, tops up Rx buffers, re-primes a
   suspended or resource-starved receive unit, and reclaims finished Tx
   descriptors, waking the queue once it drains below TX_QUEUE_UNFULL. */
static irqreturn_t speedo_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct speedo_private *sp;
	void __iomem *ioaddr;
	long boguscnt = max_interrupt_work;
	unsigned short status;
	unsigned int handled = 0;

	sp = netdev_priv(dev);
	ioaddr = sp->regs;

#ifndef final_version
	/* A lock to prevent simultaneous entry on SMP machines. */
	if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
			   dev->name);
		sp->in_interrupt = 0;	/* Avoid halting machine. */
		return IRQ_NONE;
	}
#endif

	do {
		status = ioread16(ioaddr + SCBStatus);
		/* Acknowledge all of the current interrupt sources ASAP. */
		/* Will change from 0xfc00 to 0xff00 when we start handling
		   FCP and ER interrupts --Dragan */
		iowrite16(status & 0xfc00, ioaddr + SCBStatus);

		if (netif_msg_intr(sp))
			printk(KERN_DEBUG "%s: interrupt  status=%#4.4x.\n",
				   dev->name, status);

		if ((status & 0xfc00) == 0)
			break;	/* No interrupt source left pending. */
		handled = 1;


		if ((status & 0x5000) ||	/* Packet received, or Rx error. */
			(sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
									/* Need to gather the postponed packet. */
			speedo_rx(dev);

		/* Always check if all rx buffers are allocated.  --SAW */
		speedo_refill_rx_buffers(dev, 0);

		spin_lock(&sp->lock);
		/*
		 * The chip may have suspended reception for various reasons.
		 * Check for that, and re-prime it should this be the case.
		 */
		switch ((status >> 2) & 0xf) {
		case 0: /* Idle */
			break;
		case 1:	/* Suspended */
		case 2:	/* No resources (RxFDs) */
		case 9:	/* Suspended with no more RBDs */
		case 10: /* No resources due to no RBDs */
		case 12: /* Ready with no RBDs */
			speedo_rx_soft_reset(dev);
			break;
		case 3:  case 5:  case 6:  case 7:  case 8:
		case 11:  case 13:  case 14:  case 15:
			/* these are all reserved values */
			break;
		}


		/* User interrupt, Command/Tx unit interrupt or CU not active. */
		if (status & 0xA400) {
			speedo_tx_buffer_gc(dev);
			if (sp->tx_full
				&& (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
				/* The ring is no longer full. */
				sp->tx_full = 0;
				netif_wake_queue(dev); /* Attention: under a spinlock.  --SAW */
			}
		}

		spin_unlock(&sp->lock);

		if (--boguscnt < 0) {
			printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
				   dev->name, status);
			/* Clear all interrupt sources. */
			/* Will change from 0xfc00 to 0xff00 when we start handling
			   FCP and ER interrupts --Dragan */
			iowrite16(0xfc00, ioaddr + SCBStatus);
			break;
		}
	} while (1);

	if (netif_msg_intr(sp))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + SCBStatus));

	clear_bit(0, (void*)&sp->in_interrupt);
	return IRQ_RETVAL(handled);
}
1633
/*
 * Allocate and DMA-map a fresh Rx skb for ring slot 'entry'.  The RxFD
 * descriptor lives at the head of the skb data area; the skb is then
 * reserved past it so the payload follows the descriptor.  Returns the
 * new RxFD, or NULL on allocation failure (in which case both
 * rx_skbuff[entry] and rx_ringp[entry] are cleared).
 */
static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct RxFD *rxf;
	struct sk_buff *skb;
	/* Get a fresh skbuff to replace the consumed one. */
	skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
	if (skb)
		rx_align(skb);		/* Align IP on 16 byte boundary */
	sp->rx_skbuff[entry] = skb;
	if (skb == NULL) {
		sp->rx_ringp[entry] = NULL;
		return NULL;
	}
	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
	/* NOTE(review): mapped FROMDEVICE here but synced TODEVICE below,
	   while speedo_init_rx_ring() maps the same layout BIDIRECTIONAL --
	   looks inconsistent with the DMA-API direction rules; confirm. */
	sp->rx_ring_dma[entry] =
		pci_map_single(sp->pdev, rxf,
					   PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
	skb->dev = dev;
	skb_reserve(skb, sizeof(struct RxFD));
	/* Field unused by the i82557 (see speedo_init_rx_ring). */
	rxf->rx_buf_addr = cpu_to_le32(0xffffffff);
	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
								   sizeof(struct RxFD), PCI_DMA_TODEVICE);
	return rxf;
}
1659
/*
 * Append descriptor 'rxf' (ring slot 'entry', DMA address 'rxf_dma') to
 * the tail of the hardware Rx chain: the new descriptor is written with
 * the 0xC0000000 end-of-chain bits set, linked from the previous tail,
 * and those bits are then cleared on the previous tail so the receiver
 * can advance into the new descriptor.  (0xC0000000 presumably carries
 * the EL/suspend semantics -- confirm against the i8255x manual.)
 */
static inline void speedo_rx_link(struct net_device *dev, int entry,
								  struct RxFD *rxf, dma_addr_t rxf_dma)
{
	struct speedo_private *sp = netdev_priv(dev);
	rxf->status = cpu_to_le32(0xC0000001); 	/* '1' for driver use only. */
	rxf->link = 0;			/* None yet. */
	rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
	sp->last_rxf->link = cpu_to_le32(rxf_dma);
	sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
	pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
								   sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = rxf;
	sp->last_rxf_dma = rxf_dma;
}
1674
/*
 * Ensure the ring slot at dirty_rx has a buffer and link its descriptor
 * onto the hardware Rx chain, then advance dirty_rx.
 *
 * On allocation failure: if !force, give up and return -1; if force,
 * "borrow" an skb from a later ring slot so at least the head of the
 * ring stays populated (returning -1 only if no later slot has one).
 * Returns 0 on success.
 */
static int speedo_refill_rx_buf(struct net_device *dev, int force)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry;
	struct RxFD *rxf;

	entry = sp->dirty_rx % RX_RING_SIZE;
	if (sp->rx_skbuff[entry] == NULL) {
		rxf = speedo_rx_alloc(dev, entry);
		if (rxf == NULL) {
			unsigned int forw;
			int forw_entry;
			/* Report the OOM only once per episode (RrOOMReported). */
			if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
				printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
					   dev->name, force);
				sp->rx_ring_state |= RrOOMReported;
			}
			speedo_show_state(dev);
			if (!force)
				return -1;	/* Better luck next time!  */
			/* Borrow an skb from one of next entries. */
			for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
				if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
					break;
			if (forw == sp->cur_rx)
				return -1;
			forw_entry = forw % RX_RING_SIZE;
			sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
			sp->rx_skbuff[forw_entry] = NULL;
			rxf = sp->rx_ringp[forw_entry];
			sp->rx_ringp[forw_entry] = NULL;
			sp->rx_ringp[entry] = rxf;
		}
	} else {
		rxf = sp->rx_ringp[entry];
	}
	speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
	sp->dirty_rx++;
	sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
	return 0;
}
1716
1717static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1718{
1719 struct speedo_private *sp = netdev_priv(dev);
1720
1721 /* Refill the RX ring. */
1722 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1723 speedo_refill_rx_buf(dev, force) != -1);
1724}
1725
/*
 * Receive loop: consume completed RxFDs starting at cur_rx.  Packets
 * shorter than rx_copybreak are copied into a fresh small skb; larger
 * ones hand the ring skb itself up the stack.  A packet sitting in the
 * *last* allocated descriptor is postponed (RrPostponed) until it is no
 * longer the chain tail, to dodge an out-of-memory corner case.  Each
 * consumed slot is refilled one-by-one while allocations succeed, with
 * a forced refill pass at the end.  Work is bounded by ring occupancy.
 */
static int
speedo_rx(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry = sp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
	int alloc_ok = 1;
	int npkts = 0;

	if (netif_msg_intr(sp))
		printk(KERN_DEBUG " In speedo_rx().\n");
	/* If we own the next entry, it's a new packet. Send it up. */
	while (sp->rx_ringp[entry] != NULL) {
		int status;
		int pkt_len;

		pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
			sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
		status = le32_to_cpu(sp->rx_ringp[entry]->status);
		pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;

		if (!(status & RxComplete))
			break;

		if (--rx_work_limit < 0)
			break;

		/* Check for a rare out-of-memory case: the current buffer is
		   the last buffer allocated in the RX ring. --SAW */
		if (sp->last_rxf == sp->rx_ringp[entry]) {
			/* Postpone the packet.  It'll be reaped at an interrupt when this
			   packet is no longer the last packet in the ring. */
			if (netif_msg_rx_err(sp))
				printk(KERN_DEBUG "%s: RX packet postponed!\n",
					   dev->name);
			sp->rx_ring_state |= RrPostponed;
			break;
		}

		if (netif_msg_rx_status(sp))
			printk(KERN_DEBUG "  speedo_rx() status %8.8x len %d.\n", status,
				   pkt_len);
		if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
			if (status & RxErrTooBig)
				printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
					   "status %8.8x!\n", dev->name, status);
			else if (! (status & RxOK)) {
				/* There was a fatal error.  This *should* be impossible. */
				sp->stats.rx_errors++;
				printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
					   "status %8.8x.\n",
					   dev->name, status);
			}
		} else {
			struct sk_buff *skb;

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
											sizeof(struct RxFD) + pkt_len,
											PCI_DMA_FROMDEVICE);

#if 1 || USE_IP_CSUM
				/* Packet is in one chunk -- we can copy + cksum. */
				skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
#else
				skb_copy_from_linear_data(sp->rx_skbuff[entry],
							  skb_put(skb, pkt_len),
							  pkt_len);
#endif
				pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
											   sizeof(struct RxFD) + pkt_len,
											   PCI_DMA_FROMDEVICE);
				npkts++;
			} else {
				/* Pass up the already-filled skbuff. */
				skb = sp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
						   dev->name);
					break;
				}
				sp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				npkts++;
				sp->rx_ringp[entry] = NULL;
				pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
								 PKT_BUF_SZ + sizeof(struct RxFD),
								 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			sp->stats.rx_packets++;
			sp->stats.rx_bytes += pkt_len;
		}
		entry = (++sp->cur_rx) % RX_RING_SIZE;
		sp->rx_ring_state &= ~RrPostponed;
		/* Refill the recently taken buffers.
		   Do it one-by-one to handle traffic bursts better. */
		if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
			alloc_ok = 0;
	}

	/* Try hard to refill the recently taken buffers. */
	speedo_refill_rx_buffers(dev, 1);

	if (npkts)
		sp->last_rx_time = jiffies;

	return 0;
}
1843
/*
 * Down the interface: stop the queue and media timer, mask interrupts,
 * partially reset the chip (a full shutdown fails to disable flow
 * control -- see comment below), free the IRQ, release every Rx/Tx skb
 * with its DMA mapping plus any queued multicast setup blocks, then
 * drop the device to PCI power state D2.  Always returns 0.
 */
static int
speedo_close(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int i;

	netdevice_stop(dev);
	netif_stop_queue(dev);

	if (netif_msg_ifdown(sp))
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
			   dev->name, ioread16(ioaddr + SCBStatus));

	/* Shut off the media monitoring timer. */
	del_timer_sync(&sp->timer);

	iowrite16(SCBMaskAll, ioaddr + SCBCmd);

	/* Shutting down the chip nicely fails to disable flow control. So.. */
	iowrite32(PortPartialReset, ioaddr + SCBPort);
	ioread32(ioaddr + SCBPort); /* flush posted write */
	/*
	 * The chip requires a 10 microsecond quiet period.  Wait here!
	 */
	udelay(10);

	free_irq(dev->irq, dev);
	speedo_show_state(dev);

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->rx_skbuff[i];
		sp->rx_skbuff[i] = NULL;
		/* Clear the Rx descriptors. */
		if (skb) {
			pci_unmap_single(sp->pdev,
					 sp->rx_ring_dma[i],
					 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
			dev_kfree_skb(skb);
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = sp->tx_skbuff[i];
		sp->tx_skbuff[i] = NULL;
		/* Clear the Tx descriptors. */
		if (skb) {
			pci_unmap_single(sp->pdev,
					 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(skb);
		}
	}

	/* Free multicast setting blocks. */
	for (i = 0; sp->mc_setup_head != NULL; i++) {
		struct speedo_mc_block *t;
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	if (netif_msg_ifdown(sp))
		printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);

	pci_set_power_state(sp->pdev, PCI_D2);

	return 0;
}
1914
1915/* The Speedo-3 has an especially awkward and unusable method of getting
1916 statistics out of the chip. It takes an unpredictable length of time
1917 for the dump-stats command to complete. To avoid a busy-wait loop we
1918 update the stats with the previous dump results, and then trigger a
1919 new dump.
1920
1921 Oh, and incoming frames are dropped while executing dump-stats!
1922 */
static struct net_device_stats *
speedo_get_stats(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;

	/* Update only if the previous dump finished.  0xA007 is the
	   dump-complete marker word written by the chip into lstats. */
	if (sp->lstats->done_marker == cpu_to_le32(0xA007)) {
		/* Fold the finished dump into the cumulative counters. */
		sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
		sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
		/*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
		sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
		sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
		sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
		sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
		sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
		sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
		sp->lstats->done_marker = 0x0000;	/* Re-arm for the next dump. */
		if (netif_running(dev)) {
			unsigned long flags;
			/* Take a spinlock to make wait_for_cmd_done and sending the
			   command atomic.  --SAW */
			spin_lock_irqsave(&sp->lock, flags);
			wait_for_cmd_done(dev, sp);
			iowrite8(CUDumpStats, ioaddr + SCBCmd);
			spin_unlock_irqrestore(&sp->lock, flags);
		}
	}
	return &sp->stats;
}
1955
/* ethtool GDRVINFO: report driver name, version string and PCI bus address. */
static void speedo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct speedo_private *sp = netdev_priv(dev);
	/* NOTE(review): strncpy() with size-1 never writes the final byte, so
	   NUL termination relies on the caller handing us a zeroed struct --
	   confirm, or switch to a guaranteed-terminating copy. */
	strncpy(info->driver, "eepro100", sizeof(info->driver)-1);
	strncpy(info->version, version, sizeof(info->version)-1);
	if (sp->pdev)
		strcpy(info->bus_info, pci_name(sp->pdev));
}
1964
1965static int speedo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1966{
1967 struct speedo_private *sp = netdev_priv(dev);
1968 spin_lock_irq(&sp->lock);
1969 mii_ethtool_gset(&sp->mii_if, ecmd);
1970 spin_unlock_irq(&sp->lock);
1971 return 0;
1972}
1973
1974static int speedo_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1975{
1976 struct speedo_private *sp = netdev_priv(dev);
1977 int res;
1978 spin_lock_irq(&sp->lock);
1979 res = mii_ethtool_sset(&sp->mii_if, ecmd);
1980 spin_unlock_irq(&sp->lock);
1981 return res;
1982}
1983
1984static int speedo_nway_reset(struct net_device *dev)
1985{
1986 struct speedo_private *sp = netdev_priv(dev);
1987 return mii_nway_restart(&sp->mii_if);
1988}
1989
1990static u32 speedo_get_link(struct net_device *dev)
1991{
1992 struct speedo_private *sp = netdev_priv(dev);
1993 return mii_link_ok(&sp->mii_if);
1994}
1995
1996static u32 speedo_get_msglevel(struct net_device *dev)
1997{
1998 struct speedo_private *sp = netdev_priv(dev);
1999 return sp->msg_enable;
2000}
2001
2002static void speedo_set_msglevel(struct net_device *dev, u32 v)
2003{
2004 struct speedo_private *sp = netdev_priv(dev);
2005 sp->msg_enable = v;
2006}
2007
/* ethtool operations supported by this driver: driver info, MII link
   get/set, autonegotiation restart, link status, and the debug message
   level accessors. */
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = speedo_get_drvinfo,
	.get_settings = speedo_get_settings,
	.set_settings = speedo_set_settings,
	.nway_reset = speedo_nway_reset,
	.get_link = speedo_get_link,
	.get_msglevel = speedo_get_msglevel,
	.set_msglevel = speedo_set_msglevel,
};
2017
/*
 * MII ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).  The device is
 * forced to D0 around the MDIO access and the media timer is stopped
 * (and re-armed afterwards) so MDIO access is serialized against the
 * timer routine -- but not yet against the timeout handler, see FIXME.
 */
static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);
	int phy = sp->phy[0] & 0x1f;
	int saved_acpi;
	int t;

    switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = phy;
		/* fall through -- SIOCGMIIPHY also performs the register read
		   below (no break in the original; presumably intentional). */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		/* FIXME: these operations need to be serialized with MDIO
		   access from the timeout handler.
		   They are currently serialized only with MDIO access from the
		   timer routine.  2000/05/09 SAW */
		saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
		t = del_timer_sync(&sp->timer);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		if (t)
			add_timer(&sp->timer); /* may be set to the past  --SAW */
		pci_set_power_state(sp->pdev, saved_acpi);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
		t = del_timer_sync(&sp->timer);
		mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
		if (t)
			add_timer(&sp->timer); /* may be set to the past  --SAW */
		pci_set_power_state(sp->pdev, saved_acpi);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
2057
2058/* Set or clear the multicast filter for this adaptor.
2059 This is very ugly with Intel chips -- we usually have to execute an
2060 entire configuration command, plus process a multicast command.
2061 This is complicated. We must put a large configuration command and
2062 an arbitrarily-sized multicast command in the transmit list.
2063 To minimize the disruption -- the previous command might have already
2064 loaded the link -- we convert the current command block, normally a Tx
2065 command, into a no-op and link it to the new command.
2066*/
2067static void set_rx_mode(struct net_device *dev)
2068{
2069 struct speedo_private *sp = netdev_priv(dev);
2070 void __iomem *ioaddr = sp->regs;
2071 struct descriptor *last_cmd;
2072 char new_rx_mode;
2073 unsigned long flags;
2074 int entry, i;
2075
2076 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2077 new_rx_mode = 3;
2078 } else if ((dev->flags & IFF_ALLMULTI) ||
2079 dev->mc_count > multicast_filter_limit) {
2080 new_rx_mode = 1;
2081 } else
2082 new_rx_mode = 0;
2083
2084 if (netif_msg_rx_status(sp))
2085 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2086 sp->rx_mode, new_rx_mode);
2087
2088 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2089 /* The Tx ring is full -- don't add anything! Hope the mode will be
2090 * set again later. */
2091 sp->rx_mode = -1;
2092 return;
2093 }
2094
2095 if (new_rx_mode != sp->rx_mode) {
2096 u8 *config_cmd_data;
2097
2098 spin_lock_irqsave(&sp->lock, flags);
2099 entry = sp->cur_tx++ % TX_RING_SIZE;
2100 last_cmd = sp->last_cmd;
2101 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2102
2103 sp->tx_skbuff[entry] = NULL; /* Redundant. */
2104 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2105 sp->tx_ring[entry].link =
2106 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2107 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2108 /* Construct a full CmdConfig frame. */
2109 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
2110 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2111 config_cmd_data[4] = rxdmacount;
2112 config_cmd_data[5] = txdmacount + 0x80;
2113 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2114 /* 0x80 doesn't disable FC 0x84 does.
2115 Disable Flow control since we are not ACK-ing any FC interrupts
2116 for now. --Dragan */
2117 config_cmd_data[19] = 0x84;
2118 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2119 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
2120 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2121 config_cmd_data[15] |= 0x80;
2122 config_cmd_data[8] = 0;
2123 }
2124 /* Trigger the command unit resume. */
2125 wait_for_cmd_done(dev, sp);
2126 clear_suspend(last_cmd);
2127 iowrite8(CUResume, ioaddr + SCBCmd);
2128 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2129 netif_stop_queue(dev);
2130 sp->tx_full = 1;
2131 }
2132 spin_unlock_irqrestore(&sp->lock, flags);
2133 }
2134
2135 if (new_rx_mode == 0 && dev->mc_count < 4) {
2136 /* The simple case of 0-3 multicast list entries occurs often, and
2137 fits within one tx_ring[] entry. */
2138 struct dev_mc_list *mclist;
2139 __le16 *setup_params, *eaddrs;
2140
2141 spin_lock_irqsave(&sp->lock, flags);
2142 entry = sp->cur_tx++ % TX_RING_SIZE;
2143 last_cmd = sp->last_cmd;
2144 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2145
2146 sp->tx_skbuff[entry] = NULL;
2147 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2148 sp->tx_ring[entry].link =
2149 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2150 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2151 setup_params = (__le16 *)&sp->tx_ring[entry].tx_desc_addr;
2152 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2153 /* Fill in the multicast addresses. */
2154 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2155 i++, mclist = mclist->next) {
2156 eaddrs = (__le16 *)mclist->dmi_addr;
2157 *setup_params++ = *eaddrs++;
2158 *setup_params++ = *eaddrs++;
2159 *setup_params++ = *eaddrs++;
2160 }
2161
2162 wait_for_cmd_done(dev, sp);
2163 clear_suspend(last_cmd);
2164 /* Immediately trigger the command unit resume. */
2165 iowrite8(CUResume, ioaddr + SCBCmd);
2166
2167 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2168 netif_stop_queue(dev);
2169 sp->tx_full = 1;
2170 }
2171 spin_unlock_irqrestore(&sp->lock, flags);
2172 } else if (new_rx_mode == 0) {
2173 struct dev_mc_list *mclist;
2174 __le16 *setup_params, *eaddrs;
2175 struct speedo_mc_block *mc_blk;
2176 struct descriptor *mc_setup_frm;
2177 int i;
2178
2179 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2180 GFP_ATOMIC);
2181 if (mc_blk == NULL) {
2182 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2183 dev->name);
2184 sp->rx_mode = -1; /* We failed, try again. */
2185 return;
2186 }
2187 mc_blk->next = NULL;
2188 mc_blk->len = 2 + multicast_filter_limit*6;
2189 mc_blk->frame_dma =
2190 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2191 PCI_DMA_TODEVICE);
2192 mc_setup_frm = &mc_blk->frame;
2193
2194 /* Fill the setup frame. */
2195 if (netif_msg_ifup(sp))
2196 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2197 dev->name, mc_setup_frm);
2198 mc_setup_frm->cmd_status =
2199 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2200 /* Link set below. */
2201 setup_params = (__le16 *)&mc_setup_frm->params;
2202 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2203 /* Fill in the multicast addresses. */
2204 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2205 i++, mclist = mclist->next) {
2206 eaddrs = (__le16 *)mclist->dmi_addr;
2207 *setup_params++ = *eaddrs++;
2208 *setup_params++ = *eaddrs++;
2209 *setup_params++ = *eaddrs++;
2210 }
2211
2212 /* Disable interrupts while playing with the Tx Cmd list. */
2213 spin_lock_irqsave(&sp->lock, flags);
2214
2215 if (sp->mc_setup_tail)
2216 sp->mc_setup_tail->next = mc_blk;
2217 else
2218 sp->mc_setup_head = mc_blk;
2219 sp->mc_setup_tail = mc_blk;
2220 mc_blk->tx = sp->cur_tx;
2221
2222 entry = sp->cur_tx++ % TX_RING_SIZE;
2223 last_cmd = sp->last_cmd;
2224 sp->last_cmd = mc_setup_frm;
2225
2226 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2227 sp->tx_skbuff[entry] = NULL;
2228 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2229 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2230
2231 /* Set the link in the setup frame. */
2232 mc_setup_frm->link =
2233 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2234
2235 pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
2236 mc_blk->len, PCI_DMA_TODEVICE);
2237
2238 wait_for_cmd_done(dev, sp);
2239 clear_suspend(last_cmd);
2240 /* Immediately trigger the command unit resume. */
2241 iowrite8(CUResume, ioaddr + SCBCmd);
2242
2243 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2244 netif_stop_queue(dev);
2245 sp->tx_full = 1;
2246 }
2247 spin_unlock_irqrestore(&sp->lock, flags);
2248
2249 if (netif_msg_rx_status(sp))
2250 printk(" CmdMCSetup frame length %d in entry %d.\n",
2251 dev->mc_count, entry);
2252 }
2253
2254 sp->rx_mode = new_rx_mode;
2255}
2256
2257#ifdef CONFIG_PM
/*
 * PCI power-management suspend hook: save PCI config space, quiesce the
 * interface if it is up, partially reset the chip, and drop to D3hot.
 */
static int eepro100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;

	/* Always save config state, even if the interface is down. */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Stop the media/watchdog timer before touching the hardware. */
	del_timer_sync(&sp->timer);

	/* Mark the device absent so the stack stops queueing packets,
	 * then put the chip into a partial-reset (idle) state. */
	netif_device_detach(dev);
	iowrite32(PortPartialReset, ioaddr + SCBPort);

	/* XXX call pci_set_power_state ()? */
	pci_disable_device(pdev);
	pci_set_power_state (pdev, PCI_D3hot);
	return 0;
}
2279
/*
 * PCI power-management resume hook: restore PCI state, re-enable the
 * device, and (if the interface was up) reinitialize the chip and
 * restart the receive mode and media timer.
 *
 * Returns 0 on success or the pci_enable_device() error code.
 */
static int eepro100_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int rc;

	/* Bring the device back to D0 and restore the config space that
	 * was saved in eepro100_suspend(). */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	pci_set_master(pdev);

	if (!netif_running(dev))
		return 0;

	/* I'm absolutely uncertain if this part of code may work.
	   The problems are:
	   - correct hardware reinitialization;
	   - correct driver behavior between different steps of the
	     reinitialization;
	   - serialization with other driver calls.
	   2000/03/08 SAW */
	/* Mask all interrupts while reprogramming the chip. */
	iowrite16(SCBMaskAll, ioaddr + SCBCmd);
	speedo_resume(dev);
	netif_device_attach(dev);
	/* Force set_rx_mode() to reprogram the RX filter from scratch. */
	sp->rx_mode = -1;
	sp->flow_ctrl = sp->partner = 0;
	set_rx_mode(dev);
	sp->timer.expires = RUN_AT(2*HZ);
	add_timer(&sp->timer);
	return 0;
}
2316#endif /* CONFIG_PM */
2317
/*
 * PCI remove hook: tear down one device instance in the reverse order
 * of probe — unregister the netdev first so no new activity starts,
 * then release I/O regions, unmap registers, free the DMA ring, and
 * finally free the net_device itself.
 */
static void __devexit eepro100_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct speedo_private *sp = netdev_priv(dev);

	unregister_netdev(dev);

	/* BAR 1 is the I/O port region, BAR 0 the memory-mapped region. */
	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));

	pci_iounmap(pdev, sp->regs);
	/* The TX ring and the statistics block were allocated together. */
	pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
								+ sizeof(struct speedo_stats),
						sp->tx_ring, sp->tx_ring_dma);
	pci_disable_device(pdev);
	free_netdev(dev);
}
2335
2336static struct pci_device_id eepro100_pci_tbl[] = {
2337 { PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
2338 { PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
2339 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2340 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2341 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2342 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2343 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2344 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2345 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2346 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2347 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2348 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2349 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2350 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2351 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2352 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2353 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2354 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2355 { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2356 { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2357 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2358 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2359 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2360 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2361 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2362 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2363 { 0,}
2364};
2365MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2366
/* PCI driver descriptor tying the ID table to the probe/remove and
 * (optionally) power-management entry points. */
static struct pci_driver eepro100_driver = {
	.name		= "eepro100",
	.id_table	= eepro100_pci_tbl,
	.probe		= eepro100_init_one,
	/* __devexit_p() compiles to NULL when hotplug removal is configured out. */
	.remove		= __devexit_p(eepro100_remove_one),
#ifdef CONFIG_PM
	.suspend	= eepro100_suspend,
	.resume		= eepro100_resume,
#endif /* CONFIG_PM */
};
2377
2378static int __init eepro100_init_module(void)
2379{
2380#ifdef MODULE
2381 printk(version);
2382#endif
2383 return pci_register_driver(&eepro100_driver);
2384}
2385
/* Module exit point: unregister the PCI driver, which triggers
 * eepro100_remove_one() for every bound device. */
static void __exit eepro100_cleanup_module(void)
{
	pci_unregister_driver(&eepro100_driver);
}
2390
/* Hook the init/exit routines into the kernel's module machinery. */
module_init(eepro100_init_module);
module_exit(eepro100_cleanup_module);
2393
2394/*
2395 * Local variables:
2396 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
2397 * c-indent-level: 4
2398 * c-basic-offset: 4
2399 * tab-width: 4
2400 * End:
2401 */
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index b751c1b96cfa..9ff3f2f5e382 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -967,7 +967,6 @@ static void eexp_hw_rx_pio(struct net_device *dev)
967 insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1); 967 insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1);
968 skb->protocol = eth_type_trans(skb,dev); 968 skb->protocol = eth_type_trans(skb,dev);
969 netif_rx(skb); 969 netif_rx(skb);
970 dev->last_rx = jiffies;
971 dev->stats.rx_packets++; 970 dev->stats.rx_packets++;
972 dev->stats.rx_bytes += pkt_len; 971 dev->stats.rx_bytes += pkt_len;
973 } 972 }
@@ -1047,7 +1046,7 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
1047/* 1046/*
1048 * Sanity check the suspected EtherExpress card 1047 * Sanity check the suspected EtherExpress card
1049 * Read hardware address, reset card, size memory and initialize buffer 1048 * Read hardware address, reset card, size memory and initialize buffer
1050 * memory pointers. These are held in dev->priv, in case someone has more 1049 * memory pointers. These are held in netdev_priv(), in case someone has more
1051 * than one card in a machine. 1050 * than one card in a machine.
1052 */ 1051 */
1053 1052
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 002d918fb4c7..9930d5f8b9e1 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0095" 43#define DRV_VERSION "EHEA_0096"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 422fcb93e2c3..035aa7dfc5cd 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -728,7 +728,6 @@ static int ehea_proc_rwqes(struct net_device *dev,
728 } 728 }
729 729
730 ehea_proc_skb(pr, cqe, skb); 730 ehea_proc_skb(pr, cqe, skb);
731 dev->last_rx = jiffies;
732 } else { 731 } else {
733 pr->p_stats.poll_receive_errors++; 732 pr->p_stats.poll_receive_errors++;
734 port_reset = ehea_treat_poll_error(pr, rq, cqe, 733 port_reset = ehea_treat_poll_error(pr, rq, cqe,
@@ -831,7 +830,7 @@ static int ehea_poll(struct napi_struct *napi, int budget)
831 while ((rx != budget) || force_irq) { 830 while ((rx != budget) || force_irq) {
832 pr->poll_counter = 0; 831 pr->poll_counter = 0;
833 force_irq = 0; 832 force_irq = 0;
834 netif_rx_complete(dev, napi); 833 netif_rx_complete(napi);
835 ehea_reset_cq_ep(pr->recv_cq); 834 ehea_reset_cq_ep(pr->recv_cq);
836 ehea_reset_cq_ep(pr->send_cq); 835 ehea_reset_cq_ep(pr->send_cq);
837 ehea_reset_cq_n1(pr->recv_cq); 836 ehea_reset_cq_n1(pr->recv_cq);
@@ -860,7 +859,7 @@ static void ehea_netpoll(struct net_device *dev)
860 int i; 859 int i;
861 860
862 for (i = 0; i < port->num_def_qps; i++) 861 for (i = 0; i < port->num_def_qps; i++)
863 netif_rx_schedule(dev, &port->port_res[i].napi); 862 netif_rx_schedule(&port->port_res[i].napi);
864} 863}
865#endif 864#endif
866 865
@@ -868,7 +867,7 @@ static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
868{ 867{
869 struct ehea_port_res *pr = param; 868 struct ehea_port_res *pr = param;
870 869
871 netif_rx_schedule(pr->port->netdev, &pr->napi); 870 netif_rx_schedule(&pr->napi);
872 871
873 return IRQ_HANDLED; 872 return IRQ_HANDLED;
874} 873}
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 9d006878f045..225c692b5d99 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -182,7 +182,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
182 goto out_kill_hwq; 182 goto out_kill_hwq;
183 } 183 }
184 } else { 184 } else {
185 if ((hret != H_PAGE_REGISTERED) || (!vpage)) { 185 if (hret != H_PAGE_REGISTERED) {
186 ehea_error("CQ: registration of page failed " 186 ehea_error("CQ: registration of page failed "
187 "hret=%lx\n", hret); 187 "hret=%lx\n", hret);
188 goto out_kill_hwq; 188 goto out_kill_hwq;
@@ -303,7 +303,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
303 goto out_kill_hwq; 303 goto out_kill_hwq;
304 304
305 } else { 305 } else {
306 if ((hret != H_PAGE_REGISTERED) || (!vpage)) 306 if (hret != H_PAGE_REGISTERED)
307 goto out_kill_hwq; 307 goto out_kill_hwq;
308 308
309 } 309 }
@@ -653,7 +653,7 @@ static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add
653 int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT); 653 int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
654 int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT); 654 int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
655 int idx = i & EHEA_INDEX_MASK; 655 int idx = i & EHEA_INDEX_MASK;
656 656
657 if (add) { 657 if (add) {
658 int ret = ehea_init_bmap(ehea_bmap, top, dir); 658 int ret = ehea_init_bmap(ehea_bmap, top, dir);
659 if (ret) 659 if (ret)
@@ -780,7 +780,7 @@ void ehea_destroy_busmap(void)
780 780
781 kfree(ehea_bmap); 781 kfree(ehea_bmap);
782 ehea_bmap = NULL; 782 ehea_bmap = NULL;
783out_destroy: 783out_destroy:
784 mutex_unlock(&ehea_busmap_mutex); 784 mutex_unlock(&ehea_busmap_mutex);
785} 785}
786 786
@@ -858,10 +858,10 @@ static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
858 for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) { 858 for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
859 if (!ehea_bmap->top[top]->dir[dir]->ent[idx]) 859 if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
860 continue; 860 continue;
861 861
862 hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr); 862 hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
863 if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) 863 if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
864 return hret; 864 return hret;
865 } 865 }
866 return hret; 866 return hret;
867} 867}
@@ -879,7 +879,7 @@ static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
879 879
880 hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr); 880 hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
881 if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) 881 if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
882 return hret; 882 return hret;
883 } 883 }
884 return hret; 884 return hret;
885} 885}
@@ -893,7 +893,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
893 893
894 unsigned long top; 894 unsigned long top;
895 895
896 pt = kzalloc(PAGE_SIZE, GFP_KERNEL); 896 pt = (void *)get_zeroed_page(GFP_KERNEL);
897 if (!pt) { 897 if (!pt) {
898 ehea_error("no mem"); 898 ehea_error("no mem");
899 ret = -ENOMEM; 899 ret = -ENOMEM;
@@ -937,7 +937,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
937 mr->adapter = adapter; 937 mr->adapter = adapter;
938 ret = 0; 938 ret = 0;
939out: 939out:
940 kfree(pt); 940 free_page((unsigned long)pt);
941 return ret; 941 return ret;
942} 942}
943 943
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 36cb6e95b465..b0ef46c51a9d 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -196,16 +196,32 @@ static void enc28j60_soft_reset(struct enc28j60_net *priv)
196 */ 196 */
197static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr) 197static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr)
198{ 198{
199 if ((addr & BANK_MASK) != priv->bank) { 199 u8 b = (addr & BANK_MASK) >> 5;
200 u8 b = (addr & BANK_MASK) >> 5;
201 200
202 if (b != (ECON1_BSEL1 | ECON1_BSEL0)) 201 /* These registers (EIE, EIR, ESTAT, ECON2, ECON1)
202 * are present in all banks, no need to switch bank
203 */
204 if (addr >= EIE && addr <= ECON1)
205 return;
206
207 /* Clear or set each bank selection bit as needed */
208 if ((b & ECON1_BSEL0) != (priv->bank & ECON1_BSEL0)) {
209 if (b & ECON1_BSEL0)
210 spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1,
211 ECON1_BSEL0);
212 else
203 spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1, 213 spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1,
204 ECON1_BSEL1 | ECON1_BSEL0); 214 ECON1_BSEL0);
205 if (b != 0)
206 spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1, b);
207 priv->bank = (addr & BANK_MASK);
208 } 215 }
216 if ((b & ECON1_BSEL1) != (priv->bank & ECON1_BSEL1)) {
217 if (b & ECON1_BSEL1)
218 spi_write_op(priv, ENC28J60_BIT_FIELD_SET, ECON1,
219 ECON1_BSEL1);
220 else
221 spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, ECON1,
222 ECON1_BSEL1);
223 }
224 priv->bank = b;
209} 225}
210 226
211/* 227/*
@@ -477,12 +493,10 @@ static int enc28j60_set_hw_macaddr(struct net_device *ndev)
477 493
478 mutex_lock(&priv->lock); 494 mutex_lock(&priv->lock);
479 if (!priv->hw_enable) { 495 if (!priv->hw_enable) {
480 if (netif_msg_drv(priv)) { 496 if (netif_msg_drv(priv))
481 DECLARE_MAC_BUF(mac);
482 printk(KERN_INFO DRV_NAME 497 printk(KERN_INFO DRV_NAME
483 ": %s: Setting MAC address to %s\n", 498 ": %s: Setting MAC address to %pM\n",
484 ndev->name, print_mac(mac, ndev->dev_addr)); 499 ndev->name, ndev->dev_addr);
485 }
486 /* NOTE: MAC address in ENC28J60 is byte-backward */ 500 /* NOTE: MAC address in ENC28J60 is byte-backward */
487 nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]); 501 nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]);
488 nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]); 502 nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]);
@@ -958,7 +972,6 @@ static void enc28j60_hw_rx(struct net_device *ndev)
958 /* update statistics */ 972 /* update statistics */
959 ndev->stats.rx_packets++; 973 ndev->stats.rx_packets++;
960 ndev->stats.rx_bytes += len; 974 ndev->stats.rx_bytes += len;
961 ndev->last_rx = jiffies;
962 netif_rx_ni(skb); 975 netif_rx_ni(skb);
963 } 976 }
964 } 977 }
@@ -1340,11 +1353,9 @@ static int enc28j60_net_open(struct net_device *dev)
1340 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); 1353 printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
1341 1354
1342 if (!is_valid_ether_addr(dev->dev_addr)) { 1355 if (!is_valid_ether_addr(dev->dev_addr)) {
1343 if (netif_msg_ifup(priv)) { 1356 if (netif_msg_ifup(priv))
1344 DECLARE_MAC_BUF(mac); 1357 dev_err(&dev->dev, "invalid MAC address %pM\n",
1345 dev_err(&dev->dev, "invalid MAC address %s\n", 1358 dev->dev_addr);
1346 print_mac(mac, dev->dev_addr));
1347 }
1348 return -EADDRNOTAVAIL; 1359 return -EADDRNOTAVAIL;
1349 } 1360 }
1350 /* Reset the hardware here (and take it out of low power mode) */ 1361 /* Reset the hardware here (and take it out of low power mode) */
@@ -1465,7 +1476,7 @@ enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1465 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 1476 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1466 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1477 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1467 strlcpy(info->bus_info, 1478 strlcpy(info->bus_info,
1468 dev->dev.parent->bus_id, sizeof(info->bus_info)); 1479 dev_name(dev->dev.parent), sizeof(info->bus_info));
1469} 1480}
1470 1481
1471static int 1482static int
diff --git a/drivers/net/enic/cq_desc.h b/drivers/net/enic/cq_desc.h
index c036a8bfd043..1eb289f773bf 100644
--- a/drivers/net/enic/cq_desc.h
+++ b/drivers/net/enic/cq_desc.h
@@ -44,9 +44,10 @@ struct cq_desc {
44 u8 type_color; 44 u8 type_color;
45}; 45};
46 46
47#define CQ_DESC_TYPE_BITS 7 47#define CQ_DESC_TYPE_BITS 4
48#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1) 48#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
49#define CQ_DESC_COLOR_MASK 1 49#define CQ_DESC_COLOR_MASK 1
50#define CQ_DESC_COLOR_SHIFT 7
50#define CQ_DESC_Q_NUM_BITS 10 51#define CQ_DESC_Q_NUM_BITS 10
51#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1) 52#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
52#define CQ_DESC_COMP_NDX_BITS 12 53#define CQ_DESC_COMP_NDX_BITS 12
@@ -58,7 +59,7 @@ static inline void cq_desc_dec(const struct cq_desc *desc_arg,
58 const struct cq_desc *desc = desc_arg; 59 const struct cq_desc *desc = desc_arg;
59 const u8 type_color = desc->type_color; 60 const u8 type_color = desc->type_color;
60 61
61 *color = (type_color >> CQ_DESC_TYPE_BITS) & CQ_DESC_COLOR_MASK; 62 *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
62 63
63 /* 64 /*
64 * Make sure color bit is read from desc *before* other fields 65 * Make sure color bit is read from desc *before* other fields
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 7f677e89a788..a832cc5d6a1e 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -33,7 +33,7 @@
33 33
34#define DRV_NAME "enic" 34#define DRV_NAME "enic"
35#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver" 35#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
36#define DRV_VERSION "0.0.1-18163.472-k1" 36#define DRV_VERSION "1.0.0.648"
37#define DRV_COPYRIGHT "Copyright 2008 Cisco Systems, Inc" 37#define DRV_COPYRIGHT "Copyright 2008 Cisco Systems, Inc"
38#define PFX DRV_NAME ": " 38#define PFX DRV_NAME ": "
39 39
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 180e968dc54d..d039e16f2763 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -273,6 +273,8 @@ static struct ethtool_ops enic_ethtool_ops = {
273 .set_sg = ethtool_op_set_sg, 273 .set_sg = ethtool_op_set_sg,
274 .get_tso = ethtool_op_get_tso, 274 .get_tso = ethtool_op_get_tso,
275 .set_tso = enic_set_tso, 275 .set_tso = enic_set_tso,
276 .get_flags = ethtool_op_get_flags,
277 .set_flags = ethtool_op_set_flags,
276}; 278};
277 279
278static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) 280static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
@@ -409,8 +411,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
409 } 411 }
410 412
411 if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) { 413 if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
412 if (netif_rx_schedule_prep(netdev, &enic->napi)) 414 if (netif_rx_schedule_prep(&enic->napi))
413 __netif_rx_schedule(netdev, &enic->napi); 415 __netif_rx_schedule(&enic->napi);
414 } else { 416 } else {
415 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); 417 vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
416 } 418 }
@@ -438,7 +440,7 @@ static irqreturn_t enic_isr_msi(int irq, void *data)
438 * writes). 440 * writes).
439 */ 441 */
440 442
441 netif_rx_schedule(enic->netdev, &enic->napi); 443 netif_rx_schedule(&enic->napi);
442 444
443 return IRQ_HANDLED; 445 return IRQ_HANDLED;
444} 446}
@@ -448,7 +450,7 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
448 struct enic *enic = data; 450 struct enic *enic = data;
449 451
450 /* schedule NAPI polling for RQ cleanup */ 452 /* schedule NAPI polling for RQ cleanup */
451 netif_rx_schedule(enic->netdev, &enic->napi); 453 netif_rx_schedule(&enic->napi);
452 454
453 return IRQ_HANDLED; 455 return IRQ_HANDLED;
454} 456}
@@ -895,6 +897,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
895 int skipped, void *opaque) 897 int skipped, void *opaque)
896{ 898{
897 struct enic *enic = vnic_dev_priv(rq->vdev); 899 struct enic *enic = vnic_dev_priv(rq->vdev);
900 struct net_device *netdev = enic->netdev;
898 struct sk_buff *skb; 901 struct sk_buff *skb;
899 902
900 u8 type, color, eop, sop, ingress_port, vlan_stripped; 903 u8 type, color, eop, sop, ingress_port, vlan_stripped;
@@ -929,7 +932,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
929 if (net_ratelimit()) 932 if (net_ratelimit())
930 printk(KERN_ERR PFX 933 printk(KERN_ERR PFX
931 "%s: packet error: bad FCS\n", 934 "%s: packet error: bad FCS\n",
932 enic->netdev->name); 935 netdev->name);
933 } 936 }
934 937
935 dev_kfree_skb_any(skb); 938 dev_kfree_skb_any(skb);
@@ -943,19 +946,18 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
943 */ 946 */
944 947
945 skb_put(skb, bytes_written); 948 skb_put(skb, bytes_written);
946 skb->protocol = eth_type_trans(skb, enic->netdev); 949 skb->protocol = eth_type_trans(skb, netdev);
947 950
948 if (enic->csum_rx_enabled && !csum_not_calc) { 951 if (enic->csum_rx_enabled && !csum_not_calc) {
949 skb->csum = htons(checksum); 952 skb->csum = htons(checksum);
950 skb->ip_summed = CHECKSUM_COMPLETE; 953 skb->ip_summed = CHECKSUM_COMPLETE;
951 } 954 }
952 955
953 skb->dev = enic->netdev; 956 skb->dev = netdev;
954 enic->netdev->last_rx = jiffies;
955 957
956 if (enic->vlan_group && vlan_stripped) { 958 if (enic->vlan_group && vlan_stripped) {
957 959
958 if (ENIC_SETTING(enic, LRO) && ipv4) 960 if ((netdev->features & NETIF_F_LRO) && ipv4)
959 lro_vlan_hwaccel_receive_skb(&enic->lro_mgr, 961 lro_vlan_hwaccel_receive_skb(&enic->lro_mgr,
960 skb, enic->vlan_group, 962 skb, enic->vlan_group,
961 vlan, cq_desc); 963 vlan, cq_desc);
@@ -965,7 +967,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
965 967
966 } else { 968 } else {
967 969
968 if (ENIC_SETTING(enic, LRO) && ipv4) 970 if ((netdev->features & NETIF_F_LRO) && ipv4)
969 lro_receive_skb(&enic->lro_mgr, skb, cq_desc); 971 lro_receive_skb(&enic->lro_mgr, skb, cq_desc);
970 else 972 else
971 netif_receive_skb(skb); 973 netif_receive_skb(skb);
@@ -1063,10 +1065,10 @@ static int enic_poll(struct napi_struct *napi, int budget)
1063 /* If no work done, flush all LROs and exit polling 1065 /* If no work done, flush all LROs and exit polling
1064 */ 1066 */
1065 1067
1066 if (ENIC_SETTING(enic, LRO)) 1068 if (netdev->features & NETIF_F_LRO)
1067 lro_flush_all(&enic->lro_mgr); 1069 lro_flush_all(&enic->lro_mgr);
1068 1070
1069 netif_rx_complete(netdev, napi); 1071 netif_rx_complete(napi);
1070 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); 1072 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
1071 } 1073 }
1072 1074
@@ -1107,10 +1109,10 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1107 /* If no work done, flush all LROs and exit polling 1109 /* If no work done, flush all LROs and exit polling
1108 */ 1110 */
1109 1111
1110 if (ENIC_SETTING(enic, LRO)) 1112 if (netdev->features & NETIF_F_LRO)
1111 lro_flush_all(&enic->lro_mgr); 1113 lro_flush_all(&enic->lro_mgr);
1112 1114
1113 netif_rx_complete(netdev, napi); 1115 netif_rx_complete(napi);
1114 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); 1116 vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
1115 } 1117 }
1116 1118
@@ -1591,6 +1593,23 @@ static void enic_iounmap(struct enic *enic)
1591 iounmap(enic->bar0.vaddr); 1593 iounmap(enic->bar0.vaddr);
1592} 1594}
1593 1595
1596static const struct net_device_ops enic_netdev_ops = {
1597 .ndo_open = enic_open,
1598 .ndo_stop = enic_stop,
1599 .ndo_start_xmit = enic_hard_start_xmit,
1600 .ndo_get_stats = enic_get_stats,
1601 .ndo_validate_addr = eth_validate_addr,
1602 .ndo_set_multicast_list = enic_set_multicast_list,
1603 .ndo_change_mtu = enic_change_mtu,
1604 .ndo_vlan_rx_register = enic_vlan_rx_register,
1605 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
1606 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
1607 .ndo_tx_timeout = enic_tx_timeout,
1608#ifdef CONFIG_NET_POLL_CONTROLLER
1609 .ndo_poll_controller = enic_poll_controller,
1610#endif
1611};
1612
1594static int __devinit enic_probe(struct pci_dev *pdev, 1613static int __devinit enic_probe(struct pci_dev *pdev,
1595 const struct pci_device_id *ent) 1614 const struct pci_device_id *ent)
1596{ 1615{
@@ -1746,13 +1765,13 @@ static int __devinit enic_probe(struct pci_dev *pdev,
1746 } 1765 }
1747 1766
1748 /* Get available resource counts 1767 /* Get available resource counts
1749 */ 1768 */
1750 1769
1751 enic_get_res_counts(enic); 1770 enic_get_res_counts(enic);
1752 1771
1753 /* Set interrupt mode based on resource counts and system 1772 /* Set interrupt mode based on resource counts and system
1754 * capabilities 1773 * capabilities
1755 */ 1774 */
1756 1775
1757 err = enic_set_intr_mode(enic); 1776 err = enic_set_intr_mode(enic);
1758 if (err) { 1777 if (err) {
@@ -1814,21 +1833,9 @@ static int __devinit enic_probe(struct pci_dev *pdev,
1814 goto err_out_free_vnic_resources; 1833 goto err_out_free_vnic_resources;
1815 } 1834 }
1816 1835
1817 netdev->open = enic_open; 1836 netdev->netdev_ops = &enic_netdev_ops;
1818 netdev->stop = enic_stop;
1819 netdev->hard_start_xmit = enic_hard_start_xmit;
1820 netdev->get_stats = enic_get_stats;
1821 netdev->set_multicast_list = enic_set_multicast_list;
1822 netdev->change_mtu = enic_change_mtu;
1823 netdev->vlan_rx_register = enic_vlan_rx_register;
1824 netdev->vlan_rx_add_vid = enic_vlan_rx_add_vid;
1825 netdev->vlan_rx_kill_vid = enic_vlan_rx_kill_vid;
1826 netdev->tx_timeout = enic_tx_timeout;
1827 netdev->watchdog_timeo = 2 * HZ; 1837 netdev->watchdog_timeo = 2 * HZ;
1828 netdev->ethtool_ops = &enic_ethtool_ops; 1838 netdev->ethtool_ops = &enic_ethtool_ops;
1829#ifdef CONFIG_NET_POLL_CONTROLLER
1830 netdev->poll_controller = enic_poll_controller;
1831#endif
1832 1839
1833 switch (vnic_dev_get_intr_mode(enic->vdev)) { 1840 switch (vnic_dev_get_intr_mode(enic->vdev)) {
1834 default: 1841 default:
@@ -1845,22 +1852,23 @@ static int __devinit enic_probe(struct pci_dev *pdev,
1845 if (ENIC_SETTING(enic, TSO)) 1852 if (ENIC_SETTING(enic, TSO))
1846 netdev->features |= NETIF_F_TSO | 1853 netdev->features |= NETIF_F_TSO |
1847 NETIF_F_TSO6 | NETIF_F_TSO_ECN; 1854 NETIF_F_TSO6 | NETIF_F_TSO_ECN;
1855 if (ENIC_SETTING(enic, LRO))
1856 netdev->features |= NETIF_F_LRO;
1848 if (using_dac) 1857 if (using_dac)
1849 netdev->features |= NETIF_F_HIGHDMA; 1858 netdev->features |= NETIF_F_HIGHDMA;
1850 1859
1851 1860
1852 enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM); 1861 enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
1853 1862
1854 if (ENIC_SETTING(enic, LRO)) { 1863 enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
1855 enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR; 1864 enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
1856 enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC; 1865 enic->lro_mgr.lro_arr = enic->lro_desc;
1857 enic->lro_mgr.lro_arr = enic->lro_desc; 1866 enic->lro_mgr.get_skb_header = enic_get_skb_header;
1858 enic->lro_mgr.get_skb_header = enic_get_skb_header; 1867 enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1859 enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; 1868 enic->lro_mgr.dev = netdev;
1860 enic->lro_mgr.dev = netdev; 1869 enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
1861 enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE; 1870 enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1862 enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; 1871
1863 }
1864 1872
1865 err = register_netdev(netdev); 1873 err = register_netdev(netdev);
1866 if (err) { 1874 if (err) {
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 95184b9108ef..e5fc9384f8f5 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -90,11 +90,8 @@ int enic_get_vnic_config(struct enic *enic)
90 90
91 c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer); 91 c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
92 92
93 printk(KERN_INFO PFX "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x " 93 printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n",
94 "wq/rq %d/%d\n", 94 enic->mac_addr, c->wq_desc_count, c->rq_desc_count);
95 enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
96 enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
97 c->wq_desc_count, c->rq_desc_count);
98 printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d " 95 printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
99 "intr timer %d\n", 96 "intr timer %d\n",
100 c->mtu, ENIC_SETTING(enic, TXCSUM), 97 c->mtu, ENIC_SETTING(enic, TXCSUM),
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 68534a29b7ac..7bf272fa859b 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -58,8 +58,6 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
58 (u16)vlan_tag, 58 (u16)vlan_tag,
59 0 /* loopback */); 59 0 /* loopback */);
60 60
61 wmb();
62
63 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); 61 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
64} 62}
65 63
@@ -127,8 +125,6 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
127 (u64)dma_addr | VNIC_PADDR_TARGET, 125 (u64)dma_addr | VNIC_PADDR_TARGET,
128 type, (u16)len); 126 type, (u16)len);
129 127
130 wmb();
131
132 vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len); 128 vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
133} 129}
134 130
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 4d104f5c30f9..11708579b6ce 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -43,6 +43,7 @@ struct vnic_dev {
43 struct vnic_devcmd_notify *notify; 43 struct vnic_devcmd_notify *notify;
44 struct vnic_devcmd_notify notify_copy; 44 struct vnic_devcmd_notify notify_copy;
45 dma_addr_t notify_pa; 45 dma_addr_t notify_pa;
46 u32 notify_sz;
46 u32 *linkstatus; 47 u32 *linkstatus;
47 dma_addr_t linkstatus_pa; 48 dma_addr_t linkstatus_pa;
48 struct vnic_stats *stats; 49 struct vnic_stats *stats;
@@ -235,14 +236,6 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
235 struct vnic_devcmd __iomem *devcmd = vdev->devcmd; 236 struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
236 int delay; 237 int delay;
237 u32 status; 238 u32 status;
238 int dev_cmd_err[] = {
239 /* convert from fw's version of error.h to host's version */
240 0, /* ERR_SUCCESS */
241 EINVAL, /* ERR_EINVAL */
242 EFAULT, /* ERR_EFAULT */
243 EPERM, /* ERR_EPERM */
244 EBUSY, /* ERR_EBUSY */
245 };
246 int err; 239 int err;
247 240
248 status = ioread32(&devcmd->status); 241 status = ioread32(&devcmd->status);
@@ -270,10 +263,12 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
270 if (!(status & STAT_BUSY)) { 263 if (!(status & STAT_BUSY)) {
271 264
272 if (status & STAT_ERROR) { 265 if (status & STAT_ERROR) {
273 err = dev_cmd_err[(int)readq(&devcmd->args[0])]; 266 err = (int)readq(&devcmd->args[0]);
274 printk(KERN_ERR "Error %d devcmd %d\n", 267 if (err != ERR_ECMDUNKNOWN ||
275 err, _CMD_N(cmd)); 268 cmd != CMD_CAPABILITY)
276 return -err; 269 printk(KERN_ERR "Error %d devcmd %d\n",
270 err, _CMD_N(cmd));
271 return err;
277 } 272 }
278 273
279 if (_CMD_DIR(cmd) & _CMD_DIR_READ) { 274 if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
@@ -290,6 +285,17 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
290 return -ETIMEDOUT; 285 return -ETIMEDOUT;
291} 286}
292 287
288static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
289{
290 u64 a0 = (u32)cmd, a1 = 0;
291 int wait = 1000;
292 int err;
293
294 err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
295
296 return !(err || a0);
297}
298
293int vnic_dev_fw_info(struct vnic_dev *vdev, 299int vnic_dev_fw_info(struct vnic_dev *vdev,
294 struct vnic_devcmd_fw_info **fw_info) 300 struct vnic_devcmd_fw_info **fw_info)
295{ 301{
@@ -489,10 +495,7 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
489 495
490 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); 496 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
491 if (err) 497 if (err)
492 printk(KERN_ERR 498 printk(KERN_ERR "Can't add addr [%pM], %d\n", addr, err);
493 "Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
494 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
495 err);
496} 499}
497 500
498void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) 501void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
@@ -507,16 +510,14 @@ void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
507 510
508 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); 511 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
509 if (err) 512 if (err)
510 printk(KERN_ERR 513 printk(KERN_ERR "Can't del addr [%pM], %d\n", addr, err);
511 "Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
512 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
513 err);
514} 514}
515 515
516int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) 516int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
517{ 517{
518 u64 a0, a1; 518 u64 a0, a1;
519 int wait = 1000; 519 int wait = 1000;
520 int r;
520 521
521 if (!vdev->notify) { 522 if (!vdev->notify) {
522 vdev->notify = pci_alloc_consistent(vdev->pdev, 523 vdev->notify = pci_alloc_consistent(vdev->pdev,
@@ -524,13 +525,16 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
524 &vdev->notify_pa); 525 &vdev->notify_pa);
525 if (!vdev->notify) 526 if (!vdev->notify)
526 return -ENOMEM; 527 return -ENOMEM;
528 memset(vdev->notify, 0, sizeof(struct vnic_devcmd_notify));
527 } 529 }
528 530
529 a0 = vdev->notify_pa; 531 a0 = vdev->notify_pa;
530 a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; 532 a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
531 a1 += sizeof(struct vnic_devcmd_notify); 533 a1 += sizeof(struct vnic_devcmd_notify);
532 534
533 return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); 535 r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
536 vdev->notify_sz = (r == 0) ? (u32)a1 : 0;
537 return r;
534} 538}
535 539
536void vnic_dev_notify_unset(struct vnic_dev *vdev) 540void vnic_dev_notify_unset(struct vnic_dev *vdev)
@@ -543,22 +547,22 @@ void vnic_dev_notify_unset(struct vnic_dev *vdev)
543 a1 += sizeof(struct vnic_devcmd_notify); 547 a1 += sizeof(struct vnic_devcmd_notify);
544 548
545 vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); 549 vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
550 vdev->notify_sz = 0;
546} 551}
547 552
548static int vnic_dev_notify_ready(struct vnic_dev *vdev) 553static int vnic_dev_notify_ready(struct vnic_dev *vdev)
549{ 554{
550 u32 *words; 555 u32 *words;
551 unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4; 556 unsigned int nwords = vdev->notify_sz / 4;
552 unsigned int i; 557 unsigned int i;
553 u32 csum; 558 u32 csum;
554 559
555 if (!vdev->notify) 560 if (!vdev->notify || !vdev->notify_sz)
556 return 0; 561 return 0;
557 562
558 do { 563 do {
559 csum = 0; 564 csum = 0;
560 memcpy(&vdev->notify_copy, vdev->notify, 565 memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
561 sizeof(struct vnic_devcmd_notify));
562 words = (u32 *)&vdev->notify_copy; 566 words = (u32 *)&vdev->notify_copy;
563 for (i = 1; i < nwords; i++) 567 for (i = 1; i < nwords; i++)
564 csum += words[i]; 568 csum += words[i];
@@ -571,7 +575,20 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
571{ 575{
572 u64 a0 = (u32)arg, a1 = 0; 576 u64 a0 = (u32)arg, a1 = 0;
573 int wait = 1000; 577 int wait = 1000;
574 return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); 578 int r = 0;
579
580 if (vnic_dev_capable(vdev, CMD_INIT))
581 r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
582 else {
583 vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
584 if (a0 & CMD_INITF_DEFAULT_MAC) {
585 // Emulate these for old CMD_INIT_v1 which
586 // didn't pass a0 so no CMD_INITF_*.
587 vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
588 vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
589 }
590 }
591 return r;
575} 592}
576 593
577int vnic_dev_link_status(struct vnic_dev *vdev) 594int vnic_dev_link_status(struct vnic_dev *vdev)
@@ -672,3 +689,4 @@ err_out:
672 return NULL; 689 return NULL;
673} 690}
674 691
692
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index d8617a3373b1..8062c75154e6 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -168,7 +168,8 @@ enum vnic_devcmd_cmd {
168 CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25), 168 CMD_CLOSE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 25),
169 169
170 /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */ 170 /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
171 CMD_INIT = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26), 171/***** Replaced by CMD_INIT *****/
172 CMD_INIT_v1 = _CMDCNW(_CMD_DIR_READ, _CMD_VTYPE_ALL, 26),
172 173
173 /* variant of CMD_INIT, with provisioning info 174 /* variant of CMD_INIT, with provisioning info
174 * (u64)a0=paddr of vnic_devcmd_provinfo 175 * (u64)a0=paddr of vnic_devcmd_provinfo
@@ -198,6 +199,14 @@ enum vnic_devcmd_cmd {
198 199
199 /* undo initialize of virtual link */ 200 /* undo initialize of virtual link */
200 CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), 201 CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34),
202
203 /* initialize virtual link: (u32)a0=flags (see CMD_INITF_*) */
204 CMD_INIT = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 35),
205
206 /* check fw capability of a cmd:
207 * in: (u32)a0=cmd
208 * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */
209 CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36),
201}; 210};
202 211
203/* flags for CMD_OPEN */ 212/* flags for CMD_OPEN */
@@ -249,8 +258,16 @@ struct vnic_devcmd_notify {
249 u32 uif; /* uplink interface */ 258 u32 uif; /* uplink interface */
250 u32 status; /* status bits (see VNIC_STF_*) */ 259 u32 status; /* status bits (see VNIC_STF_*) */
251 u32 error; /* error code (see ERR_*) for first ERR */ 260 u32 error; /* error code (see ERR_*) for first ERR */
261 u32 link_down_cnt; /* running count of link down transitions */
252}; 262};
253#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */ 263#define VNIC_STF_FATAL_ERR 0x0001 /* fatal fw error */
264#define VNIC_STF_STD_PAUSE 0x0002 /* standard link-level pause on */
265#define VNIC_STF_PFC_PAUSE 0x0004 /* priority flow control pause on */
266/* all supported status flags */
267#define VNIC_STF_ALL (VNIC_STF_FATAL_ERR |\
268 VNIC_STF_STD_PAUSE |\
269 VNIC_STF_PFC_PAUSE |\
270 0)
254 271
255struct vnic_devcmd_provinfo { 272struct vnic_devcmd_provinfo {
256 u8 oui[3]; 273 u8 oui[3];
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index ccc408116af8..ce633a5a7e3c 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -78,7 +78,7 @@ static inline void vnic_intr_return_credits(struct vnic_intr *intr,
78 78
79static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba) 79static inline u32 vnic_intr_legacy_pba(u32 __iomem *legacy_pba)
80{ 80{
81 /* get and ack interrupt in one read (clear-and-ack-on-read) */ 81 /* read PBA without clearing */
82 return ioread32(legacy_pba); 82 return ioread32(legacy_pba);
83} 83}
84 84
diff --git a/drivers/net/enic/vnic_resource.h b/drivers/net/enic/vnic_resource.h
index 144d2812f081..b61c22aec41a 100644
--- a/drivers/net/enic/vnic_resource.h
+++ b/drivers/net/enic/vnic_resource.h
@@ -38,7 +38,7 @@ enum vnic_res_type {
38 RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */ 38 RES_TYPE_INTR_CTRL, /* Interrupt ctrl table */
39 RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */ 39 RES_TYPE_INTR_TABLE, /* MSI/MSI-X Interrupt table */
40 RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */ 40 RES_TYPE_INTR_PBA, /* MSI/MSI-X PBA table */
41 RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status, r2c */ 41 RES_TYPE_INTR_PBA_LEGACY, /* Legacy intr status */
42 RES_TYPE_RSVD6, 42 RES_TYPE_RSVD6,
43 RES_TYPE_RSVD7, 43 RES_TYPE_RSVD7,
44 RES_TYPE_DEVCMD, /* Device command region */ 44 RES_TYPE_DEVCMD, /* Device command region */
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 82bfca67cc4d..fd0ef66d2e9f 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -132,8 +132,15 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
132#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */ 132#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
133#endif 133#endif
134 134
135 if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) 135 if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
136 /* Adding write memory barrier prevents compiler and/or CPU
137 * reordering, thus avoiding descriptor posting before
138 * descriptor is initialized. Otherwise, hardware can read
139 * stale descriptor fields.
140 */
141 wmb();
136 iowrite32(buf->index, &rq->ctrl->posted_index); 142 iowrite32(buf->index, &rq->ctrl->posted_index);
143 }
137} 144}
138 145
139static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) 146static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
diff --git a/drivers/net/enic/vnic_rss.h b/drivers/net/enic/vnic_rss.h
index e325d65d7c34..5fbb3c923bcd 100644
--- a/drivers/net/enic/vnic_rss.h
+++ b/drivers/net/enic/vnic_rss.h
@@ -1,6 +1,19 @@
1/* 1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved. 2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved. 3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
4 *
5 * This program is free software; you may redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
10 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
11 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
12 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
13 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
14 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
15 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
16 * SOFTWARE.
4 */ 17 */
5 18
6#ifndef _VNIC_RSS_H_ 19#ifndef _VNIC_RSS_H_
diff --git a/drivers/net/enic/vnic_wq.h b/drivers/net/enic/vnic_wq.h
index 7081828d8a42..c826137dc651 100644
--- a/drivers/net/enic/vnic_wq.h
+++ b/drivers/net/enic/vnic_wq.h
@@ -108,8 +108,15 @@ static inline void vnic_wq_post(struct vnic_wq *wq,
108 buf->len = len; 108 buf->len = len;
109 109
110 buf = buf->next; 110 buf = buf->next;
111 if (eop) 111 if (eop) {
112 /* Adding write memory barrier prevents compiler and/or CPU
113 * reordering, thus avoiding descriptor posting before
114 * descriptor is initialized. Otherwise, hardware can read
115 * stale descriptor fields.
116 */
117 wmb();
112 iowrite32(buf->index, &wq->ctrl->posted_index); 118 iowrite32(buf->index, &wq->ctrl->posted_index);
119 }
113 wq->to_use = buf; 120 wq->to_use = buf;
114 121
115 wq->ring.desc_avail--; 122 wq->ring.desc_avail--;
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 76118ddd1042..f9b37c80dda6 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -322,7 +322,6 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
322 int i, ret, option = 0, duplex = 0; 322 int i, ret, option = 0, duplex = 0;
323 void *ring_space; 323 void *ring_space;
324 dma_addr_t ring_dma; 324 dma_addr_t ring_dma;
325 DECLARE_MAC_BUF(mac);
326 325
327/* when built into the kernel, we only print version if device is found */ 326/* when built into the kernel, we only print version if device is found */
328#ifndef MODULE 327#ifndef MODULE
@@ -364,7 +363,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
364 ioaddr = pci_resource_start (pdev, 0); 363 ioaddr = pci_resource_start (pdev, 0);
365#else 364#else
366 ioaddr = pci_resource_start (pdev, 1); 365 ioaddr = pci_resource_start (pdev, 1);
367 ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1)); 366 ioaddr = (long) pci_ioremap_bar(pdev, 1);
368 if (!ioaddr) { 367 if (!ioaddr) {
369 dev_err(&pdev->dev, "ioremap failed\n"); 368 dev_err(&pdev->dev, "ioremap failed\n");
370 goto err_out_free_netdev; 369 goto err_out_free_netdev;
@@ -372,7 +371,7 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
372#endif 371#endif
373 372
374 pci_set_drvdata(pdev, dev); 373 pci_set_drvdata(pdev, dev);
375 ep = dev->priv; 374 ep = netdev_priv(dev);
376 ep->mii.dev = dev; 375 ep->mii.dev = dev;
377 ep->mii.mdio_read = mdio_read; 376 ep->mii.mdio_read = mdio_read;
378 ep->mii.mdio_write = mdio_write; 377 ep->mii.mdio_write = mdio_write;
@@ -499,9 +498,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
499 if (ret < 0) 498 if (ret < 0)
500 goto err_out_unmap_rx; 499 goto err_out_unmap_rx;
501 500
502 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %s\n", 501 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
503 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq, 502 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq,
504 print_mac(mac, dev->dev_addr)); 503 dev->dev_addr);
505 504
506out: 505out:
507 return ret; 506 return ret;
@@ -655,7 +654,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
655 654
656static int epic_open(struct net_device *dev) 655static int epic_open(struct net_device *dev)
657{ 656{
658 struct epic_private *ep = dev->priv; 657 struct epic_private *ep = netdev_priv(dev);
659 long ioaddr = dev->base_addr; 658 long ioaddr = dev->base_addr;
660 int i; 659 int i;
661 int retval; 660 int retval;
@@ -767,7 +766,7 @@ static int epic_open(struct net_device *dev)
767static void epic_pause(struct net_device *dev) 766static void epic_pause(struct net_device *dev)
768{ 767{
769 long ioaddr = dev->base_addr; 768 long ioaddr = dev->base_addr;
770 struct epic_private *ep = dev->priv; 769 struct epic_private *ep = netdev_priv(dev);
771 770
772 netif_stop_queue (dev); 771 netif_stop_queue (dev);
773 772
@@ -790,7 +789,7 @@ static void epic_pause(struct net_device *dev)
790static void epic_restart(struct net_device *dev) 789static void epic_restart(struct net_device *dev)
791{ 790{
792 long ioaddr = dev->base_addr; 791 long ioaddr = dev->base_addr;
793 struct epic_private *ep = dev->priv; 792 struct epic_private *ep = netdev_priv(dev);
794 int i; 793 int i;
795 794
796 /* Soft reset the chip. */ 795 /* Soft reset the chip. */
@@ -842,7 +841,7 @@ static void epic_restart(struct net_device *dev)
842 841
843static void check_media(struct net_device *dev) 842static void check_media(struct net_device *dev)
844{ 843{
845 struct epic_private *ep = dev->priv; 844 struct epic_private *ep = netdev_priv(dev);
846 long ioaddr = dev->base_addr; 845 long ioaddr = dev->base_addr;
847 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0; 846 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
848 int negotiated = mii_lpa & ep->mii.advertising; 847 int negotiated = mii_lpa & ep->mii.advertising;
@@ -864,7 +863,7 @@ static void check_media(struct net_device *dev)
864static void epic_timer(unsigned long data) 863static void epic_timer(unsigned long data)
865{ 864{
866 struct net_device *dev = (struct net_device *)data; 865 struct net_device *dev = (struct net_device *)data;
867 struct epic_private *ep = dev->priv; 866 struct epic_private *ep = netdev_priv(dev);
868 long ioaddr = dev->base_addr; 867 long ioaddr = dev->base_addr;
869 int next_tick = 5*HZ; 868 int next_tick = 5*HZ;
870 869
@@ -885,7 +884,7 @@ static void epic_timer(unsigned long data)
885 884
886static void epic_tx_timeout(struct net_device *dev) 885static void epic_tx_timeout(struct net_device *dev)
887{ 886{
888 struct epic_private *ep = dev->priv; 887 struct epic_private *ep = netdev_priv(dev);
889 long ioaddr = dev->base_addr; 888 long ioaddr = dev->base_addr;
890 889
891 if (debug > 0) { 890 if (debug > 0) {
@@ -914,7 +913,7 @@ static void epic_tx_timeout(struct net_device *dev)
914/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ 913/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
915static void epic_init_ring(struct net_device *dev) 914static void epic_init_ring(struct net_device *dev)
916{ 915{
917 struct epic_private *ep = dev->priv; 916 struct epic_private *ep = netdev_priv(dev);
918 int i; 917 int i;
919 918
920 ep->tx_full = 0; 919 ep->tx_full = 0;
@@ -960,7 +959,7 @@ static void epic_init_ring(struct net_device *dev)
960 959
961static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev) 960static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
962{ 961{
963 struct epic_private *ep = dev->priv; 962 struct epic_private *ep = netdev_priv(dev);
964 int entry, free_count; 963 int entry, free_count;
965 u32 ctrl_word; 964 u32 ctrl_word;
966 unsigned long flags; 965 unsigned long flags;
@@ -1088,7 +1087,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
1088static irqreturn_t epic_interrupt(int irq, void *dev_instance) 1087static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1089{ 1088{
1090 struct net_device *dev = dev_instance; 1089 struct net_device *dev = dev_instance;
1091 struct epic_private *ep = dev->priv; 1090 struct epic_private *ep = netdev_priv(dev);
1092 long ioaddr = dev->base_addr; 1091 long ioaddr = dev->base_addr;
1093 unsigned int handled = 0; 1092 unsigned int handled = 0;
1094 int status; 1093 int status;
@@ -1110,9 +1109,9 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1110 1109
1111 if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) { 1110 if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
1112 spin_lock(&ep->napi_lock); 1111 spin_lock(&ep->napi_lock);
1113 if (netif_rx_schedule_prep(dev, &ep->napi)) { 1112 if (netif_rx_schedule_prep(&ep->napi)) {
1114 epic_napi_irq_off(dev, ep); 1113 epic_napi_irq_off(dev, ep);
1115 __netif_rx_schedule(dev, &ep->napi); 1114 __netif_rx_schedule(&ep->napi);
1116 } else 1115 } else
1117 ep->reschedule_in_poll++; 1116 ep->reschedule_in_poll++;
1118 spin_unlock(&ep->napi_lock); 1117 spin_unlock(&ep->napi_lock);
@@ -1156,7 +1155,7 @@ out:
1156 1155
1157static int epic_rx(struct net_device *dev, int budget) 1156static int epic_rx(struct net_device *dev, int budget)
1158{ 1157{
1159 struct epic_private *ep = dev->priv; 1158 struct epic_private *ep = netdev_priv(dev);
1160 int entry = ep->cur_rx % RX_RING_SIZE; 1159 int entry = ep->cur_rx % RX_RING_SIZE;
1161 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx; 1160 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
1162 int work_done = 0; 1161 int work_done = 0;
@@ -1223,7 +1222,6 @@ static int epic_rx(struct net_device *dev, int budget)
1223 } 1222 }
1224 skb->protocol = eth_type_trans(skb, dev); 1223 skb->protocol = eth_type_trans(skb, dev);
1225 netif_receive_skb(skb); 1224 netif_receive_skb(skb);
1226 dev->last_rx = jiffies;
1227 ep->stats.rx_packets++; 1225 ep->stats.rx_packets++;
1228 ep->stats.rx_bytes += pkt_len; 1226 ep->stats.rx_bytes += pkt_len;
1229 } 1227 }
@@ -1290,7 +1288,7 @@ rx_action:
1290 1288
1291 more = ep->reschedule_in_poll; 1289 more = ep->reschedule_in_poll;
1292 if (!more) { 1290 if (!more) {
1293 __netif_rx_complete(dev, napi); 1291 __netif_rx_complete(napi);
1294 outl(EpicNapiEvent, ioaddr + INTSTAT); 1292 outl(EpicNapiEvent, ioaddr + INTSTAT);
1295 epic_napi_irq_on(dev, ep); 1293 epic_napi_irq_on(dev, ep);
1296 } else 1294 } else
@@ -1308,7 +1306,7 @@ rx_action:
1308static int epic_close(struct net_device *dev) 1306static int epic_close(struct net_device *dev)
1309{ 1307{
1310 long ioaddr = dev->base_addr; 1308 long ioaddr = dev->base_addr;
1311 struct epic_private *ep = dev->priv; 1309 struct epic_private *ep = netdev_priv(dev);
1312 struct sk_buff *skb; 1310 struct sk_buff *skb;
1313 int i; 1311 int i;
1314 1312
@@ -1358,7 +1356,7 @@ static int epic_close(struct net_device *dev)
1358 1356
1359static struct net_device_stats *epic_get_stats(struct net_device *dev) 1357static struct net_device_stats *epic_get_stats(struct net_device *dev)
1360{ 1358{
1361 struct epic_private *ep = dev->priv; 1359 struct epic_private *ep = netdev_priv(dev);
1362 long ioaddr = dev->base_addr; 1360 long ioaddr = dev->base_addr;
1363 1361
1364 if (netif_running(dev)) { 1362 if (netif_running(dev)) {
@@ -1379,7 +1377,7 @@ static struct net_device_stats *epic_get_stats(struct net_device *dev)
1379static void set_rx_mode(struct net_device *dev) 1377static void set_rx_mode(struct net_device *dev)
1380{ 1378{
1381 long ioaddr = dev->base_addr; 1379 long ioaddr = dev->base_addr;
1382 struct epic_private *ep = dev->priv; 1380 struct epic_private *ep = netdev_priv(dev);
1383 unsigned char mc_filter[8]; /* Multicast hash filter */ 1381 unsigned char mc_filter[8]; /* Multicast hash filter */
1384 int i; 1382 int i;
1385 1383
@@ -1418,7 +1416,7 @@ static void set_rx_mode(struct net_device *dev)
1418 1416
1419static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) 1417static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1420{ 1418{
1421 struct epic_private *np = dev->priv; 1419 struct epic_private *np = netdev_priv(dev);
1422 1420
1423 strcpy (info->driver, DRV_NAME); 1421 strcpy (info->driver, DRV_NAME);
1424 strcpy (info->version, DRV_VERSION); 1422 strcpy (info->version, DRV_VERSION);
@@ -1427,7 +1425,7 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
1427 1425
1428static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1426static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1429{ 1427{
1430 struct epic_private *np = dev->priv; 1428 struct epic_private *np = netdev_priv(dev);
1431 int rc; 1429 int rc;
1432 1430
1433 spin_lock_irq(&np->lock); 1431 spin_lock_irq(&np->lock);
@@ -1439,7 +1437,7 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1439 1437
1440static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1438static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1441{ 1439{
1442 struct epic_private *np = dev->priv; 1440 struct epic_private *np = netdev_priv(dev);
1443 int rc; 1441 int rc;
1444 1442
1445 spin_lock_irq(&np->lock); 1443 spin_lock_irq(&np->lock);
@@ -1451,13 +1449,13 @@ static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1451 1449
1452static int netdev_nway_reset(struct net_device *dev) 1450static int netdev_nway_reset(struct net_device *dev)
1453{ 1451{
1454 struct epic_private *np = dev->priv; 1452 struct epic_private *np = netdev_priv(dev);
1455 return mii_nway_restart(&np->mii); 1453 return mii_nway_restart(&np->mii);
1456} 1454}
1457 1455
1458static u32 netdev_get_link(struct net_device *dev) 1456static u32 netdev_get_link(struct net_device *dev)
1459{ 1457{
1460 struct epic_private *np = dev->priv; 1458 struct epic_private *np = netdev_priv(dev);
1461 return mii_link_ok(&np->mii); 1459 return mii_link_ok(&np->mii);
1462} 1460}
1463 1461
@@ -1506,7 +1504,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
1506 1504
1507static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1505static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1508{ 1506{
1509 struct epic_private *np = dev->priv; 1507 struct epic_private *np = netdev_priv(dev);
1510 long ioaddr = dev->base_addr; 1508 long ioaddr = dev->base_addr;
1511 struct mii_ioctl_data *data = if_mii(rq); 1509 struct mii_ioctl_data *data = if_mii(rq);
1512 int rc; 1510 int rc;
@@ -1534,7 +1532,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1534static void __devexit epic_remove_one (struct pci_dev *pdev) 1532static void __devexit epic_remove_one (struct pci_dev *pdev)
1535{ 1533{
1536 struct net_device *dev = pci_get_drvdata(pdev); 1534 struct net_device *dev = pci_get_drvdata(pdev);
1537 struct epic_private *ep = dev->priv; 1535 struct epic_private *ep = netdev_priv(dev);
1538 1536
1539 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); 1537 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1540 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); 1538 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 18f1364d3d5b..40125694bd9f 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -162,6 +162,13 @@ static void eql_timer(unsigned long param)
162static char version[] __initdata = 162static char version[] __initdata =
163 "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n"; 163 "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n";
164 164
165static const struct net_device_ops eql_netdev_ops = {
166 .ndo_open = eql_open,
167 .ndo_stop = eql_close,
168 .ndo_do_ioctl = eql_ioctl,
169 .ndo_start_xmit = eql_slave_xmit,
170};
171
165static void __init eql_setup(struct net_device *dev) 172static void __init eql_setup(struct net_device *dev)
166{ 173{
167 equalizer_t *eql = netdev_priv(dev); 174 equalizer_t *eql = netdev_priv(dev);
@@ -175,10 +182,7 @@ static void __init eql_setup(struct net_device *dev)
175 INIT_LIST_HEAD(&eql->queue.all_slaves); 182 INIT_LIST_HEAD(&eql->queue.all_slaves);
176 eql->queue.master_dev = dev; 183 eql->queue.master_dev = dev;
177 184
178 dev->open = eql_open; 185 dev->netdev_ops = &eql_netdev_ops;
179 dev->stop = eql_close;
180 dev->do_ioctl = eql_ioctl;
181 dev->hard_start_xmit = eql_slave_xmit;
182 186
183 /* 187 /*
184 * Now we undo some of the things that eth_setup does 188 * Now we undo some of the things that eth_setup does
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index deefa51b8c31..5569f2ffb62c 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -64,9 +64,6 @@ static const char version[] =
64 64
65static int es_probe1(struct net_device *dev, int ioaddr); 65static int es_probe1(struct net_device *dev, int ioaddr);
66 66
67static int es_open(struct net_device *dev);
68static int es_close(struct net_device *dev);
69
70static void es_reset_8390(struct net_device *dev); 67static void es_reset_8390(struct net_device *dev);
71 68
72static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); 69static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
@@ -179,7 +176,6 @@ static int __init es_probe1(struct net_device *dev, int ioaddr)
179{ 176{
180 int i, retval; 177 int i, retval;
181 unsigned long eisa_id; 178 unsigned long eisa_id;
182 DECLARE_MAC_BUF(mac);
183 179
184 if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210")) 180 if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210"))
185 return -ENODEV; 181 return -ENODEV;
@@ -205,14 +201,14 @@ static int __init es_probe1(struct net_device *dev, int ioaddr)
205 if (dev->dev_addr[0] != ES_ADDR0 || 201 if (dev->dev_addr[0] != ES_ADDR0 ||
206 dev->dev_addr[1] != ES_ADDR1 || 202 dev->dev_addr[1] != ES_ADDR1 ||
207 dev->dev_addr[2] != ES_ADDR2) { 203 dev->dev_addr[2] != ES_ADDR2) {
208 printk("es3210.c: card not found %s (invalid_prefix).\n", 204 printk("es3210.c: card not found %pM (invalid_prefix).\n",
209 print_mac(mac, dev->dev_addr)); 205 dev->dev_addr);
210 retval = -ENODEV; 206 retval = -ENODEV;
211 goto out; 207 goto out;
212 } 208 }
213 209
214 printk("es3210.c: ES3210 rev. %ld at %#x, node %s", 210 printk("es3210.c: ES3210 rev. %ld at %#x, node %pM",
215 eisa_id>>24, ioaddr, print_mac(mac, dev->dev_addr)); 211 eisa_id>>24, ioaddr, dev->dev_addr);
216 212
217 /* Snarf the interrupt now. */ 213 /* Snarf the interrupt now. */
218 if (dev->irq == 0) { 214 if (dev->irq == 0) {
@@ -290,11 +286,7 @@ static int __init es_probe1(struct net_device *dev, int ioaddr)
290 ei_status.block_output = &es_block_output; 286 ei_status.block_output = &es_block_output;
291 ei_status.get_8390_hdr = &es_get_8390_hdr; 287 ei_status.get_8390_hdr = &es_get_8390_hdr;
292 288
293 dev->open = &es_open; 289 dev->netdev_ops = &ei_netdev_ops;
294 dev->stop = &es_close;
295#ifdef CONFIG_NET_POLL_CONTROLLER
296 dev->poll_controller = ei_poll;
297#endif
298 NS8390_init(dev, 0); 290 NS8390_init(dev, 0);
299 291
300 retval = register_netdev(dev); 292 retval = register_netdev(dev);
@@ -386,22 +378,6 @@ static void es_block_output(struct net_device *dev, int count,
386 memcpy_toio(shmem, buf, count); 378 memcpy_toio(shmem, buf, count);
387} 379}
388 380
389static int es_open(struct net_device *dev)
390{
391 ei_open(dev);
392 return 0;
393}
394
395static int es_close(struct net_device *dev)
396{
397
398 if (ei_debug > 1)
399 printk("%s: Shutting down ethercard.\n", dev->name);
400
401 ei_close(dev);
402 return 0;
403}
404
405#ifdef MODULE 381#ifdef MODULE
406#define MAX_ES_CARDS 4 /* Max number of ES3210 cards per module */ 382#define MAX_ES_CARDS 4 /* Max number of ES3210 cards per module */
407#define NAMELEN 8 /* # of chars for storing dev->name */ 383#define NAMELEN 8 /* # of chars for storing dev->name */
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index bee8b3fbc565..5c048f2fd74f 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1205,7 +1205,6 @@ static void eth16i_rx(struct net_device *dev)
1205 printk(KERN_DEBUG ".\n"); 1205 printk(KERN_DEBUG ".\n");
1206 } 1206 }
1207 netif_rx(skb); 1207 netif_rx(skb);
1208 dev->last_rx = jiffies;
1209 dev->stats.rx_packets++; 1208 dev->stats.rx_packets++;
1210 dev->stats.rx_bytes += pkt_len; 1209 dev->stats.rx_bytes += pkt_len;
1211 1210
@@ -1466,7 +1465,7 @@ void __exit cleanup_module(void)
1466 for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) { 1465 for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) {
1467 struct net_device *dev = dev_eth16i[this_dev]; 1466 struct net_device *dev = dev_eth16i[this_dev];
1468 1467
1469 if(dev->priv) { 1468 if (netdev_priv(dev)) {
1470 unregister_netdev(dev); 1469 unregister_netdev(dev);
1471 free_irq(dev->irq, dev); 1470 free_irq(dev->irq, dev);
1472 release_region(dev->base_addr, ETH16I_IO_EXTENT); 1471 release_region(dev->base_addr, ETH16I_IO_EXTENT);
@@ -1475,15 +1474,3 @@ void __exit cleanup_module(void)
1475 } 1474 }
1476} 1475}
1477#endif /* MODULE */ 1476#endif /* MODULE */
1478
1479/*
1480 * Local variables:
1481 * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c eth16i.c"
1482 * alt-compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict -prototypes -O6 -c eth16i.c"
1483 * tab-width: 8
1484 * c-basic-offset: 8
1485 * c-indent-level: 8
1486 * End:
1487 */
1488
1489/* End of file eth16i.c */
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 593a120e31b2..b852303c9362 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -396,7 +396,6 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase)
396 u_long mem_start, shmem_length; 396 u_long mem_start, shmem_length;
397 u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0; 397 u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0;
398 u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0; 398 u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0;
399 DECLARE_MAC_BUF(mac);
400 399
401 /* 400 /*
402 ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot. 401 ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot.
@@ -461,7 +460,7 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase)
461 if (lemac != LeMAC2) 460 if (lemac != LeMAC2)
462 DevicePresent(iobase); /* need after EWRK3_INIT */ 461 DevicePresent(iobase); /* need after EWRK3_INIT */
463 status = get_hw_addr(dev, eeprom_image, lemac); 462 status = get_hw_addr(dev, eeprom_image, lemac);
464 printk("%s\n", print_mac(mac, dev->dev_addr)); 463 printk("%pM\n", dev->dev_addr);
465 464
466 if (status) { 465 if (status) {
467 printk(" which has an EEPROM CRC error.\n"); 466 printk(" which has an EEPROM CRC error.\n");
@@ -646,10 +645,8 @@ static int ewrk3_open(struct net_device *dev)
646 ewrk3_init(dev); 645 ewrk3_init(dev);
647 646
648 if (ewrk3_debug > 1) { 647 if (ewrk3_debug > 1) {
649 DECLARE_MAC_BUF(mac);
650 printk("%s: ewrk3 open with irq %d\n", dev->name, dev->irq); 648 printk("%s: ewrk3 open with irq %d\n", dev->name, dev->irq);
651 printk(" physical address: %s\n", 649 printk(" physical address: %pM\n", dev->dev_addr);
652 print_mac(mac, dev->dev_addr));
653 if (lp->shmem_length == 0) { 650 if (lp->shmem_length == 0) {
654 printk(" no shared memory, I/O only mode\n"); 651 printk(" no shared memory, I/O only mode\n");
655 } else { 652 } else {
@@ -1029,7 +1026,6 @@ static int ewrk3_rx(struct net_device *dev)
1029 /* 1026 /*
1030 ** Update stats 1027 ** Update stats
1031 */ 1028 */
1032 dev->last_rx = jiffies;
1033 dev->stats.rx_packets++; 1029 dev->stats.rx_packets++;
1034 dev->stats.rx_bytes += pkt_len; 1030 dev->stats.rx_bytes += pkt_len;
1035 } else { 1031 } else {
@@ -1971,13 +1967,3 @@ module_exit(ewrk3_exit_module);
1971module_init(ewrk3_init_module); 1967module_init(ewrk3_init_module);
1972#endif /* MODULE */ 1968#endif /* MODULE */
1973MODULE_LICENSE("GPL"); 1969MODULE_LICENSE("GPL");
1974
1975
1976
1977/*
1978 * Local variables:
1979 * compile-command: "gcc -D__KERNEL__ -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c ewrk3.c"
1980 *
1981 * compile-command: "gcc -D__KERNEL__ -DMODULE -I/linux/include -Wall -Wstrict-prototypes -fomit-frame-pointer -fno-strength-reduce -malign-loops=2 -malign-jumps=2 -malign-functions=2 -O2 -m486 -c ewrk3.c"
1982 * End:
1983 */
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index b455ae931f7a..31ab1ff623fc 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -486,7 +486,6 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
486#else 486#else
487 int bar = 1; 487 int bar = 1;
488#endif 488#endif
489 DECLARE_MAC_BUF(mac);
490 489
491/* when built into the kernel, we only print version if device is found */ 490/* when built into the kernel, we only print version if device is found */
492#ifndef MODULE 491#ifndef MODULE
@@ -665,9 +664,9 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
665 if (err) 664 if (err)
666 goto err_out_free_tx; 665 goto err_out_free_tx;
667 666
668 printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", 667 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
669 dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr, 668 dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr,
670 print_mac(mac, dev->dev_addr), irq); 669 dev->dev_addr, irq);
671 670
672 return 0; 671 return 0;
673 672
@@ -1727,7 +1726,6 @@ static int netdev_rx(struct net_device *dev)
1727 } 1726 }
1728 skb->protocol = eth_type_trans(skb, dev); 1727 skb->protocol = eth_type_trans(skb, dev);
1729 netif_rx(skb); 1728 netif_rx(skb);
1730 dev->last_rx = jiffies;
1731 np->stats.rx_packets++; 1729 np->stats.rx_packets++;
1732 np->stats.rx_bytes += pkt_len; 1730 np->stats.rx_bytes += pkt_len;
1733 } 1731 }
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index ecd5c71a7a8a..7e33c129d51c 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1155,7 +1155,7 @@ static phy_info_t const phy_info_ks8721bl = {
1155 1155
1156static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev) 1156static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
1157{ 1157{
1158 struct fec_enet_private *fep = dev->priv; 1158 struct fec_enet_private *fep = netdev_priv(dev);
1159 volatile uint *s = &(fep->phy_status); 1159 volatile uint *s = &(fep->phy_status);
1160 1160
1161 *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC); 1161 *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
@@ -2562,7 +2562,6 @@ static int __init fec_enet_module_init(void)
2562{ 2562{
2563 struct net_device *dev; 2563 struct net_device *dev;
2564 int i, err; 2564 int i, err;
2565 DECLARE_MAC_BUF(mac);
2566 2565
2567 printk("FEC ENET Version 0.2\n"); 2566 printk("FEC ENET Version 0.2\n");
2568 2567
@@ -2581,8 +2580,7 @@ static int __init fec_enet_module_init(void)
2581 return -EIO; 2580 return -EIO;
2582 } 2581 }
2583 2582
2584 printk("%s: ethernet %s\n", 2583 printk("%s: ethernet %pM\n", dev->name, dev->dev_addr);
2585 dev->name, print_mac(mac, dev->dev_addr));
2586 } 2584 }
2587 return 0; 2585 return 0;
2588} 2586}
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index aec3b97e794d..cd8e98b45ec5 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -216,7 +216,7 @@ static int mpc52xx_fec_init_phy(struct net_device *dev)
216 struct phy_device *phydev; 216 struct phy_device *phydev;
217 char phy_id[BUS_ID_SIZE]; 217 char phy_id[BUS_ID_SIZE];
218 218
219 snprintf(phy_id, BUS_ID_SIZE, "%x:%02x", 219 snprintf(phy_id, sizeof(phy_id), "%x:%02x",
220 (unsigned int)dev->base_addr, priv->phy_addr); 220 (unsigned int)dev->base_addr, priv->phy_addr);
221 221
222 priv->link = PHY_DOWN; 222 priv->link = PHY_DOWN;
@@ -487,7 +487,6 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
487 rskb->protocol = eth_type_trans(rskb, dev); 487 rskb->protocol = eth_type_trans(rskb, dev);
488 488
489 netif_rx(rskb); 489 netif_rx(rskb);
490 dev->last_rx = jiffies;
491 } else { 490 } else {
492 /* Can't get a new one : reuse the same & drop pkt */ 491 /* Can't get a new one : reuse the same & drop pkt */
493 dev_notice(&dev->dev, "Memory squeeze, dropping packet.\n"); 492 dev_notice(&dev->dev, "Memory squeeze, dropping packet.\n");
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index cc7328b15521..5b68dc20168d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -712,12 +712,12 @@ struct nv_skb_map {
712 712
713/* 713/*
714 * SMP locking: 714 * SMP locking:
715 * All hardware access under dev->priv->lock, except the performance 715 * All hardware access under netdev_priv(dev)->lock, except the performance
716 * critical parts: 716 * critical parts:
717 * - rx is (pseudo-) lockless: it relies on the single-threading provided 717 * - rx is (pseudo-) lockless: it relies on the single-threading provided
718 * by the arch code for interrupts. 718 * by the arch code for interrupts.
719 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission 719 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
720 * needs dev->priv->lock :-( 720 * needs netdev_priv(dev)->lock :-(
721 * - set_multicast_list: preparation lockless, relies on netif_tx_lock. 721 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
722 */ 722 */
723 723
@@ -818,7 +818,7 @@ struct fe_priv {
818 * Maximum number of loops until we assume that a bit in the irq mask 818 * Maximum number of loops until we assume that a bit in the irq mask
819 * is stuck. Overridable with module param. 819 * is stuck. Overridable with module param.
820 */ 820 */
821static int max_interrupt_work = 5; 821static int max_interrupt_work = 15;
822 822
823/* 823/*
824 * Optimization can be either throuput mode or cpu mode 824 * Optimization can be either throuput mode or cpu mode
@@ -1446,9 +1446,9 @@ static int phy_init(struct net_device *dev)
1446 /* some phys clear out pause advertisment on reset, set it back */ 1446 /* some phys clear out pause advertisment on reset, set it back */
1447 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); 1447 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
1448 1448
1449 /* restart auto negotiation */ 1449 /* restart auto negotiation, power down phy */
1450 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1450 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1451 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1451 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE | BMCR_PDOWN);
1452 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1452 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1453 return PHY_ERROR; 1453 return PHY_ERROR;
1454 } 1454 }
@@ -1760,7 +1760,7 @@ static void nv_do_rx_refill(unsigned long data)
1760 struct fe_priv *np = netdev_priv(dev); 1760 struct fe_priv *np = netdev_priv(dev);
1761 1761
1762 /* Just reschedule NAPI rx processing */ 1762 /* Just reschedule NAPI rx processing */
1763 netif_rx_schedule(dev, &np->napi); 1763 netif_rx_schedule(&np->napi);
1764} 1764}
1765#else 1765#else
1766static void nv_do_rx_refill(unsigned long data) 1766static void nv_do_rx_refill(unsigned long data)
@@ -2735,7 +2735,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2735#else 2735#else
2736 netif_rx(skb); 2736 netif_rx(skb);
2737#endif 2737#endif
2738 dev->last_rx = jiffies;
2739 dev->stats.rx_packets++; 2738 dev->stats.rx_packets++;
2740 dev->stats.rx_bytes += len; 2739 dev->stats.rx_bytes += len;
2741next_pkt: 2740next_pkt:
@@ -2848,7 +2847,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2848 } 2847 }
2849 } 2848 }
2850 2849
2851 dev->last_rx = jiffies;
2852 dev->stats.rx_packets++; 2850 dev->stats.rx_packets++;
2853 dev->stats.rx_bytes += len; 2851 dev->stats.rx_bytes += len;
2854 } else { 2852 } else {
@@ -3405,7 +3403,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3405 3403
3406#ifdef CONFIG_FORCEDETH_NAPI 3404#ifdef CONFIG_FORCEDETH_NAPI
3407 if (events & NVREG_IRQ_RX_ALL) { 3405 if (events & NVREG_IRQ_RX_ALL) {
3408 netif_rx_schedule(dev, &np->napi); 3406 netif_rx_schedule(&np->napi);
3409 3407
3410 /* Disable furthur receive irq's */ 3408 /* Disable furthur receive irq's */
3411 spin_lock(&np->lock); 3409 spin_lock(&np->lock);
@@ -3522,7 +3520,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3522 3520
3523#ifdef CONFIG_FORCEDETH_NAPI 3521#ifdef CONFIG_FORCEDETH_NAPI
3524 if (events & NVREG_IRQ_RX_ALL) { 3522 if (events & NVREG_IRQ_RX_ALL) {
3525 netif_rx_schedule(dev, &np->napi); 3523 netif_rx_schedule(&np->napi);
3526 3524
3527 /* Disable furthur receive irq's */ 3525 /* Disable furthur receive irq's */
3528 spin_lock(&np->lock); 3526 spin_lock(&np->lock);
@@ -3680,7 +3678,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3680 /* re-enable receive interrupts */ 3678 /* re-enable receive interrupts */
3681 spin_lock_irqsave(&np->lock, flags); 3679 spin_lock_irqsave(&np->lock, flags);
3682 3680
3683 __netif_rx_complete(dev, napi); 3681 __netif_rx_complete(napi);
3684 3682
3685 np->irqmask |= NVREG_IRQ_RX_ALL; 3683 np->irqmask |= NVREG_IRQ_RX_ALL;
3686 if (np->msi_flags & NV_MSI_X_ENABLED) 3684 if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -3706,7 +3704,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3706 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3704 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3707 3705
3708 if (events) { 3706 if (events) {
3709 netif_rx_schedule(dev, &np->napi); 3707 netif_rx_schedule(&np->napi);
3710 /* disable receive interrupts on the nic */ 3708 /* disable receive interrupts on the nic */
3711 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); 3709 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3712 pci_push(base); 3710 pci_push(base);
@@ -5210,6 +5208,10 @@ static int nv_open(struct net_device *dev)
5210 5208
5211 dprintk(KERN_DEBUG "nv_open: begin\n"); 5209 dprintk(KERN_DEBUG "nv_open: begin\n");
5212 5210
5211 /* power up phy */
5212 mii_rw(dev, np->phyaddr, MII_BMCR,
5213 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5214
5213 /* erase previous misconfiguration */ 5215 /* erase previous misconfiguration */
5214 if (np->driver_data & DEV_HAS_POWER_CNTRL) 5216 if (np->driver_data & DEV_HAS_POWER_CNTRL)
5215 nv_mac_reset(dev); 5217 nv_mac_reset(dev);
@@ -5403,6 +5405,10 @@ static int nv_close(struct net_device *dev)
5403 if (np->wolenabled) { 5405 if (np->wolenabled) {
5404 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5406 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5405 nv_start_rx(dev); 5407 nv_start_rx(dev);
5408 } else {
5409 /* power down phy */
5410 mii_rw(dev, np->phyaddr, MII_BMCR,
5411 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5406 } 5412 }
5407 5413
5408 /* FIXME: power down nic */ 5414 /* FIXME: power down nic */
@@ -5410,6 +5416,38 @@ static int nv_close(struct net_device *dev)
5410 return 0; 5416 return 0;
5411} 5417}
5412 5418
5419static const struct net_device_ops nv_netdev_ops = {
5420 .ndo_open = nv_open,
5421 .ndo_stop = nv_close,
5422 .ndo_get_stats = nv_get_stats,
5423 .ndo_start_xmit = nv_start_xmit,
5424 .ndo_tx_timeout = nv_tx_timeout,
5425 .ndo_change_mtu = nv_change_mtu,
5426 .ndo_validate_addr = eth_validate_addr,
5427 .ndo_set_mac_address = nv_set_mac_address,
5428 .ndo_set_multicast_list = nv_set_multicast,
5429 .ndo_vlan_rx_register = nv_vlan_rx_register,
5430#ifdef CONFIG_NET_POLL_CONTROLLER
5431 .ndo_poll_controller = nv_poll_controller,
5432#endif
5433};
5434
5435static const struct net_device_ops nv_netdev_ops_optimized = {
5436 .ndo_open = nv_open,
5437 .ndo_stop = nv_close,
5438 .ndo_get_stats = nv_get_stats,
5439 .ndo_start_xmit = nv_start_xmit_optimized,
5440 .ndo_tx_timeout = nv_tx_timeout,
5441 .ndo_change_mtu = nv_change_mtu,
5442 .ndo_validate_addr = eth_validate_addr,
5443 .ndo_set_mac_address = nv_set_mac_address,
5444 .ndo_set_multicast_list = nv_set_multicast,
5445 .ndo_vlan_rx_register = nv_vlan_rx_register,
5446#ifdef CONFIG_NET_POLL_CONTROLLER
5447 .ndo_poll_controller = nv_poll_controller,
5448#endif
5449};
5450
5413static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 5451static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5414{ 5452{
5415 struct net_device *dev; 5453 struct net_device *dev;
@@ -5420,7 +5458,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5420 u32 powerstate, txreg; 5458 u32 powerstate, txreg;
5421 u32 phystate_orig = 0, phystate; 5459 u32 phystate_orig = 0, phystate;
5422 int phyinitialized = 0; 5460 int phyinitialized = 0;
5423 DECLARE_MAC_BUF(mac);
5424 static int printed_version; 5461 static int printed_version;
5425 5462
5426 if (!printed_version++) 5463 if (!printed_version++)
@@ -5530,7 +5567,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5530 if (id->driver_data & DEV_HAS_VLAN) { 5567 if (id->driver_data & DEV_HAS_VLAN) {
5531 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE; 5568 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5532 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX; 5569 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5533 dev->vlan_rx_register = nv_vlan_rx_register;
5534 } 5570 }
5535 5571
5536 np->msi_flags = 0; 5572 np->msi_flags = 0;
@@ -5580,25 +5616,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5580 if (!np->rx_skb || !np->tx_skb) 5616 if (!np->rx_skb || !np->tx_skb)
5581 goto out_freering; 5617 goto out_freering;
5582 5618
5583 dev->open = nv_open;
5584 dev->stop = nv_close;
5585
5586 if (!nv_optimized(np)) 5619 if (!nv_optimized(np))
5587 dev->hard_start_xmit = nv_start_xmit; 5620 dev->netdev_ops = &nv_netdev_ops;
5588 else 5621 else
5589 dev->hard_start_xmit = nv_start_xmit_optimized; 5622 dev->netdev_ops = &nv_netdev_ops_optimized;
5590 dev->get_stats = nv_get_stats; 5623
5591 dev->change_mtu = nv_change_mtu;
5592 dev->set_mac_address = nv_set_mac_address;
5593 dev->set_multicast_list = nv_set_multicast;
5594#ifdef CONFIG_NET_POLL_CONTROLLER
5595 dev->poll_controller = nv_poll_controller;
5596#endif
5597#ifdef CONFIG_FORCEDETH_NAPI 5624#ifdef CONFIG_FORCEDETH_NAPI
5598 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5625 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5599#endif 5626#endif
5600 SET_ETHTOOL_OPS(dev, &ops); 5627 SET_ETHTOOL_OPS(dev, &ops);
5601 dev->tx_timeout = nv_tx_timeout;
5602 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5628 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5603 5629
5604 pci_set_drvdata(pci_dev, dev); 5630 pci_set_drvdata(pci_dev, dev);
@@ -5653,8 +5679,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5653 * to 01:23:45:67:89:ab 5679 * to 01:23:45:67:89:ab
5654 */ 5680 */
5655 dev_printk(KERN_ERR, &pci_dev->dev, 5681 dev_printk(KERN_ERR, &pci_dev->dev,
5656 "Invalid Mac address detected: %s\n", 5682 "Invalid Mac address detected: %pM\n",
5657 print_mac(mac, dev->dev_addr)); 5683 dev->dev_addr);
5658 dev_printk(KERN_ERR, &pci_dev->dev, 5684 dev_printk(KERN_ERR, &pci_dev->dev,
5659 "Please complain to your hardware vendor. Switching to a random MAC.\n"); 5685 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5660 dev->dev_addr[0] = 0x00; 5686 dev->dev_addr[0] = 0x00;
@@ -5663,8 +5689,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5663 get_random_bytes(&dev->dev_addr[3], 3); 5689 get_random_bytes(&dev->dev_addr[3], 3);
5664 } 5690 }
5665 5691
5666 dprintk(KERN_DEBUG "%s: MAC Address %s\n", 5692 dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
5667 pci_name(pci_dev), print_mac(mac, dev->dev_addr)); 5693 pci_name(pci_dev), dev->dev_addr);
5668 5694
5669 /* set mac address */ 5695 /* set mac address */
5670 nv_copy_mac_to_hw(dev); 5696 nv_copy_mac_to_hw(dev);
@@ -6141,7 +6167,7 @@ static struct pci_device_id pci_tbl[] = {
6141 }, 6167 },
6142 { /* MCP79 Ethernet Controller */ 6168 { /* MCP79 Ethernet Controller */
6143 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 6169 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
6144 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, 6170 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
6145 }, 6171 },
6146 { /* MCP79 Ethernet Controller */ 6172 { /* MCP79 Ethernet Controller */
6147 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 6173 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index a6f49d025787..4e6a9195fe5f 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -209,7 +209,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
209 209
210 if (received < budget) { 210 if (received < budget) {
211 /* done */ 211 /* done */
212 netif_rx_complete(dev, napi); 212 netif_rx_complete(napi);
213 (*fep->ops->napi_enable_rx)(dev); 213 (*fep->ops->napi_enable_rx)(dev);
214 } 214 }
215 return received; 215 return received;
@@ -478,7 +478,7 @@ fs_enet_interrupt(int irq, void *dev_id)
478 /* NOTE: it is possible for FCCs in NAPI mode */ 478 /* NOTE: it is possible for FCCs in NAPI mode */
479 /* to submit a spurious interrupt while in poll */ 479 /* to submit a spurious interrupt while in poll */
480 if (napi_ok) 480 if (napi_ok)
481 __netif_rx_schedule(dev, &fep->napi); 481 __netif_rx_schedule(&fep->napi);
482 } 482 }
483 } 483 }
484 484
@@ -1117,10 +1117,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
1117 if (ret) 1117 if (ret)
1118 goto out_free_bd; 1118 goto out_free_bd;
1119 1119
1120 printk(KERN_INFO "%s: fs_enet: %02x:%02x:%02x:%02x:%02x:%02x\n", 1120 printk(KERN_INFO "%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);
1121 ndev->name,
1122 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
1123 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
1124 1121
1125 return 0; 1122 return 0;
1126 1123
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c4af949bf860..c672ecfc9595 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -25,11 +25,8 @@
25 * 25 *
26 * Theory of operation 26 * Theory of operation
27 * 27 *
28 * The driver is initialized through platform_device. Structures which 28 * The driver is initialized through of_device. Configuration information
29 * define the configuration needed by the board are defined in a 29 * is therefore conveyed through an OF-style device tree.
30 * board structure in arch/ppc/platforms (though I do not
31 * discount the possibility that other architectures could one
32 * day be supported.
33 * 30 *
34 * The Gianfar Ethernet Controller uses a ring of buffer 31 * The Gianfar Ethernet Controller uses a ring of buffer
35 * descriptors. The beginning is indicated by a register 32 * descriptors. The beginning is indicated by a register
@@ -78,7 +75,7 @@
78#include <linux/if_vlan.h> 75#include <linux/if_vlan.h>
79#include <linux/spinlock.h> 76#include <linux/spinlock.h>
80#include <linux/mm.h> 77#include <linux/mm.h>
81#include <linux/platform_device.h> 78#include <linux/of_platform.h>
82#include <linux/ip.h> 79#include <linux/ip.h>
83#include <linux/tcp.h> 80#include <linux/tcp.h>
84#include <linux/udp.h> 81#include <linux/udp.h>
@@ -92,6 +89,8 @@
92#include <linux/crc32.h> 89#include <linux/crc32.h>
93#include <linux/mii.h> 90#include <linux/mii.h>
94#include <linux/phy.h> 91#include <linux/phy.h>
92#include <linux/phy_fixed.h>
93#include <linux/of.h>
95 94
96#include "gianfar.h" 95#include "gianfar.h"
97#include "gianfar_mii.h" 96#include "gianfar_mii.h"
@@ -119,8 +118,9 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id);
119static void adjust_link(struct net_device *dev); 118static void adjust_link(struct net_device *dev);
120static void init_registers(struct net_device *dev); 119static void init_registers(struct net_device *dev);
121static int init_phy(struct net_device *dev); 120static int init_phy(struct net_device *dev);
122static int gfar_probe(struct platform_device *pdev); 121static int gfar_probe(struct of_device *ofdev,
123static int gfar_remove(struct platform_device *pdev); 122 const struct of_device_id *match);
123static int gfar_remove(struct of_device *ofdev);
124static void free_skb_resources(struct gfar_private *priv); 124static void free_skb_resources(struct gfar_private *priv);
125static void gfar_set_multi(struct net_device *dev); 125static void gfar_set_multi(struct net_device *dev);
126static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 126static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
@@ -131,7 +131,8 @@ static void gfar_netpoll(struct net_device *dev);
131#endif 131#endif
132int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 132int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
133static int gfar_clean_tx_ring(struct net_device *dev); 133static int gfar_clean_tx_ring(struct net_device *dev);
134static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); 134static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
135 int amount_pull);
135static void gfar_vlan_rx_register(struct net_device *netdev, 136static void gfar_vlan_rx_register(struct net_device *netdev,
136 struct vlan_group *grp); 137 struct vlan_group *grp);
137void gfar_halt(struct net_device *dev); 138void gfar_halt(struct net_device *dev);
@@ -149,29 +150,163 @@ MODULE_LICENSE("GPL");
149/* Returns 1 if incoming frames use an FCB */ 150/* Returns 1 if incoming frames use an FCB */
150static inline int gfar_uses_fcb(struct gfar_private *priv) 151static inline int gfar_uses_fcb(struct gfar_private *priv)
151{ 152{
152 return (priv->vlan_enable || priv->rx_csum_enable); 153 return priv->vlgrp || priv->rx_csum_enable;
154}
155
156static int gfar_of_init(struct net_device *dev)
157{
158 struct device_node *phy, *mdio;
159 const unsigned int *id;
160 const char *model;
161 const char *ctype;
162 const void *mac_addr;
163 const phandle *ph;
164 u64 addr, size;
165 int err = 0;
166 struct gfar_private *priv = netdev_priv(dev);
167 struct device_node *np = priv->node;
168 char bus_name[MII_BUS_ID_SIZE];
169
170 if (!np || !of_device_is_available(np))
171 return -ENODEV;
172
173 /* get a pointer to the register memory */
174 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
175 priv->regs = ioremap(addr, size);
176
177 if (priv->regs == NULL)
178 return -ENOMEM;
179
180 priv->interruptTransmit = irq_of_parse_and_map(np, 0);
181
182 model = of_get_property(np, "model", NULL);
183
184 /* If we aren't the FEC we have multiple interrupts */
185 if (model && strcasecmp(model, "FEC")) {
186 priv->interruptReceive = irq_of_parse_and_map(np, 1);
187
188 priv->interruptError = irq_of_parse_and_map(np, 2);
189
190 if (priv->interruptTransmit < 0 ||
191 priv->interruptReceive < 0 ||
192 priv->interruptError < 0) {
193 err = -EINVAL;
194 goto err_out;
195 }
196 }
197
198 mac_addr = of_get_mac_address(np);
199 if (mac_addr)
200 memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
201
202 if (model && !strcasecmp(model, "TSEC"))
203 priv->device_flags =
204 FSL_GIANFAR_DEV_HAS_GIGABIT |
205 FSL_GIANFAR_DEV_HAS_COALESCE |
206 FSL_GIANFAR_DEV_HAS_RMON |
207 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
208 if (model && !strcasecmp(model, "eTSEC"))
209 priv->device_flags =
210 FSL_GIANFAR_DEV_HAS_GIGABIT |
211 FSL_GIANFAR_DEV_HAS_COALESCE |
212 FSL_GIANFAR_DEV_HAS_RMON |
213 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
214 FSL_GIANFAR_DEV_HAS_PADDING |
215 FSL_GIANFAR_DEV_HAS_CSUM |
216 FSL_GIANFAR_DEV_HAS_VLAN |
217 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
218 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
219
220 ctype = of_get_property(np, "phy-connection-type", NULL);
221
222 /* We only care about rgmii-id. The rest are autodetected */
223 if (ctype && !strcmp(ctype, "rgmii-id"))
224 priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
225 else
226 priv->interface = PHY_INTERFACE_MODE_MII;
227
228 if (of_get_property(np, "fsl,magic-packet", NULL))
229 priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
230
231 ph = of_get_property(np, "phy-handle", NULL);
232 if (ph == NULL) {
233 u32 *fixed_link;
234
235 fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL);
236 if (!fixed_link) {
237 err = -ENODEV;
238 goto err_out;
239 }
240
241 snprintf(priv->phy_bus_id, BUS_ID_SIZE, PHY_ID_FMT, "0",
242 fixed_link[0]);
243 } else {
244 phy = of_find_node_by_phandle(*ph);
245
246 if (phy == NULL) {
247 err = -ENODEV;
248 goto err_out;
249 }
250
251 mdio = of_get_parent(phy);
252
253 id = of_get_property(phy, "reg", NULL);
254
255 of_node_put(phy);
256 of_node_put(mdio);
257
258 gfar_mdio_bus_name(bus_name, mdio);
259 snprintf(priv->phy_bus_id, BUS_ID_SIZE, "%s:%02x",
260 bus_name, *id);
261 }
262
263 /* Find the TBI PHY. If it's not there, we don't support SGMII */
264 ph = of_get_property(np, "tbi-handle", NULL);
265 if (ph) {
266 struct device_node *tbi = of_find_node_by_phandle(*ph);
267 struct of_device *ofdev;
268 struct mii_bus *bus;
269
270 if (!tbi)
271 return 0;
272
273 mdio = of_get_parent(tbi);
274 if (!mdio)
275 return 0;
276
277 ofdev = of_find_device_by_node(mdio);
278
279 of_node_put(mdio);
280
281 id = of_get_property(tbi, "reg", NULL);
282 if (!id)
283 return 0;
284
285 of_node_put(tbi);
286
287 bus = dev_get_drvdata(&ofdev->dev);
288
289 priv->tbiphy = bus->phy_map[*id];
290 }
291
292 return 0;
293
294err_out:
295 iounmap(priv->regs);
296 return err;
153} 297}
154 298
155/* Set up the ethernet device structure, private data, 299/* Set up the ethernet device structure, private data,
156 * and anything else we need before we start */ 300 * and anything else we need before we start */
157static int gfar_probe(struct platform_device *pdev) 301static int gfar_probe(struct of_device *ofdev,
302 const struct of_device_id *match)
158{ 303{
159 u32 tempval; 304 u32 tempval;
160 struct net_device *dev = NULL; 305 struct net_device *dev = NULL;
161 struct gfar_private *priv = NULL; 306 struct gfar_private *priv = NULL;
162 struct gianfar_platform_data *einfo;
163 struct resource *r;
164 int err = 0, irq;
165 DECLARE_MAC_BUF(mac); 307 DECLARE_MAC_BUF(mac);
166 308 int err = 0;
167 einfo = (struct gianfar_platform_data *) pdev->dev.platform_data; 309 int len_devname;
168
169 if (NULL == einfo) {
170 printk(KERN_ERR "gfar %d: Missing additional data!\n",
171 pdev->id);
172
173 return -ENODEV;
174 }
175 310
176 /* Create an ethernet device instance */ 311 /* Create an ethernet device instance */
177 dev = alloc_etherdev(sizeof (*priv)); 312 dev = alloc_etherdev(sizeof (*priv));
@@ -181,64 +316,23 @@ static int gfar_probe(struct platform_device *pdev)
181 316
182 priv = netdev_priv(dev); 317 priv = netdev_priv(dev);
183 priv->dev = dev; 318 priv->dev = dev;
319 priv->node = ofdev->node;
184 320
185 /* Set the info in the priv to the current info */ 321 err = gfar_of_init(dev);
186 priv->einfo = einfo;
187
188 /* fill out IRQ fields */
189 if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
190 irq = platform_get_irq_byname(pdev, "tx");
191 if (irq < 0)
192 goto regs_fail;
193 priv->interruptTransmit = irq;
194
195 irq = platform_get_irq_byname(pdev, "rx");
196 if (irq < 0)
197 goto regs_fail;
198 priv->interruptReceive = irq;
199
200 irq = platform_get_irq_byname(pdev, "error");
201 if (irq < 0)
202 goto regs_fail;
203 priv->interruptError = irq;
204 } else {
205 irq = platform_get_irq(pdev, 0);
206 if (irq < 0)
207 goto regs_fail;
208 priv->interruptTransmit = irq;
209 }
210
211 /* get a pointer to the register memory */
212 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
213 priv->regs = ioremap(r->start, sizeof (struct gfar));
214 322
215 if (NULL == priv->regs) { 323 if (err)
216 err = -ENOMEM;
217 goto regs_fail; 324 goto regs_fail;
218 }
219 325
220 spin_lock_init(&priv->txlock); 326 spin_lock_init(&priv->txlock);
221 spin_lock_init(&priv->rxlock); 327 spin_lock_init(&priv->rxlock);
222 spin_lock_init(&priv->bflock); 328 spin_lock_init(&priv->bflock);
223 INIT_WORK(&priv->reset_task, gfar_reset_task); 329 INIT_WORK(&priv->reset_task, gfar_reset_task);
224 330
225 platform_set_drvdata(pdev, dev); 331 dev_set_drvdata(&ofdev->dev, priv);
226 332
227 /* Stop the DMA engine now, in case it was running before */ 333 /* Stop the DMA engine now, in case it was running before */
228 /* (The firmware could have used it, and left it running). */ 334 /* (The firmware could have used it, and left it running). */
229 /* To do this, we write Graceful Receive Stop and Graceful */ 335 gfar_halt(dev);
230 /* Transmit Stop, and then wait until the corresponding bits */
231 /* in IEVENT indicate the stops have completed. */
232 tempval = gfar_read(&priv->regs->dmactrl);
233 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
234 gfar_write(&priv->regs->dmactrl, tempval);
235
236 tempval = gfar_read(&priv->regs->dmactrl);
237 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
238 gfar_write(&priv->regs->dmactrl, tempval);
239
240 while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
241 cpu_relax();
242 336
243 /* Reset MAC layer */ 337 /* Reset MAC layer */
244 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); 338 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
@@ -252,13 +346,10 @@ static int gfar_probe(struct platform_device *pdev)
252 /* Initialize ECNTRL */ 346 /* Initialize ECNTRL */
253 gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS); 347 gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
254 348
255 /* Copy the station address into the dev structure, */
256 memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);
257
258 /* Set the dev->base_addr to the gfar reg region */ 349 /* Set the dev->base_addr to the gfar reg region */
259 dev->base_addr = (unsigned long) (priv->regs); 350 dev->base_addr = (unsigned long) (priv->regs);
260 351
261 SET_NETDEV_DEV(dev, &pdev->dev); 352 SET_NETDEV_DEV(dev, &ofdev->dev);
262 353
263 /* Fill in the dev structure */ 354 /* Fill in the dev structure */
264 dev->open = gfar_enet_open; 355 dev->open = gfar_enet_open;
@@ -276,23 +367,21 @@ static int gfar_probe(struct platform_device *pdev)
276 367
277 dev->ethtool_ops = &gfar_ethtool_ops; 368 dev->ethtool_ops = &gfar_ethtool_ops;
278 369
279 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 370 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
280 priv->rx_csum_enable = 1; 371 priv->rx_csum_enable = 1;
281 dev->features |= NETIF_F_IP_CSUM; 372 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
282 } else 373 } else
283 priv->rx_csum_enable = 0; 374 priv->rx_csum_enable = 0;
284 375
285 priv->vlgrp = NULL; 376 priv->vlgrp = NULL;
286 377
287 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 378 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
288 dev->vlan_rx_register = gfar_vlan_rx_register; 379 dev->vlan_rx_register = gfar_vlan_rx_register;
289 380
290 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 381 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
291
292 priv->vlan_enable = 1;
293 } 382 }
294 383
295 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 384 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
296 priv->extended_hash = 1; 385 priv->extended_hash = 1;
297 priv->hash_width = 9; 386 priv->hash_width = 9;
298 387
@@ -327,7 +416,7 @@ static int gfar_probe(struct platform_device *pdev)
327 priv->hash_regs[7] = &priv->regs->gaddr7; 416 priv->hash_regs[7] = &priv->regs->gaddr7;
328 } 417 }
329 418
330 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 419 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
331 priv->padding = DEFAULT_PADDING; 420 priv->padding = DEFAULT_PADDING;
332 else 421 else
333 priv->padding = 0; 422 priv->padding = 0;
@@ -338,13 +427,12 @@ static int gfar_probe(struct platform_device *pdev)
338 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 427 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
339 priv->tx_ring_size = DEFAULT_TX_RING_SIZE; 428 priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
340 priv->rx_ring_size = DEFAULT_RX_RING_SIZE; 429 priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
430 priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
341 431
342 priv->txcoalescing = DEFAULT_TX_COALESCE; 432 priv->txcoalescing = DEFAULT_TX_COALESCE;
343 priv->txcount = DEFAULT_TXCOUNT; 433 priv->txic = DEFAULT_TXIC;
344 priv->txtime = DEFAULT_TXTIME;
345 priv->rxcoalescing = DEFAULT_RX_COALESCE; 434 priv->rxcoalescing = DEFAULT_RX_COALESCE;
346 priv->rxcount = DEFAULT_RXCOUNT; 435 priv->rxic = DEFAULT_RXIC;
347 priv->rxtime = DEFAULT_RXTIME;
348 436
349 /* Enable most messages by default */ 437 /* Enable most messages by default */
350 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 438 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -360,12 +448,28 @@ static int gfar_probe(struct platform_device *pdev)
360 goto register_fail; 448 goto register_fail;
361 } 449 }
362 450
451 /* fill out IRQ number and name fields */
452 len_devname = strlen(dev->name);
453 strncpy(&priv->int_name_tx[0], dev->name, len_devname);
454 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
455 strncpy(&priv->int_name_tx[len_devname],
456 "_tx", sizeof("_tx") + 1);
457
458 strncpy(&priv->int_name_rx[0], dev->name, len_devname);
459 strncpy(&priv->int_name_rx[len_devname],
460 "_rx", sizeof("_rx") + 1);
461
462 strncpy(&priv->int_name_er[0], dev->name, len_devname);
463 strncpy(&priv->int_name_er[len_devname],
464 "_er", sizeof("_er") + 1);
465 } else
466 priv->int_name_tx[len_devname] = '\0';
467
363 /* Create all the sysfs files */ 468 /* Create all the sysfs files */
364 gfar_init_sysfs(dev); 469 gfar_init_sysfs(dev);
365 470
366 /* Print out the device info */ 471 /* Print out the device info */
367 printk(KERN_INFO DEVICE_NAME "%s\n", 472 printk(KERN_INFO DEVICE_NAME "%pM\n", dev->name, dev->dev_addr);
368 dev->name, print_mac(mac, dev->dev_addr));
369 473
370 /* Even more device info helps when determining which kernel */ 474 /* Even more device info helps when determining which kernel */
371 /* provided which set of benchmarks. */ 475 /* provided which set of benchmarks. */
@@ -382,29 +486,28 @@ regs_fail:
382 return err; 486 return err;
383} 487}
384 488
385static int gfar_remove(struct platform_device *pdev) 489static int gfar_remove(struct of_device *ofdev)
386{ 490{
387 struct net_device *dev = platform_get_drvdata(pdev); 491 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
388 struct gfar_private *priv = netdev_priv(dev);
389 492
390 platform_set_drvdata(pdev, NULL); 493 dev_set_drvdata(&ofdev->dev, NULL);
391 494
392 iounmap(priv->regs); 495 iounmap(priv->regs);
393 free_netdev(dev); 496 free_netdev(priv->dev);
394 497
395 return 0; 498 return 0;
396} 499}
397 500
398#ifdef CONFIG_PM 501#ifdef CONFIG_PM
399static int gfar_suspend(struct platform_device *pdev, pm_message_t state) 502static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
400{ 503{
401 struct net_device *dev = platform_get_drvdata(pdev); 504 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
402 struct gfar_private *priv = netdev_priv(dev); 505 struct net_device *dev = priv->dev;
403 unsigned long flags; 506 unsigned long flags;
404 u32 tempval; 507 u32 tempval;
405 508
406 int magic_packet = priv->wol_en && 509 int magic_packet = priv->wol_en &&
407 (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 510 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
408 511
409 netif_device_detach(dev); 512 netif_device_detach(dev);
410 513
@@ -445,14 +548,14 @@ static int gfar_suspend(struct platform_device *pdev, pm_message_t state)
445 return 0; 548 return 0;
446} 549}
447 550
448static int gfar_resume(struct platform_device *pdev) 551static int gfar_resume(struct of_device *ofdev)
449{ 552{
450 struct net_device *dev = platform_get_drvdata(pdev); 553 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev);
451 struct gfar_private *priv = netdev_priv(dev); 554 struct net_device *dev = priv->dev;
452 unsigned long flags; 555 unsigned long flags;
453 u32 tempval; 556 u32 tempval;
454 int magic_packet = priv->wol_en && 557 int magic_packet = priv->wol_en &&
455 (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 558 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
456 559
457 if (!netif_running(dev)) { 560 if (!netif_running(dev)) {
458 netif_device_attach(dev); 561 netif_device_attach(dev);
@@ -511,7 +614,7 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
511 if (ecntrl & ECNTRL_REDUCED_MII_MODE) 614 if (ecntrl & ECNTRL_REDUCED_MII_MODE)
512 return PHY_INTERFACE_MODE_RMII; 615 return PHY_INTERFACE_MODE_RMII;
513 else { 616 else {
514 phy_interface_t interface = priv->einfo->interface; 617 phy_interface_t interface = priv->interface;
515 618
516 /* 619 /*
517 * This isn't autodetected right now, so it must 620 * This isn't autodetected right now, so it must
@@ -524,7 +627,7 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
524 } 627 }
525 } 628 }
526 629
527 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) 630 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
528 return PHY_INTERFACE_MODE_GMII; 631 return PHY_INTERFACE_MODE_GMII;
529 632
530 return PHY_INTERFACE_MODE_MII; 633 return PHY_INTERFACE_MODE_MII;
@@ -538,21 +641,18 @@ static int init_phy(struct net_device *dev)
538{ 641{
539 struct gfar_private *priv = netdev_priv(dev); 642 struct gfar_private *priv = netdev_priv(dev);
540 uint gigabit_support = 643 uint gigabit_support =
541 priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 644 priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
542 SUPPORTED_1000baseT_Full : 0; 645 SUPPORTED_1000baseT_Full : 0;
543 struct phy_device *phydev; 646 struct phy_device *phydev;
544 char phy_id[BUS_ID_SIZE];
545 phy_interface_t interface; 647 phy_interface_t interface;
546 648
547 priv->oldlink = 0; 649 priv->oldlink = 0;
548 priv->oldspeed = 0; 650 priv->oldspeed = 0;
549 priv->oldduplex = -1; 651 priv->oldduplex = -1;
550 652
551 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);
552
553 interface = gfar_get_interface(dev); 653 interface = gfar_get_interface(dev);
554 654
555 phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface); 655 phydev = phy_connect(dev, priv->phy_bus_id, &adjust_link, 0, interface);
556 656
557 if (interface == PHY_INTERFACE_MODE_SGMII) 657 if (interface == PHY_INTERFACE_MODE_SGMII)
558 gfar_configure_serdes(dev); 658 gfar_configure_serdes(dev);
@@ -583,35 +683,31 @@ static int init_phy(struct net_device *dev)
583static void gfar_configure_serdes(struct net_device *dev) 683static void gfar_configure_serdes(struct net_device *dev)
584{ 684{
585 struct gfar_private *priv = netdev_priv(dev); 685 struct gfar_private *priv = netdev_priv(dev);
586 struct gfar_mii __iomem *regs =
587 (void __iomem *)&priv->regs->gfar_mii_regs;
588 int tbipa = gfar_read(&priv->regs->tbipa);
589 struct mii_bus *bus = gfar_get_miibus(priv);
590 686
591 if (bus) 687 if (!priv->tbiphy) {
592 mutex_lock(&bus->mdio_lock); 688 printk(KERN_WARNING "SGMII mode requires that the device "
689 "tree specify a tbi-handle\n");
690 return;
691 }
593 692
594 /* If the link is already up, we must already be ok, and don't need to 693 /*
694 * If the link is already up, we must already be ok, and don't need to
595 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured 695 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
596 * everything for us? Resetting it takes the link down and requires 696 * everything for us? Resetting it takes the link down and requires
597 * several seconds for it to come back. 697 * several seconds for it to come back.
598 */ 698 */
599 if (gfar_local_mdio_read(regs, tbipa, MII_BMSR) & BMSR_LSTATUS) 699 if (phy_read(priv->tbiphy, MII_BMSR) & BMSR_LSTATUS)
600 goto done; 700 return;
601 701
602 /* Single clk mode, mii mode off(for serdes communication) */ 702 /* Single clk mode, mii mode off(for serdes communication) */
603 gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT); 703 phy_write(priv->tbiphy, MII_TBICON, TBICON_CLK_SELECT);
604 704
605 gfar_local_mdio_write(regs, tbipa, MII_ADVERTISE, 705 phy_write(priv->tbiphy, MII_ADVERTISE,
606 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | 706 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
607 ADVERTISE_1000XPSE_ASYM); 707 ADVERTISE_1000XPSE_ASYM);
608 708
609 gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE | 709 phy_write(priv->tbiphy, MII_BMCR, BMCR_ANENABLE |
610 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); 710 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
611
612 done:
613 if (bus)
614 mutex_unlock(&bus->mdio_lock);
615} 711}
616 712
617static void init_registers(struct net_device *dev) 713static void init_registers(struct net_device *dev)
@@ -644,7 +740,7 @@ static void init_registers(struct net_device *dev)
644 gfar_write(&priv->regs->gaddr7, 0); 740 gfar_write(&priv->regs->gaddr7, 0);
645 741
646 /* Zero out the rmon mib registers if it has them */ 742 /* Zero out the rmon mib registers if it has them */
647 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 743 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
648 memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib)); 744 memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));
649 745
650 /* Mask off the CAM interrupts */ 746 /* Mask off the CAM interrupts */
@@ -719,7 +815,7 @@ void stop_gfar(struct net_device *dev)
719 spin_unlock_irqrestore(&priv->txlock, flags); 815 spin_unlock_irqrestore(&priv->txlock, flags);
720 816
721 /* Free the IRQs */ 817 /* Free the IRQs */
722 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 818 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
723 free_irq(priv->interruptError, dev); 819 free_irq(priv->interruptError, dev);
724 free_irq(priv->interruptTransmit, dev); 820 free_irq(priv->interruptTransmit, dev);
725 free_irq(priv->interruptReceive, dev); 821 free_irq(priv->interruptReceive, dev);
@@ -742,22 +838,26 @@ static void free_skb_resources(struct gfar_private *priv)
742{ 838{
743 struct rxbd8 *rxbdp; 839 struct rxbd8 *rxbdp;
744 struct txbd8 *txbdp; 840 struct txbd8 *txbdp;
745 int i; 841 int i, j;
746 842
747 /* Go through all the buffer descriptors and free their data buffers */ 843 /* Go through all the buffer descriptors and free their data buffers */
748 txbdp = priv->tx_bd_base; 844 txbdp = priv->tx_bd_base;
749 845
750 for (i = 0; i < priv->tx_ring_size; i++) { 846 for (i = 0; i < priv->tx_ring_size; i++) {
751 847 if (!priv->tx_skbuff[i])
752 if (priv->tx_skbuff[i]) { 848 continue;
753 dma_unmap_single(&priv->dev->dev, txbdp->bufPtr, 849
754 txbdp->length, 850 dma_unmap_single(&priv->dev->dev, txbdp->bufPtr,
755 DMA_TO_DEVICE); 851 txbdp->length, DMA_TO_DEVICE);
756 dev_kfree_skb_any(priv->tx_skbuff[i]); 852 txbdp->lstatus = 0;
757 priv->tx_skbuff[i] = NULL; 853 for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) {
854 txbdp++;
855 dma_unmap_page(&priv->dev->dev, txbdp->bufPtr,
856 txbdp->length, DMA_TO_DEVICE);
758 } 857 }
759
760 txbdp++; 858 txbdp++;
859 dev_kfree_skb_any(priv->tx_skbuff[i]);
860 priv->tx_skbuff[i] = NULL;
761 } 861 }
762 862
763 kfree(priv->tx_skbuff); 863 kfree(priv->tx_skbuff);
@@ -777,8 +877,7 @@ static void free_skb_resources(struct gfar_private *priv)
777 priv->rx_skbuff[i] = NULL; 877 priv->rx_skbuff[i] = NULL;
778 } 878 }
779 879
780 rxbdp->status = 0; 880 rxbdp->lstatus = 0;
781 rxbdp->length = 0;
782 rxbdp->bufPtr = 0; 881 rxbdp->bufPtr = 0;
783 882
784 rxbdp++; 883 rxbdp++;
@@ -815,6 +914,8 @@ void gfar_start(struct net_device *dev)
815 914
816 /* Unmask the interrupts we look for */ 915 /* Unmask the interrupts we look for */
817 gfar_write(&regs->imask, IMASK_DEFAULT); 916 gfar_write(&regs->imask, IMASK_DEFAULT);
917
918 dev->trans_start = jiffies;
818} 919}
819 920
820/* Bring the controller up and running */ 921/* Bring the controller up and running */
@@ -889,6 +990,7 @@ int startup_gfar(struct net_device *dev)
889 priv->rx_skbuff[i] = NULL; 990 priv->rx_skbuff[i] = NULL;
890 991
891 /* Initialize some variables in our dev structure */ 992 /* Initialize some variables in our dev structure */
993 priv->num_txbdfree = priv->tx_ring_size;
892 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base; 994 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
893 priv->cur_rx = priv->rx_bd_base; 995 priv->cur_rx = priv->rx_bd_base;
894 priv->skb_curtx = priv->skb_dirtytx = 0; 996 priv->skb_curtx = priv->skb_dirtytx = 0;
@@ -897,8 +999,7 @@ int startup_gfar(struct net_device *dev)
897 /* Initialize Transmit Descriptor Ring */ 999 /* Initialize Transmit Descriptor Ring */
898 txbdp = priv->tx_bd_base; 1000 txbdp = priv->tx_bd_base;
899 for (i = 0; i < priv->tx_ring_size; i++) { 1001 for (i = 0; i < priv->tx_ring_size; i++) {
900 txbdp->status = 0; 1002 txbdp->lstatus = 0;
901 txbdp->length = 0;
902 txbdp->bufPtr = 0; 1003 txbdp->bufPtr = 0;
903 txbdp++; 1004 txbdp++;
904 } 1005 }
@@ -933,11 +1034,11 @@ int startup_gfar(struct net_device *dev)
933 1034
934 /* If the device has multiple interrupts, register for 1035 /* If the device has multiple interrupts, register for
935 * them. Otherwise, only register for the one */ 1036 * them. Otherwise, only register for the one */
936 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1037 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
937 /* Install our interrupt handlers for Error, 1038 /* Install our interrupt handlers for Error,
938 * Transmit, and Receive */ 1039 * Transmit, and Receive */
939 if (request_irq(priv->interruptError, gfar_error, 1040 if (request_irq(priv->interruptError, gfar_error,
940 0, "enet_error", dev) < 0) { 1041 0, priv->int_name_er, dev) < 0) {
941 if (netif_msg_intr(priv)) 1042 if (netif_msg_intr(priv))
942 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1043 printk(KERN_ERR "%s: Can't get IRQ %d\n",
943 dev->name, priv->interruptError); 1044 dev->name, priv->interruptError);
@@ -947,7 +1048,7 @@ int startup_gfar(struct net_device *dev)
947 } 1048 }
948 1049
949 if (request_irq(priv->interruptTransmit, gfar_transmit, 1050 if (request_irq(priv->interruptTransmit, gfar_transmit,
950 0, "enet_tx", dev) < 0) { 1051 0, priv->int_name_tx, dev) < 0) {
951 if (netif_msg_intr(priv)) 1052 if (netif_msg_intr(priv))
952 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1053 printk(KERN_ERR "%s: Can't get IRQ %d\n",
953 dev->name, priv->interruptTransmit); 1054 dev->name, priv->interruptTransmit);
@@ -958,7 +1059,7 @@ int startup_gfar(struct net_device *dev)
958 } 1059 }
959 1060
960 if (request_irq(priv->interruptReceive, gfar_receive, 1061 if (request_irq(priv->interruptReceive, gfar_receive,
961 0, "enet_rx", dev) < 0) { 1062 0, priv->int_name_rx, dev) < 0) {
962 if (netif_msg_intr(priv)) 1063 if (netif_msg_intr(priv))
963 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n", 1064 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
964 dev->name, priv->interruptReceive); 1065 dev->name, priv->interruptReceive);
@@ -968,10 +1069,10 @@ int startup_gfar(struct net_device *dev)
968 } 1069 }
969 } else { 1070 } else {
970 if (request_irq(priv->interruptTransmit, gfar_interrupt, 1071 if (request_irq(priv->interruptTransmit, gfar_interrupt,
971 0, "gfar_interrupt", dev) < 0) { 1072 0, priv->int_name_tx, dev) < 0) {
972 if (netif_msg_intr(priv)) 1073 if (netif_msg_intr(priv))
973 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1074 printk(KERN_ERR "%s: Can't get IRQ %d\n",
974 dev->name, priv->interruptError); 1075 dev->name, priv->interruptTransmit);
975 1076
976 err = -1; 1077 err = -1;
977 goto err_irq_fail; 1078 goto err_irq_fail;
@@ -981,17 +1082,13 @@ int startup_gfar(struct net_device *dev)
981 phy_start(priv->phydev); 1082 phy_start(priv->phydev);
982 1083
983 /* Configure the coalescing support */ 1084 /* Configure the coalescing support */
1085 gfar_write(&regs->txic, 0);
984 if (priv->txcoalescing) 1086 if (priv->txcoalescing)
985 gfar_write(&regs->txic, 1087 gfar_write(&regs->txic, priv->txic);
986 mk_ic_value(priv->txcount, priv->txtime));
987 else
988 gfar_write(&regs->txic, 0);
989 1088
1089 gfar_write(&regs->rxic, 0);
990 if (priv->rxcoalescing) 1090 if (priv->rxcoalescing)
991 gfar_write(&regs->rxic, 1091 gfar_write(&regs->rxic, priv->rxic);
992 mk_ic_value(priv->rxcount, priv->rxtime));
993 else
994 gfar_write(&regs->rxic, 0);
995 1092
996 if (priv->rx_csum_enable) 1093 if (priv->rx_csum_enable)
997 rctrl |= RCTRL_CHECKSUMMING; 1094 rctrl |= RCTRL_CHECKSUMMING;
@@ -1003,9 +1100,6 @@ int startup_gfar(struct net_device *dev)
1003 rctrl |= RCTRL_EMEN; 1100 rctrl |= RCTRL_EMEN;
1004 } 1101 }
1005 1102
1006 if (priv->vlan_enable)
1007 rctrl |= RCTRL_VLAN;
1008
1009 if (priv->padding) { 1103 if (priv->padding) {
1010 rctrl &= ~RCTRL_PAL_MASK; 1104 rctrl &= ~RCTRL_PAL_MASK;
1011 rctrl |= RCTRL_PADDING(priv->padding); 1105 rctrl |= RCTRL_PADDING(priv->padding);
@@ -1094,11 +1188,11 @@ static int gfar_enet_open(struct net_device *dev)
1094 return err; 1188 return err;
1095} 1189}
1096 1190
1097static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp) 1191static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
1098{ 1192{
1099 struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN); 1193 struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);
1100 1194
1101 memset(fcb, 0, GMAC_FCB_LEN); 1195 cacheable_memzero(fcb, GMAC_FCB_LEN);
1102 1196
1103 return fcb; 1197 return fcb;
1104} 1198}
@@ -1137,96 +1231,140 @@ void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
1137 fcb->vlctl = vlan_tx_tag_get(skb); 1231 fcb->vlctl = vlan_tx_tag_get(skb);
1138} 1232}
1139 1233
1234static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
1235 struct txbd8 *base, int ring_size)
1236{
1237 struct txbd8 *new_bd = bdp + stride;
1238
1239 return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
1240}
1241
1242static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1243 int ring_size)
1244{
1245 return skip_txbd(bdp, 1, base, ring_size);
1246}
1247
1140/* This is called by the kernel when a frame is ready for transmission. */ 1248/* This is called by the kernel when a frame is ready for transmission. */
1141/* It is pointed to by the dev->hard_start_xmit function pointer */ 1249/* It is pointed to by the dev->hard_start_xmit function pointer */
1142static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 1250static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1143{ 1251{
1144 struct gfar_private *priv = netdev_priv(dev); 1252 struct gfar_private *priv = netdev_priv(dev);
1145 struct txfcb *fcb = NULL; 1253 struct txfcb *fcb = NULL;
1146 struct txbd8 *txbdp; 1254 struct txbd8 *txbdp, *txbdp_start, *base;
1147 u16 status; 1255 u32 lstatus;
1256 int i;
1257 u32 bufaddr;
1148 unsigned long flags; 1258 unsigned long flags;
1259 unsigned int nr_frags, length;
1260
1261 base = priv->tx_bd_base;
1262
1263 /* total number of fragments in the SKB */
1264 nr_frags = skb_shinfo(skb)->nr_frags;
1265
1266 spin_lock_irqsave(&priv->txlock, flags);
1267
1268 /* check if there is space to queue this packet */
1269 if (nr_frags > priv->num_txbdfree) {
1270 /* no space, stop the queue */
1271 netif_stop_queue(dev);
1272 dev->stats.tx_fifo_errors++;
1273 spin_unlock_irqrestore(&priv->txlock, flags);
1274 return NETDEV_TX_BUSY;
1275 }
1149 1276
1150 /* Update transmit stats */ 1277 /* Update transmit stats */
1151 dev->stats.tx_bytes += skb->len; 1278 dev->stats.tx_bytes += skb->len;
1152 1279
1153 /* Lock priv now */ 1280 txbdp = txbdp_start = priv->cur_tx;
1154 spin_lock_irqsave(&priv->txlock, flags);
1155 1281
1156 /* Point at the first free tx descriptor */ 1282 if (nr_frags == 0) {
1157 txbdp = priv->cur_tx; 1283 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1284 } else {
1285 /* Place the fragment addresses and lengths into the TxBDs */
1286 for (i = 0; i < nr_frags; i++) {
1287 /* Point at the next BD, wrapping as needed */
1288 txbdp = next_txbd(txbdp, base, priv->tx_ring_size);
1289
1290 length = skb_shinfo(skb)->frags[i].size;
1158 1291
1159 /* Clear all but the WRAP status flags */ 1292 lstatus = txbdp->lstatus | length |
1160 status = txbdp->status & TXBD_WRAP; 1293 BD_LFLAG(TXBD_READY);
1294
1295 /* Handle the last BD specially */
1296 if (i == nr_frags - 1)
1297 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1298
1299 bufaddr = dma_map_page(&dev->dev,
1300 skb_shinfo(skb)->frags[i].page,
1301 skb_shinfo(skb)->frags[i].page_offset,
1302 length,
1303 DMA_TO_DEVICE);
1304
1305 /* set the TxBD length and buffer pointer */
1306 txbdp->bufPtr = bufaddr;
1307 txbdp->lstatus = lstatus;
1308 }
1309
1310 lstatus = txbdp_start->lstatus;
1311 }
1161 1312
1162 /* Set up checksumming */ 1313 /* Set up checksumming */
1163 if (likely((dev->features & NETIF_F_IP_CSUM) 1314 if (CHECKSUM_PARTIAL == skb->ip_summed) {
1164 && (CHECKSUM_PARTIAL == skb->ip_summed))) { 1315 fcb = gfar_add_fcb(skb);
1165 fcb = gfar_add_fcb(skb, txbdp); 1316 lstatus |= BD_LFLAG(TXBD_TOE);
1166 status |= TXBD_TOE;
1167 gfar_tx_checksum(skb, fcb); 1317 gfar_tx_checksum(skb, fcb);
1168 } 1318 }
1169 1319
1170 if (priv->vlan_enable && 1320 if (priv->vlgrp && vlan_tx_tag_present(skb)) {
1171 unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
1172 if (unlikely(NULL == fcb)) { 1321 if (unlikely(NULL == fcb)) {
1173 fcb = gfar_add_fcb(skb, txbdp); 1322 fcb = gfar_add_fcb(skb);
1174 status |= TXBD_TOE; 1323 lstatus |= BD_LFLAG(TXBD_TOE);
1175 } 1324 }
1176 1325
1177 gfar_tx_vlan(skb, fcb); 1326 gfar_tx_vlan(skb, fcb);
1178 } 1327 }
1179 1328
1180 /* Set buffer length and pointer */ 1329 /* setup the TxBD length and buffer pointer for the first BD */
1181 txbdp->length = skb->len;
1182 txbdp->bufPtr = dma_map_single(&dev->dev, skb->data,
1183 skb->len, DMA_TO_DEVICE);
1184
1185 /* Save the skb pointer so we can free it later */
1186 priv->tx_skbuff[priv->skb_curtx] = skb; 1330 priv->tx_skbuff[priv->skb_curtx] = skb;
1331 txbdp_start->bufPtr = dma_map_single(&dev->dev, skb->data,
1332 skb_headlen(skb), DMA_TO_DEVICE);
1187 1333
1188 /* Update the current skb pointer (wrapping if this was the last) */ 1334 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1189 priv->skb_curtx =
1190 (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
1191
1192 /* Flag the BD as interrupt-causing */
1193 status |= TXBD_INTERRUPT;
1194 1335
1195 /* Flag the BD as ready to go, last in frame, and */ 1336 /*
1196 /* in need of CRC */ 1337 * The powerpc-specific eieio() is used, as wmb() has too strong
1197 status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);
1198
1199 dev->trans_start = jiffies;
1200
1201 /* The powerpc-specific eieio() is used, as wmb() has too strong
1202 * semantics (it requires synchronization between cacheable and 1338 * semantics (it requires synchronization between cacheable and
1203 * uncacheable mappings, which eieio doesn't provide and which we 1339 * uncacheable mappings, which eieio doesn't provide and which we
1204 * don't need), thus requiring a more expensive sync instruction. At 1340 * don't need), thus requiring a more expensive sync instruction. At
1205 * some point, the set of architecture-independent barrier functions 1341 * some point, the set of architecture-independent barrier functions
1206 * should be expanded to include weaker barriers. 1342 * should be expanded to include weaker barriers.
1207 */ 1343 */
1208
1209 eieio(); 1344 eieio();
1210 txbdp->status = status;
1211 1345
1212 /* If this was the last BD in the ring, the next one */ 1346 txbdp_start->lstatus = lstatus;
1213 /* is at the beginning of the ring */ 1347
1214 if (txbdp->status & TXBD_WRAP) 1348 /* Update the current skb pointer to the next entry we will use
1215 txbdp = priv->tx_bd_base; 1349 * (wrapping if necessary) */
1216 else 1350 priv->skb_curtx = (priv->skb_curtx + 1) &
1217 txbdp++; 1351 TX_RING_MOD_MASK(priv->tx_ring_size);
1352
1353 priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size);
1354
1355 /* reduce TxBD free count */
1356 priv->num_txbdfree -= (nr_frags + 1);
1357
1358 dev->trans_start = jiffies;
1218 1359
1219 /* If the next BD still needs to be cleaned up, then the bds 1360 /* If the next BD still needs to be cleaned up, then the bds
1220 are full. We need to tell the kernel to stop sending us stuff. */ 1361 are full. We need to tell the kernel to stop sending us stuff. */
1221 if (txbdp == priv->dirty_tx) { 1362 if (!priv->num_txbdfree) {
1222 netif_stop_queue(dev); 1363 netif_stop_queue(dev);
1223 1364
1224 dev->stats.tx_fifo_errors++; 1365 dev->stats.tx_fifo_errors++;
1225 } 1366 }
1226 1367
1227 /* Update the current txbd to the next one */
1228 priv->cur_tx = txbdp;
1229
1230 /* Tell the DMA to go go go */ 1368 /* Tell the DMA to go go go */
1231 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 1369 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1232 1370
@@ -1270,11 +1408,15 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1270{ 1408{
1271 struct gfar_private *priv = netdev_priv(dev); 1409 struct gfar_private *priv = netdev_priv(dev);
1272 unsigned long flags; 1410 unsigned long flags;
1411 struct vlan_group *old_grp;
1273 u32 tempval; 1412 u32 tempval;
1274 1413
1275 spin_lock_irqsave(&priv->rxlock, flags); 1414 spin_lock_irqsave(&priv->rxlock, flags);
1276 1415
1277 priv->vlgrp = grp; 1416 old_grp = priv->vlgrp;
1417
1418 if (old_grp == grp)
1419 return;
1278 1420
1279 if (grp) { 1421 if (grp) {
1280 /* Enable VLAN tag insertion */ 1422 /* Enable VLAN tag insertion */
@@ -1286,6 +1428,7 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1286 /* Enable VLAN tag extraction */ 1428 /* Enable VLAN tag extraction */
1287 tempval = gfar_read(&priv->regs->rctrl); 1429 tempval = gfar_read(&priv->regs->rctrl);
1288 tempval |= RCTRL_VLEX; 1430 tempval |= RCTRL_VLEX;
1431 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
1289 gfar_write(&priv->regs->rctrl, tempval); 1432 gfar_write(&priv->regs->rctrl, tempval);
1290 } else { 1433 } else {
1291 /* Disable VLAN tag insertion */ 1434 /* Disable VLAN tag insertion */
@@ -1296,9 +1439,16 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1296 /* Disable VLAN tag extraction */ 1439 /* Disable VLAN tag extraction */
1297 tempval = gfar_read(&priv->regs->rctrl); 1440 tempval = gfar_read(&priv->regs->rctrl);
1298 tempval &= ~RCTRL_VLEX; 1441 tempval &= ~RCTRL_VLEX;
1442 /* If parse is no longer required, then disable parser */
1443 if (tempval & RCTRL_REQ_PARSER)
1444 tempval |= RCTRL_PRSDEP_INIT;
1445 else
1446 tempval &= ~RCTRL_PRSDEP_INIT;
1299 gfar_write(&priv->regs->rctrl, tempval); 1447 gfar_write(&priv->regs->rctrl, tempval);
1300 } 1448 }
1301 1449
1450 gfar_change_mtu(dev, dev->mtu);
1451
1302 spin_unlock_irqrestore(&priv->rxlock, flags); 1452 spin_unlock_irqrestore(&priv->rxlock, flags);
1303} 1453}
1304 1454
@@ -1309,14 +1459,9 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1309 int oldsize = priv->rx_buffer_size; 1459 int oldsize = priv->rx_buffer_size;
1310 int frame_size = new_mtu + ETH_HLEN; 1460 int frame_size = new_mtu + ETH_HLEN;
1311 1461
1312 if (priv->vlan_enable) 1462 if (priv->vlgrp)
1313 frame_size += VLAN_HLEN; 1463 frame_size += VLAN_HLEN;
1314 1464
1315 if (gfar_uses_fcb(priv))
1316 frame_size += GMAC_FCB_LEN;
1317
1318 frame_size += priv->padding;
1319
1320 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { 1465 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
1321 if (netif_msg_drv(priv)) 1466 if (netif_msg_drv(priv))
1322 printk(KERN_ERR "%s: Invalid MTU setting\n", 1467 printk(KERN_ERR "%s: Invalid MTU setting\n",
@@ -1324,6 +1469,11 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1324 return -EINVAL; 1469 return -EINVAL;
1325 } 1470 }
1326 1471
1472 if (gfar_uses_fcb(priv))
1473 frame_size += GMAC_FCB_LEN;
1474
1475 frame_size += priv->padding;
1476
1327 tempsize = 1477 tempsize =
1328 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + 1478 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
1329 INCREMENTAL_BUFFER_SIZE; 1479 INCREMENTAL_BUFFER_SIZE;
@@ -1388,83 +1538,85 @@ static void gfar_timeout(struct net_device *dev)
1388/* Interrupt Handler for Transmit complete */ 1538/* Interrupt Handler for Transmit complete */
1389static int gfar_clean_tx_ring(struct net_device *dev) 1539static int gfar_clean_tx_ring(struct net_device *dev)
1390{ 1540{
1391 struct txbd8 *bdp;
1392 struct gfar_private *priv = netdev_priv(dev); 1541 struct gfar_private *priv = netdev_priv(dev);
1542 struct txbd8 *bdp;
1543 struct txbd8 *lbdp = NULL;
1544 struct txbd8 *base = priv->tx_bd_base;
1545 struct sk_buff *skb;
1546 int skb_dirtytx;
1547 int tx_ring_size = priv->tx_ring_size;
1548 int frags = 0;
1549 int i;
1393 int howmany = 0; 1550 int howmany = 0;
1551 u32 lstatus;
1394 1552
1395 bdp = priv->dirty_tx; 1553 bdp = priv->dirty_tx;
1396 while ((bdp->status & TXBD_READY) == 0) { 1554 skb_dirtytx = priv->skb_dirtytx;
1397 /* If dirty_tx and cur_tx are the same, then either the */
1398 /* ring is empty or full now (it could only be full in the beginning, */
1399 /* obviously). If it is empty, we are done. */
1400 if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
1401 break;
1402 1555
1403 howmany++; 1556 while ((skb = priv->tx_skbuff[skb_dirtytx])) {
1557 frags = skb_shinfo(skb)->nr_frags;
1558 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
1404 1559
1405 /* Deferred means some collisions occurred during transmit, */ 1560 lstatus = lbdp->lstatus;
1406 /* but we eventually sent the packet. */
1407 if (bdp->status & TXBD_DEF)
1408 dev->stats.collisions++;
1409 1561
1410 /* Unmap the DMA memory */ 1562 /* Only clean completed frames */
1411 dma_unmap_single(&priv->dev->dev, bdp->bufPtr, 1563 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
1412 bdp->length, DMA_TO_DEVICE); 1564 (lstatus & BD_LENGTH_MASK))
1565 break;
1566
1567 dma_unmap_single(&dev->dev,
1568 bdp->bufPtr,
1569 bdp->length,
1570 DMA_TO_DEVICE);
1413 1571
1414 /* Free the sk buffer associated with this TxBD */ 1572 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
1415 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); 1573 bdp = next_txbd(bdp, base, tx_ring_size);
1416 1574
1417 priv->tx_skbuff[priv->skb_dirtytx] = NULL; 1575 for (i = 0; i < frags; i++) {
1418 priv->skb_dirtytx = 1576 dma_unmap_page(&dev->dev,
1419 (priv->skb_dirtytx + 1577 bdp->bufPtr,
1420 1) & TX_RING_MOD_MASK(priv->tx_ring_size); 1578 bdp->length,
1579 DMA_TO_DEVICE);
1580 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
1581 bdp = next_txbd(bdp, base, tx_ring_size);
1582 }
1421 1583
1422 /* Clean BD length for empty detection */ 1584 dev_kfree_skb_any(skb);
1423 bdp->length = 0; 1585 priv->tx_skbuff[skb_dirtytx] = NULL;
1424 1586
1425 /* update bdp to point at next bd in the ring (wrapping if necessary) */ 1587 skb_dirtytx = (skb_dirtytx + 1) &
1426 if (bdp->status & TXBD_WRAP) 1588 TX_RING_MOD_MASK(tx_ring_size);
1427 bdp = priv->tx_bd_base;
1428 else
1429 bdp++;
1430 1589
1431 /* Move dirty_tx to be the next bd */ 1590 howmany++;
1432 priv->dirty_tx = bdp; 1591 priv->num_txbdfree += frags + 1;
1592 }
1593
1594 /* If we freed a buffer, we can restart transmission, if necessary */
1595 if (netif_queue_stopped(dev) && priv->num_txbdfree)
1596 netif_wake_queue(dev);
1433 1597
1434 /* We freed a buffer, so now we can restart transmission */ 1598 /* Update dirty indicators */
1435 if (netif_queue_stopped(dev)) 1599 priv->skb_dirtytx = skb_dirtytx;
1436 netif_wake_queue(dev); 1600 priv->dirty_tx = bdp;
1437 } /* while ((bdp->status & TXBD_READY) == 0) */
1438 1601
1439 dev->stats.tx_packets += howmany; 1602 dev->stats.tx_packets += howmany;
1440 1603
1441 return howmany; 1604 return howmany;
1442} 1605}
1443 1606
1444/* Interrupt Handler for Transmit complete */ 1607static void gfar_schedule_cleanup(struct net_device *dev)
1445static irqreturn_t gfar_transmit(int irq, void *dev_id)
1446{ 1608{
1447 struct net_device *dev = (struct net_device *) dev_id;
1448 struct gfar_private *priv = netdev_priv(dev); 1609 struct gfar_private *priv = netdev_priv(dev);
1449 1610 if (netif_rx_schedule_prep(&priv->napi)) {
1450 /* Clear IEVENT */ 1611 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
1451 gfar_write(&priv->regs->ievent, IEVENT_TX_MASK); 1612 __netif_rx_schedule(&priv->napi);
1452
1453 /* Lock priv */
1454 spin_lock(&priv->txlock);
1455
1456 gfar_clean_tx_ring(dev);
1457
1458 /* If we are coalescing the interrupts, reset the timer */
1459 /* Otherwise, clear it */
1460 if (likely(priv->txcoalescing)) {
1461 gfar_write(&priv->regs->txic, 0);
1462 gfar_write(&priv->regs->txic,
1463 mk_ic_value(priv->txcount, priv->txtime));
1464 } 1613 }
1614}
1465 1615
1466 spin_unlock(&priv->txlock); 1616/* Interrupt Handler for Transmit complete */
1467 1617static irqreturn_t gfar_transmit(int irq, void *dev_id)
1618{
1619 gfar_schedule_cleanup((struct net_device *)dev_id);
1468 return IRQ_HANDLED; 1620 return IRQ_HANDLED;
1469} 1621}
1470 1622
@@ -1472,20 +1624,19 @@ static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp,
1472 struct sk_buff *skb) 1624 struct sk_buff *skb)
1473{ 1625{
1474 struct gfar_private *priv = netdev_priv(dev); 1626 struct gfar_private *priv = netdev_priv(dev);
1475 u32 * status_len = (u32 *)bdp; 1627 u32 lstatus;
1476 u16 flags;
1477 1628
1478 bdp->bufPtr = dma_map_single(&dev->dev, skb->data, 1629 bdp->bufPtr = dma_map_single(&dev->dev, skb->data,
1479 priv->rx_buffer_size, DMA_FROM_DEVICE); 1630 priv->rx_buffer_size, DMA_FROM_DEVICE);
1480 1631
1481 flags = RXBD_EMPTY | RXBD_INTERRUPT; 1632 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
1482 1633
1483 if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1) 1634 if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
1484 flags |= RXBD_WRAP; 1635 lstatus |= BD_LFLAG(RXBD_WRAP);
1485 1636
1486 eieio(); 1637 eieio();
1487 1638
1488 *status_len = (u32)flags << 16; 1639 bdp->lstatus = lstatus;
1489} 1640}
1490 1641
1491 1642
@@ -1552,28 +1703,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
1552 1703
1553irqreturn_t gfar_receive(int irq, void *dev_id) 1704irqreturn_t gfar_receive(int irq, void *dev_id)
1554{ 1705{
1555 struct net_device *dev = (struct net_device *) dev_id; 1706 gfar_schedule_cleanup((struct net_device *)dev_id);
1556 struct gfar_private *priv = netdev_priv(dev);
1557 u32 tempval;
1558
1559 /* support NAPI */
1560 /* Clear IEVENT, so interrupts aren't called again
1561 * because of the packets that have already arrived */
1562 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
1563
1564 if (netif_rx_schedule_prep(dev, &priv->napi)) {
1565 tempval = gfar_read(&priv->regs->imask);
1566 tempval &= IMASK_RTX_DISABLED;
1567 gfar_write(&priv->regs->imask, tempval);
1568
1569 __netif_rx_schedule(dev, &priv->napi);
1570 } else {
1571 if (netif_msg_rx_err(priv))
1572 printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
1573 dev->name, gfar_read(&priv->regs->ievent),
1574 gfar_read(&priv->regs->imask));
1575 }
1576
1577 return IRQ_HANDLED; 1707 return IRQ_HANDLED;
1578} 1708}
1579 1709
@@ -1589,59 +1719,38 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
1589} 1719}
1590 1720
1591 1721
1592static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
1593{
1594 struct rxfcb *fcb = (struct rxfcb *)skb->data;
1595
1596 /* Remove the FCB from the skb */
1597 skb_pull(skb, GMAC_FCB_LEN);
1598
1599 return fcb;
1600}
1601
1602/* gfar_process_frame() -- handle one incoming packet if skb 1722/* gfar_process_frame() -- handle one incoming packet if skb
1603 * isn't NULL. */ 1723 * isn't NULL. */
1604static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 1724static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1605 int length) 1725 int amount_pull)
1606{ 1726{
1607 struct gfar_private *priv = netdev_priv(dev); 1727 struct gfar_private *priv = netdev_priv(dev);
1608 struct rxfcb *fcb = NULL; 1728 struct rxfcb *fcb = NULL;
1609 1729
1610 if (NULL == skb) { 1730 int ret;
1611 if (netif_msg_rx_err(priv))
1612 printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
1613 dev->stats.rx_dropped++;
1614 priv->extra_stats.rx_skbmissing++;
1615 } else {
1616 int ret;
1617
1618 /* Prep the skb for the packet */
1619 skb_put(skb, length);
1620 1731
1621 /* Grab the FCB if there is one */ 1732 /* fcb is at the beginning if exists */
1622 if (gfar_uses_fcb(priv)) 1733 fcb = (struct rxfcb *)skb->data;
1623 fcb = gfar_get_fcb(skb);
1624 1734
1625 /* Remove the padded bytes, if there are any */ 1735 /* Remove the FCB from the skb */
1626 if (priv->padding) 1736 /* Remove the padded bytes, if there are any */
1627 skb_pull(skb, priv->padding); 1737 if (amount_pull)
1738 skb_pull(skb, amount_pull);
1628 1739
1629 if (priv->rx_csum_enable) 1740 if (priv->rx_csum_enable)
1630 gfar_rx_checksum(skb, fcb); 1741 gfar_rx_checksum(skb, fcb);
1631 1742
1632 /* Tell the skb what kind of packet this is */ 1743 /* Tell the skb what kind of packet this is */
1633 skb->protocol = eth_type_trans(skb, dev); 1744 skb->protocol = eth_type_trans(skb, dev);
1634 1745
1635 /* Send the packet up the stack */ 1746 /* Send the packet up the stack */
1636 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN))) { 1747 if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
1637 ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, 1748 ret = vlan_hwaccel_receive_skb(skb, priv->vlgrp, fcb->vlctl);
1638 fcb->vlctl); 1749 else
1639 } else 1750 ret = netif_receive_skb(skb);
1640 ret = netif_receive_skb(skb);
1641 1751
1642 if (NET_RX_DROP == ret) 1752 if (NET_RX_DROP == ret)
1643 priv->extra_stats.kernel_dropped++; 1753 priv->extra_stats.kernel_dropped++;
1644 }
1645 1754
1646 return 0; 1755 return 0;
1647} 1756}
@@ -1652,14 +1761,19 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1652 */ 1761 */
1653int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) 1762int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1654{ 1763{
1655 struct rxbd8 *bdp; 1764 struct rxbd8 *bdp, *base;
1656 struct sk_buff *skb; 1765 struct sk_buff *skb;
1657 u16 pkt_len; 1766 int pkt_len;
1767 int amount_pull;
1658 int howmany = 0; 1768 int howmany = 0;
1659 struct gfar_private *priv = netdev_priv(dev); 1769 struct gfar_private *priv = netdev_priv(dev);
1660 1770
1661 /* Get the first full descriptor */ 1771 /* Get the first full descriptor */
1662 bdp = priv->cur_rx; 1772 bdp = priv->cur_rx;
1773 base = priv->rx_bd_base;
1774
1775 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
1776 priv->padding;
1663 1777
1664 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { 1778 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
1665 struct sk_buff *newskb; 1779 struct sk_buff *newskb;
@@ -1680,23 +1794,30 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1680 1794
1681 if (unlikely(!newskb)) 1795 if (unlikely(!newskb))
1682 newskb = skb; 1796 newskb = skb;
1683 1797 else if (skb)
1684 if (skb)
1685 dev_kfree_skb_any(skb); 1798 dev_kfree_skb_any(skb);
1686 } else { 1799 } else {
1687 /* Increment the number of packets */ 1800 /* Increment the number of packets */
1688 dev->stats.rx_packets++; 1801 dev->stats.rx_packets++;
1689 howmany++; 1802 howmany++;
1690 1803
1691 /* Remove the FCS from the packet length */ 1804 if (likely(skb)) {
1692 pkt_len = bdp->length - 4; 1805 pkt_len = bdp->length - ETH_FCS_LEN;
1806 /* Remove the FCS from the packet length */
1807 skb_put(skb, pkt_len);
1808 dev->stats.rx_bytes += pkt_len;
1693 1809
1694 gfar_process_frame(dev, skb, pkt_len); 1810 gfar_process_frame(dev, skb, amount_pull);
1695 1811
1696 dev->stats.rx_bytes += pkt_len; 1812 } else {
1697 } 1813 if (netif_msg_rx_err(priv))
1814 printk(KERN_WARNING
1815 "%s: Missing skb!\n", dev->name);
1816 dev->stats.rx_dropped++;
1817 priv->extra_stats.rx_skbmissing++;
1818 }
1698 1819
1699 dev->last_rx = jiffies; 1820 }
1700 1821
1701 priv->rx_skbuff[priv->skb_currx] = newskb; 1822 priv->rx_skbuff[priv->skb_currx] = newskb;
1702 1823
@@ -1704,10 +1825,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1704 gfar_new_rxbdp(dev, bdp, newskb); 1825 gfar_new_rxbdp(dev, bdp, newskb);
1705 1826
1706 /* Update to the next pointer */ 1827 /* Update to the next pointer */
1707 if (bdp->status & RXBD_WRAP) 1828 bdp = next_bd(bdp, base, priv->rx_ring_size);
1708 bdp = priv->rx_bd_base;
1709 else
1710 bdp++;
1711 1829
1712 /* update to point at the next skb */ 1830 /* update to point at the next skb */
1713 priv->skb_currx = 1831 priv->skb_currx =
@@ -1725,19 +1843,27 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1725{ 1843{
1726 struct gfar_private *priv = container_of(napi, struct gfar_private, napi); 1844 struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
1727 struct net_device *dev = priv->dev; 1845 struct net_device *dev = priv->dev;
1728 int howmany; 1846 int tx_cleaned = 0;
1847 int rx_cleaned = 0;
1729 unsigned long flags; 1848 unsigned long flags;
1730 1849
1850 /* Clear IEVENT, so interrupts aren't called again
1851 * because of the packets that have already arrived */
1852 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
1853
1731 /* If we fail to get the lock, don't bother with the TX BDs */ 1854 /* If we fail to get the lock, don't bother with the TX BDs */
1732 if (spin_trylock_irqsave(&priv->txlock, flags)) { 1855 if (spin_trylock_irqsave(&priv->txlock, flags)) {
1733 gfar_clean_tx_ring(dev); 1856 tx_cleaned = gfar_clean_tx_ring(dev);
1734 spin_unlock_irqrestore(&priv->txlock, flags); 1857 spin_unlock_irqrestore(&priv->txlock, flags);
1735 } 1858 }
1736 1859
1737 howmany = gfar_clean_rx_ring(dev, budget); 1860 rx_cleaned = gfar_clean_rx_ring(dev, budget);
1861
1862 if (tx_cleaned)
1863 return budget;
1738 1864
1739 if (howmany < budget) { 1865 if (rx_cleaned < budget) {
1740 netif_rx_complete(dev, napi); 1866 netif_rx_complete(napi);
1741 1867
1742 /* Clear the halt bit in RSTAT */ 1868 /* Clear the halt bit in RSTAT */
1743 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 1869 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
@@ -1748,12 +1874,15 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1748 /* Otherwise, clear it */ 1874 /* Otherwise, clear it */
1749 if (likely(priv->rxcoalescing)) { 1875 if (likely(priv->rxcoalescing)) {
1750 gfar_write(&priv->regs->rxic, 0); 1876 gfar_write(&priv->regs->rxic, 0);
1751 gfar_write(&priv->regs->rxic, 1877 gfar_write(&priv->regs->rxic, priv->rxic);
1752 mk_ic_value(priv->rxcount, priv->rxtime)); 1878 }
1879 if (likely(priv->txcoalescing)) {
1880 gfar_write(&priv->regs->txic, 0);
1881 gfar_write(&priv->regs->txic, priv->txic);
1753 } 1882 }
1754 } 1883 }
1755 1884
1756 return howmany; 1885 return rx_cleaned;
1757} 1886}
1758 1887
1759#ifdef CONFIG_NET_POLL_CONTROLLER 1888#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1767,7 +1896,7 @@ static void gfar_netpoll(struct net_device *dev)
1767 struct gfar_private *priv = netdev_priv(dev); 1896 struct gfar_private *priv = netdev_priv(dev);
1768 1897
1769 /* If the device has multiple interrupts, run tx/rx */ 1898 /* If the device has multiple interrupts, run tx/rx */
1770 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1899 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1771 disable_irq(priv->interruptTransmit); 1900 disable_irq(priv->interruptTransmit);
1772 disable_irq(priv->interruptReceive); 1901 disable_irq(priv->interruptReceive);
1773 disable_irq(priv->interruptError); 1902 disable_irq(priv->interruptError);
@@ -2061,7 +2190,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2061 gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK); 2190 gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK);
2062 2191
2063 /* Magic Packet is not an error. */ 2192 /* Magic Packet is not an error. */
2064 if ((priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 2193 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2065 (events & IEVENT_MAG)) 2194 (events & IEVENT_MAG))
2066 events &= ~IEVENT_MAG; 2195 events &= ~IEVENT_MAG;
2067 2196
@@ -2127,16 +2256,24 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2127/* work with hotplug and coldplug */ 2256/* work with hotplug and coldplug */
2128MODULE_ALIAS("platform:fsl-gianfar"); 2257MODULE_ALIAS("platform:fsl-gianfar");
2129 2258
2259static struct of_device_id gfar_match[] =
2260{
2261 {
2262 .type = "network",
2263 .compatible = "gianfar",
2264 },
2265 {},
2266};
2267
2130/* Structure for a device driver */ 2268/* Structure for a device driver */
2131static struct platform_driver gfar_driver = { 2269static struct of_platform_driver gfar_driver = {
2270 .name = "fsl-gianfar",
2271 .match_table = gfar_match,
2272
2132 .probe = gfar_probe, 2273 .probe = gfar_probe,
2133 .remove = gfar_remove, 2274 .remove = gfar_remove,
2134 .suspend = gfar_suspend, 2275 .suspend = gfar_suspend,
2135 .resume = gfar_resume, 2276 .resume = gfar_resume,
2136 .driver = {
2137 .name = "fsl-gianfar",
2138 .owner = THIS_MODULE,
2139 },
2140}; 2277};
2141 2278
2142static int __init gfar_init(void) 2279static int __init gfar_init(void)
@@ -2146,7 +2283,7 @@ static int __init gfar_init(void)
2146 if (err) 2283 if (err)
2147 return err; 2284 return err;
2148 2285
2149 err = platform_driver_register(&gfar_driver); 2286 err = of_register_platform_driver(&gfar_driver);
2150 2287
2151 if (err) 2288 if (err)
2152 gfar_mdio_exit(); 2289 gfar_mdio_exit();
@@ -2156,7 +2293,7 @@ static int __init gfar_init(void)
2156 2293
2157static void __exit gfar_exit(void) 2294static void __exit gfar_exit(void)
2158{ 2295{
2159 platform_driver_unregister(&gfar_driver); 2296 of_unregister_platform_driver(&gfar_driver);
2160 gfar_mdio_exit(); 2297 gfar_mdio_exit();
2161} 2298}
2162 2299
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index f46e9b63af13..b1a83344acc7 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -189,6 +189,18 @@ extern const char gfar_driver_version[];
189#define mk_ic_value(count, time) (IC_ICEN | \ 189#define mk_ic_value(count, time) (IC_ICEN | \
190 mk_ic_icft(count) | \ 190 mk_ic_icft(count) | \
191 mk_ic_ictt(time)) 191 mk_ic_ictt(time))
192#define get_icft_value(ic) (((unsigned long)ic & IC_ICFT_MASK) >> \
193 IC_ICFT_SHIFT)
194#define get_ictt_value(ic) ((unsigned long)ic & IC_ICTT_MASK)
195
196#define DEFAULT_TXIC mk_ic_value(DEFAULT_TXCOUNT, DEFAULT_TXTIME)
197#define DEFAULT_RXIC mk_ic_value(DEFAULT_RXCOUNT, DEFAULT_RXTIME)
198
199#define skip_bd(bdp, stride, base, ring_size) ({ \
200 typeof(bdp) new_bd = (bdp) + (stride); \
201 (new_bd >= (base) + (ring_size)) ? (new_bd - (ring_size)) : new_bd; })
202
203#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size)
192 204
193#define RCTRL_PAL_MASK 0x001f0000 205#define RCTRL_PAL_MASK 0x001f0000
194#define RCTRL_VLEX 0x00002000 206#define RCTRL_VLEX 0x00002000
@@ -200,8 +212,10 @@ extern const char gfar_driver_version[];
200#define RCTRL_PRSDEP_INIT 0x000000c0 212#define RCTRL_PRSDEP_INIT 0x000000c0
201#define RCTRL_PROM 0x00000008 213#define RCTRL_PROM 0x00000008
202#define RCTRL_EMEN 0x00000002 214#define RCTRL_EMEN 0x00000002
203#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN \ 215#define RCTRL_REQ_PARSER (RCTRL_VLEX | RCTRL_IPCSEN | \
204 | RCTRL_TUCSEN | RCTRL_PRSDEP_INIT) 216 RCTRL_TUCSEN)
217#define RCTRL_CHECKSUMMING (RCTRL_IPCSEN | RCTRL_TUCSEN | \
218 RCTRL_PRSDEP_INIT)
205#define RCTRL_EXTHASH (RCTRL_GHTX) 219#define RCTRL_EXTHASH (RCTRL_GHTX)
206#define RCTRL_VLAN (RCTRL_PRSDEP_INIT) 220#define RCTRL_VLAN (RCTRL_PRSDEP_INIT)
207#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK) 221#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK)
@@ -237,7 +251,7 @@ extern const char gfar_driver_version[];
237#define IEVENT_FIQ 0x00000004 251#define IEVENT_FIQ 0x00000004
238#define IEVENT_DPE 0x00000002 252#define IEVENT_DPE 0x00000002
239#define IEVENT_PERR 0x00000001 253#define IEVENT_PERR 0x00000001
240#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0) 254#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0 | IEVENT_BSY)
241#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF) 255#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF)
242#define IEVENT_RTX_MASK (IEVENT_RX_MASK | IEVENT_TX_MASK) 256#define IEVENT_RTX_MASK (IEVENT_RX_MASK | IEVENT_TX_MASK)
243#define IEVENT_ERR_MASK \ 257#define IEVENT_ERR_MASK \
@@ -297,6 +311,8 @@ extern const char gfar_driver_version[];
297#define ATTRELI_EI_MASK 0x00003fff 311#define ATTRELI_EI_MASK 0x00003fff
298#define ATTRELI_EI(x) (x) 312#define ATTRELI_EI(x) (x)
299 313
314#define BD_LFLAG(flags) ((flags) << 16)
315#define BD_LENGTH_MASK 0x00ff
300 316
301/* TxBD status field bits */ 317/* TxBD status field bits */
302#define TXBD_READY 0x8000 318#define TXBD_READY 0x8000
@@ -358,10 +374,17 @@ extern const char gfar_driver_version[];
358#define RXFCB_PERR_MASK 0x000c 374#define RXFCB_PERR_MASK 0x000c
359#define RXFCB_PERR_BADL3 0x0008 375#define RXFCB_PERR_BADL3 0x0008
360 376
377#define GFAR_INT_NAME_MAX IFNAMSIZ + 4
378
361struct txbd8 379struct txbd8
362{ 380{
363 u16 status; /* Status Fields */ 381 union {
364 u16 length; /* Buffer length */ 382 struct {
383 u16 status; /* Status Fields */
384 u16 length; /* Buffer length */
385 };
386 u32 lstatus;
387 };
365 u32 bufPtr; /* Buffer Pointer */ 388 u32 bufPtr; /* Buffer Pointer */
366}; 389};
367 390
@@ -376,8 +399,13 @@ struct txfcb {
376 399
377struct rxbd8 400struct rxbd8
378{ 401{
379 u16 status; /* Status Fields */ 402 union {
380 u16 length; /* Buffer Length */ 403 struct {
404 u16 status; /* Status Fields */
405 u16 length; /* Buffer Length */
406 };
407 u32 lstatus;
408 };
381 u32 bufPtr; /* Buffer Pointer */ 409 u32 bufPtr; /* Buffer Pointer */
382}; 410};
383 411
@@ -657,6 +685,19 @@ struct gfar {
657 685
658}; 686};
659 687
688/* Flags related to gianfar device features */
689#define FSL_GIANFAR_DEV_HAS_GIGABIT 0x00000001
690#define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002
691#define FSL_GIANFAR_DEV_HAS_RMON 0x00000004
692#define FSL_GIANFAR_DEV_HAS_MULTI_INTR 0x00000008
693#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
694#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
695#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
696#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
697#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
698#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
699#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
700
660/* Struct stolen almost completely (and shamelessly) from the FCC enet source 701/* Struct stolen almost completely (and shamelessly) from the FCC enet source
661 * (Ok, that's not so true anymore, but there is a family resemblence) 702 * (Ok, that's not so true anymore, but there is a family resemblence)
662 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base 703 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
@@ -681,8 +722,7 @@ struct gfar_private {
681 722
682 /* Configuration info for the coalescing features */ 723 /* Configuration info for the coalescing features */
683 unsigned char txcoalescing; 724 unsigned char txcoalescing;
684 unsigned short txcount; 725 unsigned long txic;
685 unsigned short txtime;
686 726
687 /* Buffer descriptor pointers */ 727 /* Buffer descriptor pointers */
688 struct txbd8 *tx_bd_base; /* First tx buffer descriptor */ 728 struct txbd8 *tx_bd_base; /* First tx buffer descriptor */
@@ -690,10 +730,12 @@ struct gfar_private {
690 struct txbd8 *dirty_tx; /* First buffer in line 730 struct txbd8 *dirty_tx; /* First buffer in line
691 to be transmitted */ 731 to be transmitted */
692 unsigned int tx_ring_size; 732 unsigned int tx_ring_size;
733 unsigned int num_txbdfree; /* number of TxBDs free */
693 734
694 /* RX Locked fields */ 735 /* RX Locked fields */
695 spinlock_t rxlock; 736 spinlock_t rxlock;
696 737
738 struct device_node *node;
697 struct net_device *dev; 739 struct net_device *dev;
698 struct napi_struct napi; 740 struct napi_struct napi;
699 741
@@ -703,8 +745,7 @@ struct gfar_private {
703 745
704 /* RX Coalescing values */ 746 /* RX Coalescing values */
705 unsigned char rxcoalescing; 747 unsigned char rxcoalescing;
706 unsigned short rxcount; 748 unsigned long rxic;
707 unsigned short rxtime;
708 749
709 struct rxbd8 *rx_bd_base; /* First Rx buffers */ 750 struct rxbd8 *rx_bd_base; /* First Rx buffers */
710 struct rxbd8 *cur_rx; /* Next free rx ring entry */ 751 struct rxbd8 *cur_rx; /* Next free rx ring entry */
@@ -733,8 +774,10 @@ struct gfar_private {
733 /* Bitfield update lock */ 774 /* Bitfield update lock */
734 spinlock_t bflock; 775 spinlock_t bflock;
735 776
736 unsigned char vlan_enable:1, 777 phy_interface_t interface;
737 rx_csum_enable:1, 778 char phy_bus_id[BUS_ID_SIZE];
779 u32 device_flags;
780 unsigned char rx_csum_enable:1,
738 extended_hash:1, 781 extended_hash:1,
739 bd_stash_en:1, 782 bd_stash_en:1,
740 wol_en:1; /* Wake-on-LAN enabled */ 783 wol_en:1; /* Wake-on-LAN enabled */
@@ -744,11 +787,9 @@ struct gfar_private {
744 unsigned int interruptReceive; 787 unsigned int interruptReceive;
745 unsigned int interruptError; 788 unsigned int interruptError;
746 789
747 /* info structure initialized by platform code */
748 struct gianfar_platform_data *einfo;
749
750 /* PHY stuff */ 790 /* PHY stuff */
751 struct phy_device *phydev; 791 struct phy_device *phydev;
792 struct phy_device *tbiphy;
752 struct mii_bus *mii_bus; 793 struct mii_bus *mii_bus;
753 int oldspeed; 794 int oldspeed;
754 int oldduplex; 795 int oldduplex;
@@ -757,6 +798,11 @@ struct gfar_private {
757 uint32_t msg_enable; 798 uint32_t msg_enable;
758 799
759 struct work_struct reset_task; 800 struct work_struct reset_task;
801
802 char int_name_tx[GFAR_INT_NAME_MAX];
803 char int_name_rx[GFAR_INT_NAME_MAX];
804 char int_name_er[GFAR_INT_NAME_MAX];
805
760 /* Network Statistics */ 806 /* Network Statistics */
761 struct gfar_extra_stats extra_stats; 807 struct gfar_extra_stats extra_stats;
762}; 808};
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index fb7d3ccc0fdc..59b3b5d98efe 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -121,7 +121,7 @@ static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
121{ 121{
122 struct gfar_private *priv = netdev_priv(dev); 122 struct gfar_private *priv = netdev_priv(dev);
123 123
124 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) 124 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
125 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); 125 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
126 else 126 else
127 memcpy(buf, stat_gstrings, 127 memcpy(buf, stat_gstrings,
@@ -138,7 +138,7 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
138 struct gfar_private *priv = netdev_priv(dev); 138 struct gfar_private *priv = netdev_priv(dev);
139 u64 *extra = (u64 *) & priv->extra_stats; 139 u64 *extra = (u64 *) & priv->extra_stats;
140 140
141 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 141 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
142 u32 __iomem *rmon = (u32 __iomem *) & priv->regs->rmon; 142 u32 __iomem *rmon = (u32 __iomem *) & priv->regs->rmon;
143 struct gfar_stats *stats = (struct gfar_stats *) buf; 143 struct gfar_stats *stats = (struct gfar_stats *) buf;
144 144
@@ -158,7 +158,7 @@ static int gfar_sset_count(struct net_device *dev, int sset)
158 158
159 switch (sset) { 159 switch (sset) {
160 case ETH_SS_STATS: 160 case ETH_SS_STATS:
161 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) 161 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
162 return GFAR_STATS_LEN; 162 return GFAR_STATS_LEN;
163 else 163 else
164 return GFAR_EXTRA_STATS_LEN; 164 return GFAR_EXTRA_STATS_LEN;
@@ -201,8 +201,8 @@ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
201 if (NULL == phydev) 201 if (NULL == phydev)
202 return -ENODEV; 202 return -ENODEV;
203 203
204 cmd->maxtxpkt = priv->txcount; 204 cmd->maxtxpkt = get_icft_value(priv->txic);
205 cmd->maxrxpkt = priv->rxcount; 205 cmd->maxrxpkt = get_icft_value(priv->rxic);
206 206
207 return phy_ethtool_gset(phydev, cmd); 207 return phy_ethtool_gset(phydev, cmd);
208} 208}
@@ -279,18 +279,26 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
279static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 279static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
280{ 280{
281 struct gfar_private *priv = netdev_priv(dev); 281 struct gfar_private *priv = netdev_priv(dev);
282 unsigned long rxtime;
283 unsigned long rxcount;
284 unsigned long txtime;
285 unsigned long txcount;
282 286
283 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) 287 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
284 return -EOPNOTSUPP; 288 return -EOPNOTSUPP;
285 289
286 if (NULL == priv->phydev) 290 if (NULL == priv->phydev)
287 return -ENODEV; 291 return -ENODEV;
288 292
289 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime); 293 rxtime = get_ictt_value(priv->rxic);
290 cvals->rx_max_coalesced_frames = priv->rxcount; 294 rxcount = get_icft_value(priv->rxic);
295 txtime = get_ictt_value(priv->txic);
296 txcount = get_icft_value(priv->txic);;
297 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
298 cvals->rx_max_coalesced_frames = rxcount;
291 299
292 cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, priv->txtime); 300 cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
293 cvals->tx_max_coalesced_frames = priv->txcount; 301 cvals->tx_max_coalesced_frames = txcount;
294 302
295 cvals->use_adaptive_rx_coalesce = 0; 303 cvals->use_adaptive_rx_coalesce = 0;
296 cvals->use_adaptive_tx_coalesce = 0; 304 cvals->use_adaptive_tx_coalesce = 0;
@@ -332,7 +340,7 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
332{ 340{
333 struct gfar_private *priv = netdev_priv(dev); 341 struct gfar_private *priv = netdev_priv(dev);
334 342
335 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) 343 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
336 return -EOPNOTSUPP; 344 return -EOPNOTSUPP;
337 345
338 /* Set up rx coalescing */ 346 /* Set up rx coalescing */
@@ -358,8 +366,9 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
358 return -EINVAL; 366 return -EINVAL;
359 } 367 }
360 368
361 priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs); 369 priv->rxic = mk_ic_value(
362 priv->rxcount = cvals->rx_max_coalesced_frames; 370 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs),
371 cvals->rx_max_coalesced_frames);
363 372
364 /* Set up tx coalescing */ 373 /* Set up tx coalescing */
365 if ((cvals->tx_coalesce_usecs == 0) || 374 if ((cvals->tx_coalesce_usecs == 0) ||
@@ -381,20 +390,17 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
381 return -EINVAL; 390 return -EINVAL;
382 } 391 }
383 392
384 priv->txtime = gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs); 393 priv->txic = mk_ic_value(
385 priv->txcount = cvals->tx_max_coalesced_frames; 394 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs),
395 cvals->tx_max_coalesced_frames);
386 396
397 gfar_write(&priv->regs->rxic, 0);
387 if (priv->rxcoalescing) 398 if (priv->rxcoalescing)
388 gfar_write(&priv->regs->rxic, 399 gfar_write(&priv->regs->rxic, priv->rxic);
389 mk_ic_value(priv->rxcount, priv->rxtime));
390 else
391 gfar_write(&priv->regs->rxic, 0);
392 400
401 gfar_write(&priv->regs->txic, 0);
393 if (priv->txcoalescing) 402 if (priv->txcoalescing)
394 gfar_write(&priv->regs->txic, 403 gfar_write(&priv->regs->txic, priv->txic);
395 mk_ic_value(priv->txcount, priv->txtime));
396 else
397 gfar_write(&priv->regs->txic, 0);
398 404
399 return 0; 405 return 0;
400} 406}
@@ -456,11 +462,12 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
456 spin_lock(&priv->rxlock); 462 spin_lock(&priv->rxlock);
457 463
458 gfar_halt(dev); 464 gfar_halt(dev);
459 gfar_clean_rx_ring(dev, priv->rx_ring_size);
460 465
461 spin_unlock(&priv->rxlock); 466 spin_unlock(&priv->rxlock);
462 spin_unlock_irqrestore(&priv->txlock, flags); 467 spin_unlock_irqrestore(&priv->txlock, flags);
463 468
469 gfar_clean_rx_ring(dev, priv->rx_ring_size);
470
464 /* Now we take down the rings to rebuild them */ 471 /* Now we take down the rings to rebuild them */
465 stop_gfar(dev); 472 stop_gfar(dev);
466 } 473 }
@@ -468,11 +475,13 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
468 /* Change the size */ 475 /* Change the size */
469 priv->rx_ring_size = rvals->rx_pending; 476 priv->rx_ring_size = rvals->rx_pending;
470 priv->tx_ring_size = rvals->tx_pending; 477 priv->tx_ring_size = rvals->tx_pending;
478 priv->num_txbdfree = priv->tx_ring_size;
471 479
472 /* Rebuild the rings with the new size */ 480 /* Rebuild the rings with the new size */
473 if (dev->flags & IFF_UP) 481 if (dev->flags & IFF_UP) {
474 err = startup_gfar(dev); 482 err = startup_gfar(dev);
475 483 netif_wake_queue(dev);
484 }
476 return err; 485 return err;
477} 486}
478 487
@@ -482,7 +491,7 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
482 unsigned long flags; 491 unsigned long flags;
483 int err = 0; 492 int err = 0;
484 493
485 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM)) 494 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
486 return -EOPNOTSUPP; 495 return -EOPNOTSUPP;
487 496
488 if (dev->flags & IFF_UP) { 497 if (dev->flags & IFF_UP) {
@@ -492,11 +501,12 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
492 spin_lock(&priv->rxlock); 501 spin_lock(&priv->rxlock);
493 502
494 gfar_halt(dev); 503 gfar_halt(dev);
495 gfar_clean_rx_ring(dev, priv->rx_ring_size);
496 504
497 spin_unlock(&priv->rxlock); 505 spin_unlock(&priv->rxlock);
498 spin_unlock_irqrestore(&priv->txlock, flags); 506 spin_unlock_irqrestore(&priv->txlock, flags);
499 507
508 gfar_clean_rx_ring(dev, priv->rx_ring_size);
509
500 /* Now we take down the rings to rebuild them */ 510 /* Now we take down the rings to rebuild them */
501 stop_gfar(dev); 511 stop_gfar(dev);
502 } 512 }
@@ -505,9 +515,10 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
505 priv->rx_csum_enable = data; 515 priv->rx_csum_enable = data;
506 spin_unlock_irqrestore(&priv->bflock, flags); 516 spin_unlock_irqrestore(&priv->bflock, flags);
507 517
508 if (dev->flags & IFF_UP) 518 if (dev->flags & IFF_UP) {
509 err = startup_gfar(dev); 519 err = startup_gfar(dev);
510 520 netif_wake_queue(dev);
521 }
511 return err; 522 return err;
512} 523}
513 524
@@ -515,7 +526,7 @@ static uint32_t gfar_get_rx_csum(struct net_device *dev)
515{ 526{
516 struct gfar_private *priv = netdev_priv(dev); 527 struct gfar_private *priv = netdev_priv(dev);
517 528
518 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM)) 529 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
519 return 0; 530 return 0;
520 531
521 return priv->rx_csum_enable; 532 return priv->rx_csum_enable;
@@ -523,22 +534,19 @@ static uint32_t gfar_get_rx_csum(struct net_device *dev)
523 534
524static int gfar_set_tx_csum(struct net_device *dev, uint32_t data) 535static int gfar_set_tx_csum(struct net_device *dev, uint32_t data)
525{ 536{
526 unsigned long flags;
527 struct gfar_private *priv = netdev_priv(dev); 537 struct gfar_private *priv = netdev_priv(dev);
528 538
529 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM)) 539 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
530 return -EOPNOTSUPP; 540 return -EOPNOTSUPP;
531 541
532 spin_lock_irqsave(&priv->txlock, flags); 542 netif_tx_lock_bh(dev);
533 gfar_halt(dev);
534 543
535 if (data) 544 if (data)
536 dev->features |= NETIF_F_IP_CSUM; 545 dev->features |= NETIF_F_IP_CSUM;
537 else 546 else
538 dev->features &= ~NETIF_F_IP_CSUM; 547 dev->features &= ~NETIF_F_IP_CSUM;
539 548
540 gfar_start(dev); 549 netif_tx_unlock_bh(dev);
541 spin_unlock_irqrestore(&priv->txlock, flags);
542 550
543 return 0; 551 return 0;
544} 552}
@@ -547,7 +555,7 @@ static uint32_t gfar_get_tx_csum(struct net_device *dev)
547{ 555{
548 struct gfar_private *priv = netdev_priv(dev); 556 struct gfar_private *priv = netdev_priv(dev);
549 557
550 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM)) 558 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
551 return 0; 559 return 0;
552 560
553 return (dev->features & NETIF_F_IP_CSUM) != 0; 561 return (dev->features & NETIF_F_IP_CSUM) != 0;
@@ -570,7 +578,7 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
570{ 578{
571 struct gfar_private *priv = netdev_priv(dev); 579 struct gfar_private *priv = netdev_priv(dev);
572 580
573 if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) { 581 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
574 wol->supported = WAKE_MAGIC; 582 wol->supported = WAKE_MAGIC;
575 wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0; 583 wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
576 } else { 584 } else {
@@ -583,7 +591,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
583 struct gfar_private *priv = netdev_priv(dev); 591 struct gfar_private *priv = netdev_priv(dev);
584 unsigned long flags; 592 unsigned long flags;
585 593
586 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 594 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
587 wol->wolopts != 0) 595 wol->wolopts != 0)
588 return -EINVAL; 596 return -EINVAL;
589 597
@@ -616,6 +624,7 @@ const struct ethtool_ops gfar_ethtool_ops = {
616 .get_tx_csum = gfar_get_tx_csum, 624 .get_tx_csum = gfar_get_tx_csum,
617 .set_rx_csum = gfar_set_rx_csum, 625 .set_rx_csum = gfar_set_rx_csum,
618 .set_tx_csum = gfar_set_tx_csum, 626 .set_tx_csum = gfar_set_tx_csum,
627 .set_sg = ethtool_op_set_sg,
619 .get_msglevel = gfar_get_msglevel, 628 .get_msglevel = gfar_get_msglevel,
620 .set_msglevel = gfar_set_msglevel, 629 .set_msglevel = gfar_set_msglevel,
621#ifdef CONFIG_PM 630#ifdef CONFIG_PM
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 0e2595d24933..f3706e415b45 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -34,6 +34,8 @@
34#include <linux/crc32.h> 34#include <linux/crc32.h>
35#include <linux/mii.h> 35#include <linux/mii.h>
36#include <linux/phy.h> 36#include <linux/phy.h>
37#include <linux/of.h>
38#include <linux/of_platform.h>
37 39
38#include <asm/io.h> 40#include <asm/io.h>
39#include <asm/irq.h> 41#include <asm/irq.h>
@@ -150,19 +152,83 @@ static int gfar_mdio_reset(struct mii_bus *bus)
150 return 0; 152 return 0;
151} 153}
152 154
155/* Allocate an array which provides irq #s for each PHY on the given bus */
156static int *create_irq_map(struct device_node *np)
157{
158 int *irqs;
159 int i;
160 struct device_node *child = NULL;
161
162 irqs = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
163
164 if (!irqs)
165 return NULL;
166
167 for (i = 0; i < PHY_MAX_ADDR; i++)
168 irqs[i] = PHY_POLL;
169
170 while ((child = of_get_next_child(np, child)) != NULL) {
171 int irq = irq_of_parse_and_map(child, 0);
172 const u32 *id;
173
174 if (irq == NO_IRQ)
175 continue;
176
177 id = of_get_property(child, "reg", NULL);
178
179 if (!id)
180 continue;
181
182 if (*id < PHY_MAX_ADDR && *id >= 0)
183 irqs[*id] = irq;
184 else
185 printk(KERN_WARNING "%s: "
186 "%d is not a valid PHY address\n",
187 np->full_name, *id);
188 }
189
190 return irqs;
191}
192
193
194void gfar_mdio_bus_name(char *name, struct device_node *np)
195{
196 const u32 *reg;
197
198 reg = of_get_property(np, "reg", NULL);
153 199
154static int gfar_mdio_probe(struct device *dev) 200 snprintf(name, MII_BUS_ID_SIZE, "%s@%x", np->name, reg ? *reg : 0);
201}
202
203/* Scan the bus in reverse, looking for an empty spot */
204static int gfar_mdio_find_free(struct mii_bus *new_bus)
205{
206 int i;
207
208 for (i = PHY_MAX_ADDR; i > 0; i--) {
209 u32 phy_id;
210
211 if (get_phy_id(new_bus, i, &phy_id))
212 return -1;
213
214 if (phy_id == 0xffffffff)
215 break;
216 }
217
218 return i;
219}
220
221static int gfar_mdio_probe(struct of_device *ofdev,
222 const struct of_device_id *match)
155{ 223{
156 struct platform_device *pdev = to_platform_device(dev);
157 struct gianfar_mdio_data *pdata;
158 struct gfar_mii __iomem *regs; 224 struct gfar_mii __iomem *regs;
159 struct gfar __iomem *enet_regs; 225 struct gfar __iomem *enet_regs;
160 struct mii_bus *new_bus; 226 struct mii_bus *new_bus;
161 struct resource *r; 227 int err = 0;
162 int i, err = 0; 228 u64 addr, size;
163 229 struct device_node *np = ofdev->node;
164 if (NULL == dev) 230 struct device_node *tbi;
165 return -EINVAL; 231 int tbiaddr = -1;
166 232
167 new_bus = mdiobus_alloc(); 233 new_bus = mdiobus_alloc();
168 if (NULL == new_bus) 234 if (NULL == new_bus)
@@ -172,31 +238,28 @@ static int gfar_mdio_probe(struct device *dev)
172 new_bus->read = &gfar_mdio_read, 238 new_bus->read = &gfar_mdio_read,
173 new_bus->write = &gfar_mdio_write, 239 new_bus->write = &gfar_mdio_write,
174 new_bus->reset = &gfar_mdio_reset, 240 new_bus->reset = &gfar_mdio_reset,
175 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); 241 gfar_mdio_bus_name(new_bus->id, np);
176
177 pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data;
178
179 if (NULL == pdata) {
180 printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id);
181 return -ENODEV;
182 }
183
184 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
185 242
186 /* Set the PHY base address */ 243 /* Set the PHY base address */
187 regs = ioremap(r->start, sizeof (struct gfar_mii)); 244 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
245 regs = ioremap(addr, size);
188 246
189 if (NULL == regs) { 247 if (NULL == regs) {
190 err = -ENOMEM; 248 err = -ENOMEM;
191 goto reg_map_fail; 249 goto err_free_bus;
192 } 250 }
193 251
194 new_bus->priv = (void __force *)regs; 252 new_bus->priv = (void __force *)regs;
195 253
196 new_bus->irq = pdata->irq; 254 new_bus->irq = create_irq_map(np);
255
256 if (new_bus->irq == NULL) {
257 err = -ENOMEM;
258 goto err_unmap_regs;
259 }
197 260
198 new_bus->parent = dev; 261 new_bus->parent = &ofdev->dev;
199 dev_set_drvdata(dev, new_bus); 262 dev_set_drvdata(&ofdev->dev, new_bus);
200 263
201 /* 264 /*
202 * This is mildly evil, but so is our hardware for doing this. 265 * This is mildly evil, but so is our hardware for doing this.
@@ -206,96 +269,109 @@ static int gfar_mdio_probe(struct device *dev)
206 enet_regs = (struct gfar __iomem *) 269 enet_regs = (struct gfar __iomem *)
207 ((char *)regs - offsetof(struct gfar, gfar_mii_regs)); 270 ((char *)regs - offsetof(struct gfar, gfar_mii_regs));
208 271
209 /* Scan the bus, looking for an empty spot for TBIPA */ 272 for_each_child_of_node(np, tbi) {
210 gfar_write(&enet_regs->tbipa, 0); 273 if (!strncmp(tbi->type, "tbi-phy", 8))
211 for (i = PHY_MAX_ADDR; i > 0; i--) { 274 break;
212 u32 phy_id; 275 }
213 276
214 err = get_phy_id(new_bus, i, &phy_id); 277 if (tbi) {
215 if (err) 278 const u32 *prop = of_get_property(tbi, "reg", NULL);
216 goto bus_register_fail;
217 279
218 if (phy_id == 0xffffffff) 280 if (prop)
219 break; 281 tbiaddr = *prop;
220 } 282 }
221 283
222 /* The bus is full. We don't support using 31 PHYs, sorry */ 284 if (tbiaddr == -1) {
223 if (i == 0) { 285 gfar_write(&enet_regs->tbipa, 0);
286
287 tbiaddr = gfar_mdio_find_free(new_bus);
288 }
289
290 /*
291 * We define TBIPA at 0 to be illegal, opting to fail for boards that
292 * have PHYs at 1-31, rather than change tbipa and rescan.
293 */
294 if (tbiaddr == 0) {
224 err = -EBUSY; 295 err = -EBUSY;
225 296
226 goto bus_register_fail; 297 goto err_free_irqs;
227 } 298 }
228 299
229 gfar_write(&enet_regs->tbipa, i); 300 gfar_write(&enet_regs->tbipa, tbiaddr);
301
302 /*
303 * The TBIPHY-only buses will find PHYs at every address,
304 * so we mask them all but the TBI
305 */
306 if (!of_device_is_compatible(np, "fsl,gianfar-mdio"))
307 new_bus->phy_mask = ~(1 << tbiaddr);
230 308
231 err = mdiobus_register(new_bus); 309 err = mdiobus_register(new_bus);
232 310
233 if (0 != err) { 311 if (err != 0) {
234 printk (KERN_ERR "%s: Cannot register as MDIO bus\n", 312 printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
235 new_bus->name); 313 new_bus->name);
236 goto bus_register_fail; 314 goto err_free_irqs;
237 } 315 }
238 316
239 return 0; 317 return 0;
240 318
241bus_register_fail: 319err_free_irqs:
320 kfree(new_bus->irq);
321err_unmap_regs:
242 iounmap(regs); 322 iounmap(regs);
243reg_map_fail: 323err_free_bus:
244 mdiobus_free(new_bus); 324 mdiobus_free(new_bus);
245 325
246 return err; 326 return err;
247} 327}
248 328
249 329
250static int gfar_mdio_remove(struct device *dev) 330static int gfar_mdio_remove(struct of_device *ofdev)
251{ 331{
252 struct mii_bus *bus = dev_get_drvdata(dev); 332 struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
253 333
254 mdiobus_unregister(bus); 334 mdiobus_unregister(bus);
255 335
256 dev_set_drvdata(dev, NULL); 336 dev_set_drvdata(&ofdev->dev, NULL);
257 337
258 iounmap((void __iomem *)bus->priv); 338 iounmap((void __iomem *)bus->priv);
259 bus->priv = NULL; 339 bus->priv = NULL;
340 kfree(bus->irq);
260 mdiobus_free(bus); 341 mdiobus_free(bus);
261 342
262 return 0; 343 return 0;
263} 344}
264 345
265static struct device_driver gianfar_mdio_driver = { 346static struct of_device_id gfar_mdio_match[] =
347{
348 {
349 .compatible = "fsl,gianfar-mdio",
350 },
351 {
352 .compatible = "fsl,gianfar-tbi",
353 },
354 {
355 .type = "mdio",
356 .compatible = "gianfar",
357 },
358 {},
359};
360
361static struct of_platform_driver gianfar_mdio_driver = {
266 .name = "fsl-gianfar_mdio", 362 .name = "fsl-gianfar_mdio",
267 .bus = &platform_bus_type, 363 .match_table = gfar_mdio_match,
364
268 .probe = gfar_mdio_probe, 365 .probe = gfar_mdio_probe,
269 .remove = gfar_mdio_remove, 366 .remove = gfar_mdio_remove,
270}; 367};
271 368
272static int match_mdio_bus(struct device *dev, void *data)
273{
274 const struct gfar_private *priv = data;
275 const struct platform_device *pdev = to_platform_device(dev);
276
277 return !strcmp(pdev->name, gianfar_mdio_driver.name) &&
278 pdev->id == priv->einfo->mdio_bus;
279}
280
281/* Given a gfar_priv structure, find the mii_bus controlled by this device (not
282 * necessarily the same as the bus the gfar's PHY is on), if one exists.
283 * Normally only the first gianfar controls a mii_bus. */
284struct mii_bus *gfar_get_miibus(const struct gfar_private *priv)
285{
286 /*const*/ struct device *d;
287
288 d = bus_find_device(gianfar_mdio_driver.bus, NULL, (void *)priv,
289 match_mdio_bus);
290 return d ? dev_get_drvdata(d) : NULL;
291}
292
293int __init gfar_mdio_init(void) 369int __init gfar_mdio_init(void)
294{ 370{
295 return driver_register(&gianfar_mdio_driver); 371 return of_register_platform_driver(&gianfar_mdio_driver);
296} 372}
297 373
298void gfar_mdio_exit(void) 374void gfar_mdio_exit(void)
299{ 375{
300 driver_unregister(&gianfar_mdio_driver); 376 of_unregister_platform_driver(&gianfar_mdio_driver);
301} 377}
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
index 02dc970ca1ff..65c242cd468a 100644
--- a/drivers/net/gianfar_mii.h
+++ b/drivers/net/gianfar_mii.h
@@ -49,4 +49,6 @@ int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
49struct mii_bus *gfar_get_miibus(const struct gfar_private *priv); 49struct mii_bus *gfar_get_miibus(const struct gfar_private *priv);
50int __init gfar_mdio_init(void); 50int __init gfar_mdio_init(void);
51void gfar_mdio_exit(void); 51void gfar_mdio_exit(void);
52
53void gfar_mdio_bus_name(char *name, struct device_node *np);
52#endif /* GIANFAR_PHY_H */ 54#endif /* GIANFAR_PHY_H */
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 3199526bcecb..32200227c923 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -568,6 +568,19 @@ static void set_rx_mode(struct net_device *dev);
568static const struct ethtool_ops ethtool_ops; 568static const struct ethtool_ops ethtool_ops;
569static const struct ethtool_ops ethtool_ops_no_mii; 569static const struct ethtool_ops ethtool_ops_no_mii;
570 570
571static const struct net_device_ops hamachi_netdev_ops = {
572 .ndo_open = hamachi_open,
573 .ndo_stop = hamachi_close,
574 .ndo_start_xmit = hamachi_start_xmit,
575 .ndo_get_stats = hamachi_get_stats,
576 .ndo_set_multicast_list = set_rx_mode,
577 .ndo_change_mtu = eth_change_mtu,
578 .ndo_validate_addr = eth_validate_addr,
579 .ndo_tx_timeout = hamachi_tx_timeout,
580 .ndo_do_ioctl = netdev_ioctl,
581};
582
583
571static int __devinit hamachi_init_one (struct pci_dev *pdev, 584static int __devinit hamachi_init_one (struct pci_dev *pdev,
572 const struct pci_device_id *ent) 585 const struct pci_device_id *ent)
573{ 586{
@@ -582,7 +595,6 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev,
582 void *ring_space; 595 void *ring_space;
583 dma_addr_t ring_dma; 596 dma_addr_t ring_dma;
584 int ret = -ENOMEM; 597 int ret = -ENOMEM;
585 DECLARE_MAC_BUF(mac);
586 598
587/* when built into the kernel, we only print version if device is found */ 599/* when built into the kernel, we only print version if device is found */
588#ifndef MODULE 600#ifndef MODULE
@@ -723,17 +735,11 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev,
723 735
724 736
725 /* The Hamachi-specific entries in the device structure. */ 737 /* The Hamachi-specific entries in the device structure. */
726 dev->open = &hamachi_open; 738 dev->netdev_ops = &hamachi_netdev_ops;
727 dev->hard_start_xmit = &hamachi_start_xmit;
728 dev->stop = &hamachi_close;
729 dev->get_stats = &hamachi_get_stats;
730 dev->set_multicast_list = &set_rx_mode;
731 dev->do_ioctl = &netdev_ioctl;
732 if (chip_tbl[hmp->chip_id].flags & CanHaveMII) 739 if (chip_tbl[hmp->chip_id].flags & CanHaveMII)
733 SET_ETHTOOL_OPS(dev, &ethtool_ops); 740 SET_ETHTOOL_OPS(dev, &ethtool_ops);
734 else 741 else
735 SET_ETHTOOL_OPS(dev, &ethtool_ops_no_mii); 742 SET_ETHTOOL_OPS(dev, &ethtool_ops_no_mii);
736 dev->tx_timeout = &hamachi_tx_timeout;
737 dev->watchdog_timeo = TX_TIMEOUT; 743 dev->watchdog_timeo = TX_TIMEOUT;
738 if (mtu) 744 if (mtu)
739 dev->mtu = mtu; 745 dev->mtu = mtu;
@@ -744,9 +750,9 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev,
744 goto err_out_unmap_rx; 750 goto err_out_unmap_rx;
745 } 751 }
746 752
747 printk(KERN_INFO "%s: %s type %x at %p, %s, IRQ %d.\n", 753 printk(KERN_INFO "%s: %s type %x at %p, %pM, IRQ %d.\n",
748 dev->name, chip_tbl[chip_id].name, readl(ioaddr + ChipRev), 754 dev->name, chip_tbl[chip_id].name, readl(ioaddr + ChipRev),
749 ioaddr, print_mac(mac, dev->dev_addr), irq); 755 ioaddr, dev->dev_addr, irq);
750 i = readb(ioaddr + PCIClkMeas); 756 i = readb(ioaddr + PCIClkMeas);
751 printk(KERN_INFO "%s: %d-bit %d Mhz PCI bus (%d), Virtual Jumpers " 757 printk(KERN_INFO "%s: %d-bit %d Mhz PCI bus (%d), Virtual Jumpers "
752 "%2.2x, LPA %4.4x.\n", 758 "%2.2x, LPA %4.4x.\n",
@@ -1646,7 +1652,6 @@ static int hamachi_rx(struct net_device *dev)
1646#endif /* RX_CHECKSUM */ 1652#endif /* RX_CHECKSUM */
1647 1653
1648 netif_rx(skb); 1654 netif_rx(skb);
1649 dev->last_rx = jiffies;
1650 hmp->stats.rx_packets++; 1655 hmp->stats.rx_packets++;
1651 } 1656 }
1652 entry = (++hmp->cur_rx) % RX_RING_SIZE; 1657 entry = (++hmp->cur_rx) % RX_RING_SIZE;
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 0f501d2ca935..50f1e172ee8f 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -373,7 +373,6 @@ static void sp_bump(struct sixpack *sp, char cmd)
373 memcpy(ptr, sp->cooked_buf + 1, count); 373 memcpy(ptr, sp->cooked_buf + 1, count);
374 skb->protocol = ax25_type_trans(skb, sp->dev); 374 skb->protocol = ax25_type_trans(skb, sp->dev);
375 netif_rx(skb); 375 netif_rx(skb);
376 sp->dev->last_rx = jiffies;
377 sp->dev->stats.rx_packets++; 376 sp->dev->stats.rx_packets++;
378 377
379 return; 378 return;
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 00bc7fbb6b37..81a65e3a1c05 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -555,7 +555,6 @@ static void do_rxpacket(struct net_device *dev)
555 memcpy(cp, bc->hdlcrx.buf, pktlen - 1); 555 memcpy(cp, bc->hdlcrx.buf, pktlen - 1);
556 skb->protocol = ax25_type_trans(skb, dev); 556 skb->protocol = ax25_type_trans(skb, dev);
557 netif_rx(skb); 557 netif_rx(skb);
558 dev->last_rx = jiffies;
559 bc->stats.rx_packets++; 558 bc->stats.rx_packets++;
560} 559}
561 560
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 58f4b1d7bf1f..46f8f3390e7d 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -230,7 +230,6 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
230 230
231 skb->protocol = ax25_type_trans(skb, dev); 231 skb->protocol = ax25_type_trans(skb, dev);
232 netif_rx(skb); 232 netif_rx(skb);
233 dev->last_rx = jiffies;
234unlock: 233unlock:
235 234
236 rcu_read_unlock(); 235 rcu_read_unlock();
@@ -441,16 +440,15 @@ static int bpq_seq_show(struct seq_file *seq, void *v)
441 "dev ether destination accept from\n"); 440 "dev ether destination accept from\n");
442 else { 441 else {
443 const struct bpqdev *bpqdev = v; 442 const struct bpqdev *bpqdev = v;
444 DECLARE_MAC_BUF(mac);
445 443
446 seq_printf(seq, "%-5s %-10s %s ", 444 seq_printf(seq, "%-5s %-10s %pM ",
447 bpqdev->axdev->name, bpqdev->ethdev->name, 445 bpqdev->axdev->name, bpqdev->ethdev->name,
448 print_mac(mac, bpqdev->dest_addr)); 446 bpqdev->dest_addr);
449 447
450 if (is_multicast_ether_addr(bpqdev->acpt_addr)) 448 if (is_multicast_ether_addr(bpqdev->acpt_addr))
451 seq_printf(seq, "*\n"); 449 seq_printf(seq, "*\n");
452 else 450 else
453 seq_printf(seq, "%s\n", print_mac(mac, bpqdev->acpt_addr)); 451 seq_printf(seq, "%pM\n", bpqdev->acpt_addr);
454 452
455 } 453 }
456 return 0; 454 return 0;
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index e8cfadefa4b6..e67103396ed7 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -572,7 +572,7 @@ static int __init setup_adapter(int card_base, int type, int n)
572 priv->param.persist = 256; 572 priv->param.persist = 256;
573 priv->param.dma = -1; 573 priv->param.dma = -1;
574 INIT_WORK(&priv->rx_work, rx_bh); 574 INIT_WORK(&priv->rx_work, rx_bh);
575 dev->priv = priv; 575 dev->ml_priv = priv;
576 sprintf(dev->name, "dmascc%i", 2 * n + i); 576 sprintf(dev->name, "dmascc%i", 2 * n + i);
577 dev->base_addr = card_base; 577 dev->base_addr = card_base;
578 dev->irq = irq; 578 dev->irq = irq;
@@ -720,7 +720,7 @@ static int read_scc_data(struct scc_priv *priv)
720 720
721static int scc_open(struct net_device *dev) 721static int scc_open(struct net_device *dev)
722{ 722{
723 struct scc_priv *priv = dev->priv; 723 struct scc_priv *priv = dev->ml_priv;
724 struct scc_info *info = priv->info; 724 struct scc_info *info = priv->info;
725 int card_base = priv->card_base; 725 int card_base = priv->card_base;
726 726
@@ -862,7 +862,7 @@ static int scc_open(struct net_device *dev)
862 862
863static int scc_close(struct net_device *dev) 863static int scc_close(struct net_device *dev)
864{ 864{
865 struct scc_priv *priv = dev->priv; 865 struct scc_priv *priv = dev->ml_priv;
866 struct scc_info *info = priv->info; 866 struct scc_info *info = priv->info;
867 int card_base = priv->card_base; 867 int card_base = priv->card_base;
868 868
@@ -891,7 +891,7 @@ static int scc_close(struct net_device *dev)
891 891
892static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 892static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
893{ 893{
894 struct scc_priv *priv = dev->priv; 894 struct scc_priv *priv = dev->ml_priv;
895 895
896 switch (cmd) { 896 switch (cmd) {
897 case SIOCGSCCPARAM: 897 case SIOCGSCCPARAM:
@@ -918,7 +918,7 @@ static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
918 918
919static int scc_send_packet(struct sk_buff *skb, struct net_device *dev) 919static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
920{ 920{
921 struct scc_priv *priv = dev->priv; 921 struct scc_priv *priv = dev->ml_priv;
922 unsigned long flags; 922 unsigned long flags;
923 int i; 923 int i;
924 924
@@ -963,7 +963,7 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
963 963
964static struct net_device_stats *scc_get_stats(struct net_device *dev) 964static struct net_device_stats *scc_get_stats(struct net_device *dev)
965{ 965{
966 struct scc_priv *priv = dev->priv; 966 struct scc_priv *priv = dev->ml_priv;
967 967
968 return &priv->stats; 968 return &priv->stats;
969} 969}
@@ -1283,7 +1283,6 @@ static void rx_bh(struct work_struct *ugli_api)
1283 memcpy(&data[1], priv->rx_buf[i], cb); 1283 memcpy(&data[1], priv->rx_buf[i], cb);
1284 skb->protocol = ax25_type_trans(skb, priv->dev); 1284 skb->protocol = ax25_type_trans(skb, priv->dev);
1285 netif_rx(skb); 1285 netif_rx(skb);
1286 priv->dev->last_rx = jiffies;
1287 priv->stats.rx_packets++; 1286 priv->stats.rx_packets++;
1288 priv->stats.rx_bytes += cb; 1287 priv->stats.rx_bytes += cb;
1289 } 1288 }
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index c258a0586e61..8eba61a1d4ab 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -162,7 +162,6 @@ static void hdlc_rx_flag(struct net_device *dev, struct hdlcdrv_state *s)
162 memcpy(cp, s->hdlcrx.buffer, pkt_len - 1); 162 memcpy(cp, s->hdlcrx.buffer, pkt_len - 1);
163 skb->protocol = ax25_type_trans(skb, dev); 163 skb->protocol = ax25_type_trans(skb, dev);
164 netif_rx(skb); 164 netif_rx(skb);
165 dev->last_rx = jiffies;
166 s->stats.rx_packets++; 165 s->stats.rx_packets++;
167} 166}
168 167
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index b8e25c4624d2..bbdb311b8420 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -303,7 +303,6 @@ static void ax_bump(struct mkiss *ax)
303 memcpy(skb_put(skb,count), ax->rbuff, count); 303 memcpy(skb_put(skb,count), ax->rbuff, count);
304 skb->protocol = ax25_type_trans(skb, ax->dev); 304 skb->protocol = ax25_type_trans(skb, ax->dev);
305 netif_rx(skb); 305 netif_rx(skb);
306 ax->dev->last_rx = jiffies;
307 ax->stats.rx_packets++; 306 ax->stats.rx_packets++;
308 ax->stats.rx_bytes += count; 307 ax->stats.rx_bytes += count;
309 spin_unlock_bh(&ax->buflock); 308 spin_unlock_bh(&ax->buflock);
@@ -847,12 +846,13 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
847 unsigned int cmd, unsigned long arg) 846 unsigned int cmd, unsigned long arg)
848{ 847{
849 struct mkiss *ax = mkiss_get(tty); 848 struct mkiss *ax = mkiss_get(tty);
850 struct net_device *dev = ax->dev; 849 struct net_device *dev;
851 unsigned int tmp, err; 850 unsigned int tmp, err;
852 851
853 /* First make sure we're connected. */ 852 /* First make sure we're connected. */
854 if (ax == NULL) 853 if (ax == NULL)
855 return -ENXIO; 854 return -ENXIO;
855 dev = ax->dev;
856 856
857 switch (cmd) { 857 switch (cmd) {
858 case SIOCGIFNAME: 858 case SIOCGIFNAME:
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index c17e39bc5460..c011af7088ea 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1518,7 +1518,7 @@ static int scc_net_alloc(const char *name, struct scc_channel *scc)
1518 if (!dev) 1518 if (!dev)
1519 return -ENOMEM; 1519 return -ENOMEM;
1520 1520
1521 dev->priv = scc; 1521 dev->ml_priv = scc;
1522 scc->dev = dev; 1522 scc->dev = dev;
1523 spin_lock_init(&scc->lock); 1523 spin_lock_init(&scc->lock);
1524 init_timer(&scc->tx_t); 1524 init_timer(&scc->tx_t);
@@ -1575,7 +1575,7 @@ static void scc_net_setup(struct net_device *dev)
1575 1575
1576static int scc_net_open(struct net_device *dev) 1576static int scc_net_open(struct net_device *dev)
1577{ 1577{
1578 struct scc_channel *scc = (struct scc_channel *) dev->priv; 1578 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1579 1579
1580 if (!scc->init) 1580 if (!scc->init)
1581 return -EINVAL; 1581 return -EINVAL;
@@ -1593,7 +1593,7 @@ static int scc_net_open(struct net_device *dev)
1593 1593
1594static int scc_net_close(struct net_device *dev) 1594static int scc_net_close(struct net_device *dev)
1595{ 1595{
1596 struct scc_channel *scc = (struct scc_channel *) dev->priv; 1596 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1597 unsigned long flags; 1597 unsigned long flags;
1598 1598
1599 netif_stop_queue(dev); 1599 netif_stop_queue(dev);
@@ -1627,7 +1627,6 @@ static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
1627 skb->protocol = ax25_type_trans(skb, scc->dev); 1627 skb->protocol = ax25_type_trans(skb, scc->dev);
1628 1628
1629 netif_rx(skb); 1629 netif_rx(skb);
1630 scc->dev->last_rx = jiffies;
1631 return; 1630 return;
1632} 1631}
1633 1632
@@ -1635,7 +1634,7 @@ static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
1635 1634
1636static int scc_net_tx(struct sk_buff *skb, struct net_device *dev) 1635static int scc_net_tx(struct sk_buff *skb, struct net_device *dev)
1637{ 1636{
1638 struct scc_channel *scc = (struct scc_channel *) dev->priv; 1637 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1639 unsigned long flags; 1638 unsigned long flags;
1640 char kisscmd; 1639 char kisscmd;
1641 1640
@@ -1705,7 +1704,7 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1705 struct scc_mem_config memcfg; 1704 struct scc_mem_config memcfg;
1706 struct scc_hw_config hwcfg; 1705 struct scc_hw_config hwcfg;
1707 struct scc_calibrate cal; 1706 struct scc_calibrate cal;
1708 struct scc_channel *scc = (struct scc_channel *) dev->priv; 1707 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1709 int chan; 1708 int chan;
1710 unsigned char device_name[IFNAMSIZ]; 1709 unsigned char device_name[IFNAMSIZ];
1711 void __user *arg = ifr->ifr_data; 1710 void __user *arg = ifr->ifr_data;
@@ -1952,7 +1951,7 @@ static int scc_net_set_mac_address(struct net_device *dev, void *addr)
1952 1951
1953static struct net_device_stats *scc_net_get_stats(struct net_device *dev) 1952static struct net_device_stats *scc_net_get_stats(struct net_device *dev)
1954{ 1953{
1955 struct scc_channel *scc = (struct scc_channel *) dev->priv; 1954 struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
1956 1955
1957 scc->dev_stat.rx_errors = scc->stat.rxerrs + scc->stat.rx_over; 1956 scc->dev_stat.rx_errors = scc->stat.rxerrs + scc->stat.rx_over;
1958 scc->dev_stat.tx_errors = scc->stat.txerrs + scc->stat.tx_under; 1957 scc->dev_stat.tx_errors = scc->stat.txerrs + scc->stat.tx_under;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1c942862a3f4..5407f7486c9c 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -515,7 +515,6 @@ static inline void yam_rx_flag(struct net_device *dev, struct yam_port *yp)
515 memcpy(cp, yp->rx_buf, pkt_len - 1); 515 memcpy(cp, yp->rx_buf, pkt_len - 1);
516 skb->protocol = ax25_type_trans(skb, dev); 516 skb->protocol = ax25_type_trans(skb, dev);
517 netif_rx(skb); 517 netif_rx(skb);
518 dev->last_rx = jiffies;
519 ++yp->stats.rx_packets; 518 ++yp->stats.rx_packets;
520 } 519 }
521 } 520 }
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index c01e290d09d2..b507dbc16e62 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -158,6 +158,21 @@ out:
158} 158}
159#endif 159#endif
160 160
161static const struct net_device_ops hpp_netdev_ops = {
162 .ndo_open = hpp_open,
163 .ndo_stop = hpp_close,
164 .ndo_start_xmit = eip_start_xmit,
165 .ndo_tx_timeout = eip_tx_timeout,
166 .ndo_get_stats = eip_get_stats,
167 .ndo_set_multicast_list = eip_set_multicast_list,
168 .ndo_validate_addr = eth_validate_addr,
169 .ndo_change_mtu = eth_change_mtu,
170#ifdef CONFIG_NET_POLL_CONTROLLER
171 .ndo_poll_controller = eip_poll,
172#endif
173};
174
175
161/* Do the interesting part of the probe at a single address. */ 176/* Do the interesting part of the probe at a single address. */
162static int __init hpp_probe1(struct net_device *dev, int ioaddr) 177static int __init hpp_probe1(struct net_device *dev, int ioaddr)
163{ 178{
@@ -166,7 +181,6 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
166 const char name[] = "HP-PC-LAN+"; 181 const char name[] = "HP-PC-LAN+";
167 int mem_start; 182 int mem_start;
168 static unsigned version_printed; 183 static unsigned version_printed;
169 DECLARE_MAC_BUF(mac);
170 184
171 if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) 185 if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
172 return -EBUSY; 186 return -EBUSY;
@@ -193,7 +207,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
193 } 207 }
194 checksum += inb(ioaddr + 14); 208 checksum += inb(ioaddr + 14);
195 209
196 printk("%s", print_mac(mac, dev->dev_addr)); 210 printk("%pM", dev->dev_addr);
197 211
198 if (checksum != 0xff) { 212 if (checksum != 0xff) {
199 printk(" bad checksum %2.2x.\n", checksum); 213 printk(" bad checksum %2.2x.\n", checksum);
@@ -227,11 +241,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
227 /* Set the base address to point to the NIC, not the "real" base! */ 241 /* Set the base address to point to the NIC, not the "real" base! */
228 dev->base_addr = ioaddr + NIC_OFFSET; 242 dev->base_addr = ioaddr + NIC_OFFSET;
229 243
230 dev->open = &hpp_open; 244 dev->netdev_ops = &hpp_netdev_ops;
231 dev->stop = &hpp_close;
232#ifdef CONFIG_NET_POLL_CONTROLLER
233 dev->poll_controller = eip_poll;
234#endif
235 245
236 ei_status.name = name; 246 ei_status.name = name;
237 ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */ 247 ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */
@@ -302,8 +312,7 @@ hpp_open(struct net_device *dev)
302 /* Select the operational page. */ 312 /* Select the operational page. */
303 outw(Perf_Page, ioaddr + HP_PAGING); 313 outw(Perf_Page, ioaddr + HP_PAGING);
304 314
305 eip_open(dev); 315 return eip_open(dev);
306 return 0;
307} 316}
308 317
309static int 318static int
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 0a8c64930ad3..5c4d78c1ff42 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -59,8 +59,6 @@ static unsigned int hppclan_portlist[] __initdata =
59 59
60static int hp_probe1(struct net_device *dev, int ioaddr); 60static int hp_probe1(struct net_device *dev, int ioaddr);
61 61
62static int hp_open(struct net_device *dev);
63static int hp_close(struct net_device *dev);
64static void hp_reset_8390(struct net_device *dev); 62static void hp_reset_8390(struct net_device *dev);
65static void hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, 63static void hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
66 int ring_page); 64 int ring_page);
@@ -127,7 +125,6 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
127 int i, retval, board_id, wordmode; 125 int i, retval, board_id, wordmode;
128 const char *name; 126 const char *name;
129 static unsigned version_printed; 127 static unsigned version_printed;
130 DECLARE_MAC_BUF(mac);
131 128
132 if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME)) 129 if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
133 return -EBUSY; 130 return -EBUSY;
@@ -161,7 +158,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
161 for(i = 0; i < ETHER_ADDR_LEN; i++) 158 for(i = 0; i < ETHER_ADDR_LEN; i++)
162 dev->dev_addr[i] = inb(ioaddr + i); 159 dev->dev_addr[i] = inb(ioaddr + i);
163 160
164 printk(" %s", print_mac(mac, dev->dev_addr)); 161 printk(" %pM", dev->dev_addr);
165 162
166 /* Snarf the interrupt now. Someday this could be moved to open(). */ 163 /* Snarf the interrupt now. Someday this could be moved to open(). */
167 if (dev->irq < 2) { 164 if (dev->irq < 2) {
@@ -199,11 +196,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
199 196
200 /* Set the base address to point to the NIC, not the "real" base! */ 197 /* Set the base address to point to the NIC, not the "real" base! */
201 dev->base_addr = ioaddr + NIC_OFFSET; 198 dev->base_addr = ioaddr + NIC_OFFSET;
202 dev->open = &hp_open; 199 dev->netdev_ops = &eip_netdev_ops;
203 dev->stop = &hp_close;
204#ifdef CONFIG_NET_POLL_CONTROLLER
205 dev->poll_controller = eip_poll;
206#endif
207 200
208 ei_status.name = name; 201 ei_status.name = name;
209 ei_status.word16 = wordmode; 202 ei_status.word16 = wordmode;
@@ -228,20 +221,6 @@ out:
228 return retval; 221 return retval;
229} 222}
230 223
231static int
232hp_open(struct net_device *dev)
233{
234 eip_open(dev);
235 return 0;
236}
237
238static int
239hp_close(struct net_device *dev)
240{
241 eip_close(dev);
242 return 0;
243}
244
245static void 224static void
246hp_reset_8390(struct net_device *dev) 225hp_reset_8390(struct net_device *dev)
247{ 226{
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 571dd80fb850..ebe7651fcb86 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1212,7 +1212,7 @@ static int hp100_init_rxpdl(struct net_device *dev,
1212 *(pdlptr + 2) = (u_int) virt_to_whatever(dev, pdlptr); /* Address Frag 1 */ 1212 *(pdlptr + 2) = (u_int) virt_to_whatever(dev, pdlptr); /* Address Frag 1 */
1213 *(pdlptr + 3) = 4; /* Length Frag 1 */ 1213 *(pdlptr + 3) = 4; /* Length Frag 1 */
1214 1214
1215 return ((((MAX_RX_FRAG * 2 + 2) + 3) / 4) * 4); 1215 return roundup(MAX_RX_FRAG * 2 + 2, 4);
1216} 1216}
1217 1217
1218 1218
@@ -1227,7 +1227,7 @@ static int hp100_init_txpdl(struct net_device *dev,
1227 ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */ 1227 ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */
1228 ringptr->skb = (void *) NULL; 1228 ringptr->skb = (void *) NULL;
1229 1229
1230 return ((((MAX_TX_FRAG * 2 + 2) + 3) / 4) * 4); 1230 return roundup(MAX_TX_FRAG * 2 + 2, 4);
1231} 1231}
1232 1232
1233/* 1233/*
@@ -1256,7 +1256,7 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
1256 /* Note: This depends on the alloc_skb functions allocating more 1256 /* Note: This depends on the alloc_skb functions allocating more
1257 * space than requested, i.e. aligning to 16bytes */ 1257 * space than requested, i.e. aligning to 16bytes */
1258 1258
1259 ringptr->skb = dev_alloc_skb(((MAX_ETHER_SIZE + 2 + 3) / 4) * 4); 1259 ringptr->skb = dev_alloc_skb(roundup(MAX_ETHER_SIZE + 2, 4));
1260 1260
1261 if (NULL != ringptr->skb) { 1261 if (NULL != ringptr->skb) {
1262 /* 1262 /*
@@ -1279,7 +1279,7 @@ static int hp100_build_rx_pdl(hp100_ring_t * ringptr,
1279#ifdef HP100_DEBUG_BM 1279#ifdef HP100_DEBUG_BM
1280 printk("hp100: %s: build_rx_pdl: PDH@0x%x, skb->data (len %d) at 0x%x\n", 1280 printk("hp100: %s: build_rx_pdl: PDH@0x%x, skb->data (len %d) at 0x%x\n",
1281 dev->name, (u_int) ringptr->pdl, 1281 dev->name, (u_int) ringptr->pdl,
1282 ((MAX_ETHER_SIZE + 2 + 3) / 4) * 4, 1282 roundup(MAX_ETHER_SIZE + 2, 4),
1283 (unsigned int) ringptr->skb->data); 1283 (unsigned int) ringptr->skb->data);
1284#endif 1284#endif
1285 1285
@@ -1834,7 +1834,6 @@ static void hp100_rx(struct net_device *dev)
1834 ptr[9], ptr[10], ptr[11]); 1834 ptr[9], ptr[10], ptr[11]);
1835#endif 1835#endif
1836 netif_rx(skb); 1836 netif_rx(skb);
1837 dev->last_rx = jiffies;
1838 lp->stats.rx_packets++; 1837 lp->stats.rx_packets++;
1839 lp->stats.rx_bytes += pkt_len; 1838 lp->stats.rx_bytes += pkt_len;
1840 } 1839 }
@@ -1925,7 +1924,6 @@ static void hp100_rx_bm(struct net_device *dev)
1925 1924
1926 netif_rx(ptr->skb); /* Up and away... */ 1925 netif_rx(ptr->skb); /* Up and away... */
1927 1926
1928 dev->last_rx = jiffies;
1929 lp->stats.rx_packets++; 1927 lp->stats.rx_packets++;
1930 lp->stats.rx_bytes += pkt_len; 1928 lp->stats.rx_bytes += pkt_len;
1931 } 1929 }
@@ -2093,9 +2091,8 @@ static void hp100_set_multicast_list(struct net_device *dev)
2093 addrs = dmi->dmi_addr; 2091 addrs = dmi->dmi_addr;
2094 if ((*addrs & 0x01) == 0x01) { /* multicast address? */ 2092 if ((*addrs & 0x01) == 0x01) { /* multicast address? */
2095#ifdef HP100_DEBUG 2093#ifdef HP100_DEBUG
2096 DECLARE_MAC_BUF(mac); 2094 printk("hp100: %s: multicast = %pM, ",
2097 printk("hp100: %s: multicast = %s, ", 2095 dev->name, addrs);
2098 dev->name, print_mac(mac, addrs));
2099#endif 2096#endif
2100 for (j = idx = 0; j < 6; j++) { 2097 for (j = idx = 0; j < 6; j++) {
2101 idx ^= *addrs++ & 0x3f; 2098 idx ^= *addrs++ & 0x3f;
@@ -3057,12 +3054,3 @@ static void __exit hp100_module_exit(void)
3057 3054
3058module_init(hp100_module_init) 3055module_init(hp100_module_init)
3059module_exit(hp100_module_exit) 3056module_exit(hp100_module_exit)
3060
3061
3062/*
3063 * Local variables:
3064 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c hp100.c"
3065 * c-indent-level: 2
3066 * tab-width: 8
3067 * End:
3068 */
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index b96cf2dcb109..9cb38a8d4387 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -94,6 +94,21 @@ static int __devinit hydra_init_one(struct zorro_dev *z,
94 return 0; 94 return 0;
95} 95}
96 96
97static const struct net_device_ops hydra_netdev_ops = {
98 .ndo_open = hydra_open,
99 .ndo_stop = hydra_close,
100
101 .ndo_start_xmit = ei_start_xmit,
102 .ndo_tx_timeout = ei_tx_timeout,
103 .ndo_get_stats = ei_get_stats,
104 .ndo_set_multicast_list = ei_set_multicast_list,
105 .ndo_validate_addr = eth_validate_addr,
106 .ndo_change_mtu = eth_change_mtu,
107#ifdef CONFIG_NET_POLL_CONTROLLER
108 .ndo_poll_controller = ei_poll,
109#endif
110};
111
97static int __devinit hydra_init(struct zorro_dev *z) 112static int __devinit hydra_init(struct zorro_dev *z)
98{ 113{
99 struct net_device *dev; 114 struct net_device *dev;
@@ -103,14 +118,13 @@ static int __devinit hydra_init(struct zorro_dev *z)
103 int start_page, stop_page; 118 int start_page, stop_page;
104 int j; 119 int j;
105 int err; 120 int err;
106 DECLARE_MAC_BUF(mac);
107 121
108 static u32 hydra_offsets[16] = { 122 static u32 hydra_offsets[16] = {
109 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 123 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
110 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 124 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
111 }; 125 };
112 126
113 dev = ____alloc_ei_netdev(0); 127 dev = alloc_ei_netdev();
114 if (!dev) 128 if (!dev)
115 return -ENOMEM; 129 return -ENOMEM;
116 130
@@ -145,12 +159,8 @@ static int __devinit hydra_init(struct zorro_dev *z)
145 ei_status.block_output = &hydra_block_output; 159 ei_status.block_output = &hydra_block_output;
146 ei_status.get_8390_hdr = &hydra_get_8390_hdr; 160 ei_status.get_8390_hdr = &hydra_get_8390_hdr;
147 ei_status.reg_offset = hydra_offsets; 161 ei_status.reg_offset = hydra_offsets;
148 dev->open = &hydra_open;
149 dev->stop = &hydra_close;
150#ifdef CONFIG_NET_POLL_CONTROLLER
151 dev->poll_controller = __ei_poll;
152#endif
153 162
163 dev->netdev_ops = &hydra_netdev_ops;
154 __NS8390_init(dev, 0); 164 __NS8390_init(dev, 0);
155 165
156 err = register_netdev(dev); 166 err = register_netdev(dev);
@@ -163,8 +173,8 @@ static int __devinit hydra_init(struct zorro_dev *z)
163 zorro_set_drvdata(z, dev); 173 zorro_set_drvdata(z, dev);
164 174
165 printk(KERN_INFO "%s: Hydra at 0x%08lx, address " 175 printk(KERN_INFO "%s: Hydra at 0x%08lx, address "
166 "%s (hydra.c " HYDRA_VERSION ")\n", 176 "%pM (hydra.c " HYDRA_VERSION ")\n",
167 dev->name, z->resource.start, print_mac(mac, dev->dev_addr)); 177 dev->name, z->resource.start, dev->dev_addr);
168 178
169 return 0; 179 return 0;
170} 180}
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 901212aa37cb..87a706694fb3 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -396,9 +396,7 @@ static void emac_hash_mc(struct emac_instance *dev)
396 396
397 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) { 397 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
398 int slot, reg, mask; 398 int slot, reg, mask;
399 DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL, 399 DBG2(dev, "mc %pM" NL, dmi->dmi_addr);
400 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
401 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
402 400
403 slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr)); 401 slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr));
404 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot); 402 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
@@ -2865,11 +2863,8 @@ static int __devinit emac_probe(struct of_device *ofdev,
2865 wake_up_all(&emac_probe_wait); 2863 wake_up_all(&emac_probe_wait);
2866 2864
2867 2865
2868 printk(KERN_INFO 2866 printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
2869 "%s: EMAC-%d %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n", 2867 ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
2870 ndev->name, dev->cell_index, np->full_name,
2871 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2872 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2873 2868
2874 if (dev->phy_mode == PHY_MODE_SGMII) 2869 if (dev->phy_mode == PHY_MODE_SGMII)
2875 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name); 2870 printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name);
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index f02764725a22..5b5bf9f9861a 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -605,7 +605,6 @@ static void irqrx_handler(struct net_device *dev)
605 skb->ip_summed = CHECKSUM_NONE; 605 skb->ip_summed = CHECKSUM_NONE;
606 606
607 /* bookkeeping */ 607 /* bookkeeping */
608 dev->last_rx = jiffies;
609 dev->stats.rx_packets++; 608 dev->stats.rx_packets++;
610 dev->stats.rx_bytes += rda.length; 609 dev->stats.rx_bytes += rda.length;
611 610
@@ -914,7 +913,6 @@ static int __devinit ibmlana_init_one(struct device *kdev)
914 int base = 0, irq = 0, iobase = 0, memlen = 0; 913 int base = 0, irq = 0, iobase = 0, memlen = 0;
915 ibmlana_priv *priv; 914 ibmlana_priv *priv;
916 ibmlana_medium medium; 915 ibmlana_medium medium;
917 DECLARE_MAC_BUF(mac);
918 916
919 dev = alloc_etherdev(sizeof(ibmlana_priv)); 917 dev = alloc_etherdev(sizeof(ibmlana_priv));
920 if (!dev) 918 if (!dev)
@@ -990,10 +988,10 @@ static int __devinit ibmlana_init_one(struct device *kdev)
990 /* print config */ 988 /* print config */
991 989
992 printk(KERN_INFO "%s: IRQ %d, I/O %#lx, memory %#lx-%#lx, " 990 printk(KERN_INFO "%s: IRQ %d, I/O %#lx, memory %#lx-%#lx, "
993 "MAC address %s.\n", 991 "MAC address %pM.\n",
994 dev->name, priv->realirq, dev->base_addr, 992 dev->name, priv->realirq, dev->base_addr,
995 dev->mem_start, dev->mem_end - 1, 993 dev->mem_start, dev->mem_end - 1,
996 print_mac(mac, dev->dev_addr)); 994 dev->dev_addr);
997 printk(KERN_INFO "%s: %s medium\n", dev->name, MediaNames[priv->medium]); 995 printk(KERN_INFO "%s: %s medium\n", dev->name, MediaNames[priv->medium]);
998 996
999 /* reset board */ 997 /* reset board */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index c2d57f836088..1f055a955089 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -527,7 +527,7 @@ retry:
527 527
528static int ibmveth_open(struct net_device *netdev) 528static int ibmveth_open(struct net_device *netdev)
529{ 529{
530 struct ibmveth_adapter *adapter = netdev->priv; 530 struct ibmveth_adapter *adapter = netdev_priv(netdev);
531 u64 mac_address = 0; 531 u64 mac_address = 0;
532 int rxq_entries = 1; 532 int rxq_entries = 1;
533 unsigned long lpar_rc; 533 unsigned long lpar_rc;
@@ -666,7 +666,7 @@ static int ibmveth_open(struct net_device *netdev)
666 666
667static int ibmveth_close(struct net_device *netdev) 667static int ibmveth_close(struct net_device *netdev)
668{ 668{
669 struct ibmveth_adapter *adapter = netdev->priv; 669 struct ibmveth_adapter *adapter = netdev_priv(netdev);
670 long lpar_rc; 670 long lpar_rc;
671 671
672 ibmveth_debug_printk("close starting\n"); 672 ibmveth_debug_printk("close starting\n");
@@ -722,7 +722,7 @@ static u32 netdev_get_link(struct net_device *dev) {
722 722
723static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data) 723static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
724{ 724{
725 struct ibmveth_adapter *adapter = dev->priv; 725 struct ibmveth_adapter *adapter = netdev_priv(dev);
726 726
727 if (data) 727 if (data)
728 adapter->rx_csum = 1; 728 adapter->rx_csum = 1;
@@ -741,7 +741,7 @@ static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
741 741
742static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data) 742static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
743{ 743{
744 struct ibmveth_adapter *adapter = dev->priv; 744 struct ibmveth_adapter *adapter = netdev_priv(dev);
745 745
746 if (data) { 746 if (data) {
747 dev->features |= NETIF_F_IP_CSUM; 747 dev->features |= NETIF_F_IP_CSUM;
@@ -753,7 +753,7 @@ static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
753static int ibmveth_set_csum_offload(struct net_device *dev, u32 data, 753static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
754 void (*done) (struct net_device *, u32)) 754 void (*done) (struct net_device *, u32))
755{ 755{
756 struct ibmveth_adapter *adapter = dev->priv; 756 struct ibmveth_adapter *adapter = netdev_priv(dev);
757 u64 set_attr, clr_attr, ret_attr; 757 u64 set_attr, clr_attr, ret_attr;
758 long ret; 758 long ret;
759 int rc1 = 0, rc2 = 0; 759 int rc1 = 0, rc2 = 0;
@@ -805,7 +805,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
805 805
806static int ibmveth_set_rx_csum(struct net_device *dev, u32 data) 806static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
807{ 807{
808 struct ibmveth_adapter *adapter = dev->priv; 808 struct ibmveth_adapter *adapter = netdev_priv(dev);
809 809
810 if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum)) 810 if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum))
811 return 0; 811 return 0;
@@ -815,7 +815,7 @@ static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
815 815
816static int ibmveth_set_tx_csum(struct net_device *dev, u32 data) 816static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
817{ 817{
818 struct ibmveth_adapter *adapter = dev->priv; 818 struct ibmveth_adapter *adapter = netdev_priv(dev);
819 int rc = 0; 819 int rc = 0;
820 820
821 if (data && (dev->features & NETIF_F_IP_CSUM)) 821 if (data && (dev->features & NETIF_F_IP_CSUM))
@@ -833,7 +833,7 @@ static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
833 833
834static u32 ibmveth_get_rx_csum(struct net_device *dev) 834static u32 ibmveth_get_rx_csum(struct net_device *dev)
835{ 835{
836 struct ibmveth_adapter *adapter = dev->priv; 836 struct ibmveth_adapter *adapter = netdev_priv(dev);
837 return adapter->rx_csum; 837 return adapter->rx_csum;
838} 838}
839 839
@@ -862,7 +862,7 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
862 struct ethtool_stats *stats, u64 *data) 862 struct ethtool_stats *stats, u64 *data)
863{ 863{
864 int i; 864 int i;
865 struct ibmveth_adapter *adapter = dev->priv; 865 struct ibmveth_adapter *adapter = netdev_priv(dev);
866 866
867 for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++) 867 for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
868 data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset); 868 data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
@@ -889,7 +889,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
889 889
890static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev) 890static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
891{ 891{
892 struct ibmveth_adapter *adapter = netdev->priv; 892 struct ibmveth_adapter *adapter = netdev_priv(netdev);
893 union ibmveth_buf_desc desc; 893 union ibmveth_buf_desc desc;
894 unsigned long lpar_rc; 894 unsigned long lpar_rc;
895 unsigned long correlator; 895 unsigned long correlator;
@@ -1014,7 +1014,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
1014 netdev->stats.rx_packets++; 1014 netdev->stats.rx_packets++;
1015 netdev->stats.rx_bytes += length; 1015 netdev->stats.rx_bytes += length;
1016 frames_processed++; 1016 frames_processed++;
1017 netdev->last_rx = jiffies;
1018 } 1017 }
1019 } while (frames_processed < budget); 1018 } while (frames_processed < budget);
1020 1019
@@ -1029,7 +1028,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
1029 1028
1030 ibmveth_assert(lpar_rc == H_SUCCESS); 1029 ibmveth_assert(lpar_rc == H_SUCCESS);
1031 1030
1032 netif_rx_complete(netdev, napi); 1031 netif_rx_complete(napi);
1033 1032
1034 if (ibmveth_rxq_pending_buffer(adapter) && 1033 if (ibmveth_rxq_pending_buffer(adapter) &&
1035 netif_rx_reschedule(netdev, napi)) { 1034 netif_rx_reschedule(netdev, napi)) {
@@ -1045,21 +1044,21 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
1045static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) 1044static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1046{ 1045{
1047 struct net_device *netdev = dev_instance; 1046 struct net_device *netdev = dev_instance;
1048 struct ibmveth_adapter *adapter = netdev->priv; 1047 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1049 unsigned long lpar_rc; 1048 unsigned long lpar_rc;
1050 1049
1051 if (netif_rx_schedule_prep(netdev, &adapter->napi)) { 1050 if (netif_rx_schedule_prep(&adapter->napi)) {
1052 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1051 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1053 VIO_IRQ_DISABLE); 1052 VIO_IRQ_DISABLE);
1054 ibmveth_assert(lpar_rc == H_SUCCESS); 1053 ibmveth_assert(lpar_rc == H_SUCCESS);
1055 __netif_rx_schedule(netdev, &adapter->napi); 1054 __netif_rx_schedule(&adapter->napi);
1056 } 1055 }
1057 return IRQ_HANDLED; 1056 return IRQ_HANDLED;
1058} 1057}
1059 1058
1060static void ibmveth_set_multicast_list(struct net_device *netdev) 1059static void ibmveth_set_multicast_list(struct net_device *netdev)
1061{ 1060{
1062 struct ibmveth_adapter *adapter = netdev->priv; 1061 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1063 unsigned long lpar_rc; 1062 unsigned long lpar_rc;
1064 1063
1065 if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) { 1064 if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
@@ -1107,7 +1106,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1107 1106
1108static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) 1107static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1109{ 1108{
1110 struct ibmveth_adapter *adapter = dev->priv; 1109 struct ibmveth_adapter *adapter = netdev_priv(dev);
1111 struct vio_dev *viodev = adapter->vdev; 1110 struct vio_dev *viodev = adapter->vdev;
1112 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH; 1111 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
1113 int i; 1112 int i;
@@ -1159,7 +1158,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1159#ifdef CONFIG_NET_POLL_CONTROLLER 1158#ifdef CONFIG_NET_POLL_CONTROLLER
1160static void ibmveth_poll_controller(struct net_device *dev) 1159static void ibmveth_poll_controller(struct net_device *dev)
1161{ 1160{
1162 ibmveth_replenish_task(dev->priv); 1161 ibmveth_replenish_task(netdev_priv(dev));
1163 ibmveth_interrupt(dev->irq, dev); 1162 ibmveth_interrupt(dev->irq, dev);
1164} 1163}
1165#endif 1164#endif
@@ -1241,7 +1240,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1241 if(!netdev) 1240 if(!netdev)
1242 return -ENOMEM; 1241 return -ENOMEM;
1243 1242
1244 adapter = netdev->priv; 1243 adapter = netdev_priv(netdev);
1245 dev->dev.driver_data = netdev; 1244 dev->dev.driver_data = netdev;
1246 1245
1247 adapter->vdev = dev; 1246 adapter->vdev = dev;
@@ -1337,7 +1336,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
1337static int __devexit ibmveth_remove(struct vio_dev *dev) 1336static int __devexit ibmveth_remove(struct vio_dev *dev)
1338{ 1337{
1339 struct net_device *netdev = dev->dev.driver_data; 1338 struct net_device *netdev = dev->dev.driver_data;
1340 struct ibmveth_adapter *adapter = netdev->priv; 1339 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1341 int i; 1340 int i;
1342 1341
1343 for(i = 0; i<IbmVethNumBufferPools; i++) 1342 for(i = 0; i<IbmVethNumBufferPools; i++)
@@ -1371,13 +1370,12 @@ static int ibmveth_show(struct seq_file *seq, void *v)
1371 struct ibmveth_adapter *adapter = seq->private; 1370 struct ibmveth_adapter *adapter = seq->private;
1372 char *current_mac = ((char*) &adapter->netdev->dev_addr); 1371 char *current_mac = ((char*) &adapter->netdev->dev_addr);
1373 char *firmware_mac = ((char*) &adapter->mac_addr) ; 1372 char *firmware_mac = ((char*) &adapter->mac_addr) ;
1374 DECLARE_MAC_BUF(mac);
1375 1373
1376 seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version); 1374 seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
1377 1375
1378 seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address); 1376 seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
1379 seq_printf(seq, "Current MAC: %s\n", print_mac(mac, current_mac)); 1377 seq_printf(seq, "Current MAC: %pM\n", current_mac);
1380 seq_printf(seq, "Firmware MAC: %s\n", print_mac(mac, firmware_mac)); 1378 seq_printf(seq, "Firmware MAC: %pM\n", firmware_mac);
1381 1379
1382 seq_printf(seq, "\nAdapter Statistics:\n"); 1380 seq_printf(seq, "\nAdapter Statistics:\n");
1383 seq_printf(seq, " TX: vio_map_single failres: %ld\n", adapter->tx_map_failed); 1381 seq_printf(seq, " TX: vio_map_single failres: %ld\n", adapter->tx_map_failed);
@@ -1472,7 +1470,7 @@ const char * buf, size_t count)
1472 kobj); 1470 kobj);
1473 struct net_device *netdev = 1471 struct net_device *netdev =
1474 container_of(kobj->parent, struct device, kobj)->driver_data; 1472 container_of(kobj->parent, struct device, kobj)->driver_data;
1475 struct ibmveth_adapter *adapter = netdev->priv; 1473 struct ibmveth_adapter *adapter = netdev_priv(netdev);
1476 long value = simple_strtol(buf, NULL, 10); 1474 long value = simple_strtol(buf, NULL, 10);
1477 long rc; 1475 long rc;
1478 1476
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index e4fbefc8c82f..60a263001933 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -137,18 +137,23 @@ resched:
137 137
138} 138}
139 139
140static const struct net_device_ops ifb_netdev_ops = {
141 .ndo_open = ifb_open,
142 .ndo_stop = ifb_close,
143 .ndo_start_xmit = ifb_xmit,
144 .ndo_validate_addr = eth_validate_addr,
145};
146
140static void ifb_setup(struct net_device *dev) 147static void ifb_setup(struct net_device *dev)
141{ 148{
142 /* Initialize the device structure. */ 149 /* Initialize the device structure. */
143 dev->hard_start_xmit = ifb_xmit;
144 dev->open = &ifb_open;
145 dev->stop = &ifb_close;
146 dev->destructor = free_netdev; 150 dev->destructor = free_netdev;
151 dev->netdev_ops = &ifb_netdev_ops;
147 152
148 /* Fill in device structure with ethernet-generic values. */ 153 /* Fill in device structure with ethernet-generic values. */
149 ether_setup(dev); 154 ether_setup(dev);
150 dev->tx_queue_len = TX_Q_LIMIT; 155 dev->tx_queue_len = TX_Q_LIMIT;
151 dev->change_mtu = NULL; 156
152 dev->flags |= IFF_NOARP; 157 dev->flags |= IFF_NOARP;
153 dev->flags &= ~IFF_MULTICAST; 158 dev->flags &= ~IFF_MULTICAST;
154 random_ether_addr(dev->dev_addr); 159 random_ether_addr(dev->dev_addr);
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index ce700689fb57..40d03426c122 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -168,18 +168,12 @@
168#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ 168#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
169#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ 169#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
170#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ 170#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
171/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
172#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ 171#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
173#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ 172#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
174#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ 173#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
175#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ 174#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
176/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
177#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
178#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
179#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
180#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ 175#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
181#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ 176#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
182#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
183#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 177#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
184 178
185/* 179/*
@@ -329,6 +323,7 @@
329#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ 323#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
330#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ 324#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
331#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ 325#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
326#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
332/* Extended desc bits for Linksec and timesync */ 327/* Extended desc bits for Linksec and timesync */
333 328
334/* Transmit Control */ 329/* Transmit Control */
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index e18747c70bec..97f0049a5d6b 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -50,13 +50,6 @@ void igb_remove_device(struct e1000_hw *hw)
50 kfree(hw->dev_spec); 50 kfree(hw->dev_spec);
51} 51}
52 52
53static void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
54{
55 struct igb_adapter *adapter = hw->back;
56
57 pci_read_config_word(adapter->pdev, reg, value);
58}
59
60static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 53static s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
61{ 54{
62 struct igb_adapter *adapter = hw->back; 55 struct igb_adapter *adapter = hw->back;
@@ -83,8 +76,8 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
83{ 76{
84 struct e1000_bus_info *bus = &hw->bus; 77 struct e1000_bus_info *bus = &hw->bus;
85 s32 ret_val; 78 s32 ret_val;
86 u32 status; 79 u32 reg;
87 u16 pcie_link_status, pci_header_type; 80 u16 pcie_link_status;
88 81
89 bus->type = e1000_bus_type_pci_express; 82 bus->type = e1000_bus_type_pci_express;
90 bus->speed = e1000_bus_speed_2500; 83 bus->speed = e1000_bus_speed_2500;
@@ -99,14 +92,8 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
99 PCIE_LINK_WIDTH_MASK) >> 92 PCIE_LINK_WIDTH_MASK) >>
100 PCIE_LINK_WIDTH_SHIFT); 93 PCIE_LINK_WIDTH_SHIFT);
101 94
102 igb_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type); 95 reg = rd32(E1000_STATUS);
103 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) { 96 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
104 status = rd32(E1000_STATUS);
105 bus->func = (status & E1000_STATUS_FUNC_MASK)
106 >> E1000_STATUS_FUNC_SHIFT;
107 } else {
108 bus->func = 0;
109 }
110 97
111 return 0; 98 return 0;
112} 99}
@@ -229,8 +216,8 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
229 if (!hw->mac.disable_av) 216 if (!hw->mac.disable_av)
230 rar_high |= E1000_RAH_AV; 217 rar_high |= E1000_RAH_AV;
231 218
232 array_wr32(E1000_RA, (index << 1), rar_low); 219 wr32(E1000_RAL(index), rar_low);
233 array_wr32(E1000_RA, ((index << 1) + 1), rar_high); 220 wr32(E1000_RAH(index), rar_high);
234} 221}
235 222
236/** 223/**
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 95523af26056..bdf5d839c4bf 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -221,6 +221,10 @@
221#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 221#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
222#define E1000_RA 0x05400 /* Receive Address - RW Array */ 222#define E1000_RA 0x05400 /* Receive Address - RW Array */
223#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ 223#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
224#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
225 (0x054E0 + ((_i - 16) * 8)))
226#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
227 (0x054E4 + ((_i - 16) * 8)))
224#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ 228#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
225#define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */ 229#define E1000_VMD_CTL 0x0581C /* VMDq Control - RW */
226#define E1000_WUC 0x05800 /* Wakeup Control - RW */ 230#define E1000_WUC 0x05800 /* Wakeup Control - RW */
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 4ff6f0567f3f..5a27825cc48a 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -43,8 +43,6 @@ struct igb_adapter;
43#endif 43#endif
44 44
45/* Interrupt defines */ 45/* Interrupt defines */
46#define IGB_MAX_TX_CLEAN 72
47
48#define IGB_MIN_DYN_ITR 3000 46#define IGB_MIN_DYN_ITR 3000
49#define IGB_MAX_DYN_ITR 96000 47#define IGB_MAX_DYN_ITR 96000
50 48
@@ -127,7 +125,8 @@ struct igb_buffer {
127 /* TX */ 125 /* TX */
128 struct { 126 struct {
129 unsigned long time_stamp; 127 unsigned long time_stamp;
130 u32 length; 128 u16 length;
129 u16 next_to_watch;
131 }; 130 };
132 /* RX */ 131 /* RX */
133 struct { 132 struct {
@@ -160,7 +159,8 @@ struct igb_ring {
160 u16 itr_register; 159 u16 itr_register;
161 u16 cpu; 160 u16 cpu;
162 161
163 int queue_index; 162 u16 queue_index;
163 u16 reg_idx;
164 unsigned int total_bytes; 164 unsigned int total_bytes;
165 unsigned int total_packets; 165 unsigned int total_packets;
166 166
@@ -294,6 +294,8 @@ struct igb_adapter {
294 unsigned int lro_flushed; 294 unsigned int lro_flushed;
295 unsigned int lro_no_desc; 295 unsigned int lro_no_desc;
296#endif 296#endif
297 unsigned int tx_ring_count;
298 unsigned int rx_ring_count;
297}; 299};
298 300
299#define IGB_FLAG_HAS_MSI (1 << 0) 301#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -325,7 +327,41 @@ extern void igb_reset(struct igb_adapter *);
325extern int igb_set_spd_dplx(struct igb_adapter *, u16); 327extern int igb_set_spd_dplx(struct igb_adapter *, u16);
326extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *); 328extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *);
327extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *); 329extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *);
330extern void igb_free_tx_resources(struct igb_ring *);
331extern void igb_free_rx_resources(struct igb_ring *);
328extern void igb_update_stats(struct igb_adapter *); 332extern void igb_update_stats(struct igb_adapter *);
329extern void igb_set_ethtool_ops(struct net_device *); 333extern void igb_set_ethtool_ops(struct net_device *);
330 334
335static inline s32 igb_reset_phy(struct e1000_hw *hw)
336{
337 if (hw->phy.ops.reset_phy)
338 return hw->phy.ops.reset_phy(hw);
339
340 return 0;
341}
342
343static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
344{
345 if (hw->phy.ops.read_phy_reg)
346 return hw->phy.ops.read_phy_reg(hw, offset, data);
347
348 return 0;
349}
350
351static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
352{
353 if (hw->phy.ops.write_phy_reg)
354 return hw->phy.ops.write_phy_reg(hw, offset, data);
355
356 return 0;
357}
358
359static inline s32 igb_get_phy_info(struct e1000_hw *hw)
360{
361 if (hw->phy.ops.get_phy_info)
362 return hw->phy.ops.get_phy_info(hw);
363
364 return 0;
365}
366
331#endif /* _IGB_H_ */ 367#endif /* _IGB_H_ */
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 89964fa739a0..3c831f1472ad 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -101,8 +101,8 @@ static const struct igb_stats igb_gstrings_stats[] = {
101}; 101};
102 102
103#define IGB_QUEUE_STATS_LEN \ 103#define IGB_QUEUE_STATS_LEN \
104 ((((struct igb_adapter *)netdev->priv)->num_rx_queues + \ 104 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues + \
105 ((struct igb_adapter *)netdev->priv)->num_tx_queues) * \ 105 ((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
106 (sizeof(struct igb_queue_stats) / sizeof(u64))) 106 (sizeof(struct igb_queue_stats) / sizeof(u64)))
107#define IGB_GLOBAL_STATS_LEN \ 107#define IGB_GLOBAL_STATS_LEN \
108 sizeof(igb_gstrings_stats) / sizeof(struct igb_stats) 108 sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
@@ -494,8 +494,6 @@ static void igb_get_regs(struct net_device *netdev,
494 494
495 /* These should probably be added to e1000_regs.h instead */ 495 /* These should probably be added to e1000_regs.h instead */
496 #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4)) 496 #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
497 #define E1000_RAL(_i) (0x05400 + ((_i) * 8))
498 #define E1000_RAH(_i) (0x05404 + ((_i) * 8))
499 #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) 497 #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
500 #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) 498 #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
501 #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) 499 #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
@@ -714,15 +712,13 @@ static void igb_get_ringparam(struct net_device *netdev,
714 struct ethtool_ringparam *ring) 712 struct ethtool_ringparam *ring)
715{ 713{
716 struct igb_adapter *adapter = netdev_priv(netdev); 714 struct igb_adapter *adapter = netdev_priv(netdev);
717 struct igb_ring *tx_ring = adapter->tx_ring;
718 struct igb_ring *rx_ring = adapter->rx_ring;
719 715
720 ring->rx_max_pending = IGB_MAX_RXD; 716 ring->rx_max_pending = IGB_MAX_RXD;
721 ring->tx_max_pending = IGB_MAX_TXD; 717 ring->tx_max_pending = IGB_MAX_TXD;
722 ring->rx_mini_max_pending = 0; 718 ring->rx_mini_max_pending = 0;
723 ring->rx_jumbo_max_pending = 0; 719 ring->rx_jumbo_max_pending = 0;
724 ring->rx_pending = rx_ring->count; 720 ring->rx_pending = adapter->rx_ring_count;
725 ring->tx_pending = tx_ring->count; 721 ring->tx_pending = adapter->tx_ring_count;
726 ring->rx_mini_pending = 0; 722 ring->rx_mini_pending = 0;
727 ring->rx_jumbo_pending = 0; 723 ring->rx_jumbo_pending = 0;
728} 724}
@@ -731,12 +727,9 @@ static int igb_set_ringparam(struct net_device *netdev,
731 struct ethtool_ringparam *ring) 727 struct ethtool_ringparam *ring)
732{ 728{
733 struct igb_adapter *adapter = netdev_priv(netdev); 729 struct igb_adapter *adapter = netdev_priv(netdev);
734 struct igb_buffer *old_buf; 730 struct igb_ring *temp_ring;
735 struct igb_buffer *old_rx_buf;
736 void *old_desc;
737 int i, err; 731 int i, err;
738 u32 new_rx_count, new_tx_count, old_size; 732 u32 new_rx_count, new_tx_count;
739 dma_addr_t old_dma;
740 733
741 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 734 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
742 return -EINVAL; 735 return -EINVAL;
@@ -749,12 +742,19 @@ static int igb_set_ringparam(struct net_device *netdev,
749 new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD); 742 new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD);
750 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); 743 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
751 744
752 if ((new_tx_count == adapter->tx_ring->count) && 745 if ((new_tx_count == adapter->tx_ring_count) &&
753 (new_rx_count == adapter->rx_ring->count)) { 746 (new_rx_count == adapter->rx_ring_count)) {
754 /* nothing to do */ 747 /* nothing to do */
755 return 0; 748 return 0;
756 } 749 }
757 750
751 if (adapter->num_tx_queues > adapter->num_rx_queues)
752 temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring));
753 else
754 temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring));
755 if (!temp_ring)
756 return -ENOMEM;
757
758 while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) 758 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
759 msleep(1); 759 msleep(1);
760 760
@@ -766,62 +766,55 @@ static int igb_set_ringparam(struct net_device *netdev,
766 * because the ISRs in MSI-X mode get passed pointers 766 * because the ISRs in MSI-X mode get passed pointers
767 * to the tx and rx ring structs. 767 * to the tx and rx ring structs.
768 */ 768 */
769 if (new_tx_count != adapter->tx_ring->count) { 769 if (new_tx_count != adapter->tx_ring_count) {
770 memcpy(temp_ring, adapter->tx_ring,
771 adapter->num_tx_queues * sizeof(struct igb_ring));
772
770 for (i = 0; i < adapter->num_tx_queues; i++) { 773 for (i = 0; i < adapter->num_tx_queues; i++) {
771 /* Save existing descriptor ring */ 774 temp_ring[i].count = new_tx_count;
772 old_buf = adapter->tx_ring[i].buffer_info; 775 err = igb_setup_tx_resources(adapter, &temp_ring[i]);
773 old_desc = adapter->tx_ring[i].desc;
774 old_size = adapter->tx_ring[i].size;
775 old_dma = adapter->tx_ring[i].dma;
776 /* Try to allocate a new one */
777 adapter->tx_ring[i].buffer_info = NULL;
778 adapter->tx_ring[i].desc = NULL;
779 adapter->tx_ring[i].count = new_tx_count;
780 err = igb_setup_tx_resources(adapter,
781 &adapter->tx_ring[i]);
782 if (err) { 776 if (err) {
783 /* Restore the old one so at least 777 while (i) {
784 the adapter still works, even if 778 i--;
785 we failed the request */ 779 igb_free_tx_resources(&temp_ring[i]);
786 adapter->tx_ring[i].buffer_info = old_buf; 780 }
787 adapter->tx_ring[i].desc = old_desc;
788 adapter->tx_ring[i].size = old_size;
789 adapter->tx_ring[i].dma = old_dma;
790 goto err_setup; 781 goto err_setup;
791 } 782 }
792 /* Free the old buffer manually */
793 vfree(old_buf);
794 pci_free_consistent(adapter->pdev, old_size,
795 old_desc, old_dma);
796 } 783 }
784
785 for (i = 0; i < adapter->num_tx_queues; i++)
786 igb_free_tx_resources(&adapter->tx_ring[i]);
787
788 memcpy(adapter->tx_ring, temp_ring,
789 adapter->num_tx_queues * sizeof(struct igb_ring));
790
791 adapter->tx_ring_count = new_tx_count;
797 } 792 }
798 793
799 if (new_rx_count != adapter->rx_ring->count) { 794 if (new_rx_count != adapter->rx_ring->count) {
800 for (i = 0; i < adapter->num_rx_queues; i++) { 795 memcpy(temp_ring, adapter->rx_ring,
796 adapter->num_rx_queues * sizeof(struct igb_ring));
801 797
802 old_rx_buf = adapter->rx_ring[i].buffer_info; 798 for (i = 0; i < adapter->num_rx_queues; i++) {
803 old_desc = adapter->rx_ring[i].desc; 799 temp_ring[i].count = new_rx_count;
804 old_size = adapter->rx_ring[i].size; 800 err = igb_setup_rx_resources(adapter, &temp_ring[i]);
805 old_dma = adapter->rx_ring[i].dma;
806
807 adapter->rx_ring[i].buffer_info = NULL;
808 adapter->rx_ring[i].desc = NULL;
809 adapter->rx_ring[i].dma = 0;
810 adapter->rx_ring[i].count = new_rx_count;
811 err = igb_setup_rx_resources(adapter,
812 &adapter->rx_ring[i]);
813 if (err) { 801 if (err) {
814 adapter->rx_ring[i].buffer_info = old_rx_buf; 802 while (i) {
815 adapter->rx_ring[i].desc = old_desc; 803 i--;
816 adapter->rx_ring[i].size = old_size; 804 igb_free_rx_resources(&temp_ring[i]);
817 adapter->rx_ring[i].dma = old_dma; 805 }
818 goto err_setup; 806 goto err_setup;
819 } 807 }
820 808
821 vfree(old_rx_buf);
822 pci_free_consistent(adapter->pdev, old_size, old_desc,
823 old_dma);
824 } 809 }
810
811 for (i = 0; i < adapter->num_rx_queues; i++)
812 igb_free_rx_resources(&adapter->rx_ring[i]);
813
814 memcpy(adapter->rx_ring, temp_ring,
815 adapter->num_rx_queues * sizeof(struct igb_ring));
816
817 adapter->rx_ring_count = new_rx_count;
825 } 818 }
826 819
827 err = 0; 820 err = 0;
@@ -830,6 +823,7 @@ err_setup:
830 igb_up(adapter); 823 igb_up(adapter);
831 824
832 clear_bit(__IGB_RESETTING, &adapter->state); 825 clear_bit(__IGB_RESETTING, &adapter->state);
826 vfree(temp_ring);
833 return err; 827 return err;
834} 828}
835 829
@@ -1343,8 +1337,9 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
1343 wr32(E1000_RDLEN(0), rx_ring->size); 1337 wr32(E1000_RDLEN(0), rx_ring->size);
1344 wr32(E1000_RDH(0), 0); 1338 wr32(E1000_RDH(0), 0);
1345 wr32(E1000_RDT(0), 0); 1339 wr32(E1000_RDT(0), 0);
1340 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1346 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | 1341 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
1347 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 1342 E1000_RCTL_RDMTS_HALF |
1348 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1343 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1349 wr32(E1000_RCTL, rctl); 1344 wr32(E1000_RCTL, rctl);
1350 wr32(E1000_SRRCTL(0), 0); 1345 wr32(E1000_SRRCTL(0), 0);
@@ -1380,10 +1375,10 @@ static void igb_phy_disable_receiver(struct igb_adapter *adapter)
1380 struct e1000_hw *hw = &adapter->hw; 1375 struct e1000_hw *hw = &adapter->hw;
1381 1376
1382 /* Write out to PHY registers 29 and 30 to disable the Receiver. */ 1377 /* Write out to PHY registers 29 and 30 to disable the Receiver. */
1383 hw->phy.ops.write_phy_reg(hw, 29, 0x001F); 1378 igb_write_phy_reg(hw, 29, 0x001F);
1384 hw->phy.ops.write_phy_reg(hw, 30, 0x8FFC); 1379 igb_write_phy_reg(hw, 30, 0x8FFC);
1385 hw->phy.ops.write_phy_reg(hw, 29, 0x001A); 1380 igb_write_phy_reg(hw, 29, 0x001A);
1386 hw->phy.ops.write_phy_reg(hw, 30, 0x8FF0); 1381 igb_write_phy_reg(hw, 30, 0x8FF0);
1387} 1382}
1388 1383
1389static int igb_integrated_phy_loopback(struct igb_adapter *adapter) 1384static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
@@ -1396,17 +1391,17 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1396 1391
1397 if (hw->phy.type == e1000_phy_m88) { 1392 if (hw->phy.type == e1000_phy_m88) {
1398 /* Auto-MDI/MDIX Off */ 1393 /* Auto-MDI/MDIX Off */
1399 hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 1394 igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1400 /* reset to update Auto-MDI/MDIX */ 1395 /* reset to update Auto-MDI/MDIX */
1401 hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x9140); 1396 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
1402 /* autoneg off */ 1397 /* autoneg off */
1403 hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x8140); 1398 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
1404 } 1399 }
1405 1400
1406 ctrl_reg = rd32(E1000_CTRL); 1401 ctrl_reg = rd32(E1000_CTRL);
1407 1402
1408 /* force 1000, set loopback */ 1403 /* force 1000, set loopback */
1409 hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x4140); 1404 igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
1410 1405
1411 /* Now set up the MAC to the same speed/duplex as the PHY. */ 1406 /* Now set up the MAC to the same speed/duplex as the PHY. */
1412 ctrl_reg = rd32(E1000_CTRL); 1407 ctrl_reg = rd32(E1000_CTRL);
@@ -1500,10 +1495,10 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
1500 wr32(E1000_RCTL, rctl); 1495 wr32(E1000_RCTL, rctl);
1501 1496
1502 hw->mac.autoneg = true; 1497 hw->mac.autoneg = true;
1503 hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_reg); 1498 igb_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
1504 if (phy_reg & MII_CR_LOOPBACK) { 1499 if (phy_reg & MII_CR_LOOPBACK) {
1505 phy_reg &= ~MII_CR_LOOPBACK; 1500 phy_reg &= ~MII_CR_LOOPBACK;
1506 hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_reg); 1501 igb_write_phy_reg(hw, PHY_CONTROL, phy_reg);
1507 igb_phy_sw_reset(hw); 1502 igb_phy_sw_reset(hw);
1508 } 1503 }
1509} 1504}
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 20d27e622ec1..022794e579c7 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -42,6 +42,7 @@
42#include <linux/delay.h> 42#include <linux/delay.h>
43#include <linux/interrupt.h> 43#include <linux/interrupt.h>
44#include <linux/if_ether.h> 44#include <linux/if_ether.h>
45#include <linux/aer.h>
45#ifdef CONFIG_IGB_DCA 46#ifdef CONFIG_IGB_DCA
46#include <linux/dca.h> 47#include <linux/dca.h>
47#endif 48#endif
@@ -76,8 +77,6 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
76static int igb_setup_all_rx_resources(struct igb_adapter *); 77static int igb_setup_all_rx_resources(struct igb_adapter *);
77static void igb_free_all_tx_resources(struct igb_adapter *); 78static void igb_free_all_tx_resources(struct igb_adapter *);
78static void igb_free_all_rx_resources(struct igb_adapter *); 79static void igb_free_all_rx_resources(struct igb_adapter *);
79static void igb_free_tx_resources(struct igb_ring *);
80static void igb_free_rx_resources(struct igb_ring *);
81void igb_update_stats(struct igb_adapter *); 80void igb_update_stats(struct igb_adapter *);
82static int igb_probe(struct pci_dev *, const struct pci_device_id *); 81static int igb_probe(struct pci_dev *, const struct pci_device_id *);
83static void __devexit igb_remove(struct pci_dev *pdev); 82static void __devexit igb_remove(struct pci_dev *pdev);
@@ -232,6 +231,40 @@ static void __exit igb_exit_module(void)
232 231
233module_exit(igb_exit_module); 232module_exit(igb_exit_module);
234 233
234#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
235/**
236 * igb_cache_ring_register - Descriptor ring to register mapping
237 * @adapter: board private structure to initialize
238 *
239 * Once we know the feature-set enabled for the device, we'll cache
240 * the register offset the descriptor ring is assigned to.
241 **/
242static void igb_cache_ring_register(struct igb_adapter *adapter)
243{
244 int i;
245
246 switch (adapter->hw.mac.type) {
247 case e1000_82576:
248 /* The queues are allocated for virtualization such that VF 0
249 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
250 * In order to avoid collision we start at the first free queue
251 * and continue consuming queues in the same sequence
252 */
253 for (i = 0; i < adapter->num_rx_queues; i++)
254 adapter->rx_ring[i].reg_idx = Q_IDX_82576(i);
255 for (i = 0; i < adapter->num_tx_queues; i++)
256 adapter->tx_ring[i].reg_idx = Q_IDX_82576(i);
257 break;
258 case e1000_82575:
259 default:
260 for (i = 0; i < adapter->num_rx_queues; i++)
261 adapter->rx_ring[i].reg_idx = i;
262 for (i = 0; i < adapter->num_tx_queues; i++)
263 adapter->tx_ring[i].reg_idx = i;
264 break;
265 }
266}
267
235/** 268/**
236 * igb_alloc_queues - Allocate memory for all rings 269 * igb_alloc_queues - Allocate memory for all rings
237 * @adapter: board private structure to initialize 270 * @adapter: board private structure to initialize
@@ -259,11 +292,13 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
259 292
260 for (i = 0; i < adapter->num_tx_queues; i++) { 293 for (i = 0; i < adapter->num_tx_queues; i++) {
261 struct igb_ring *ring = &(adapter->tx_ring[i]); 294 struct igb_ring *ring = &(adapter->tx_ring[i]);
295 ring->count = adapter->tx_ring_count;
262 ring->adapter = adapter; 296 ring->adapter = adapter;
263 ring->queue_index = i; 297 ring->queue_index = i;
264 } 298 }
265 for (i = 0; i < adapter->num_rx_queues; i++) { 299 for (i = 0; i < adapter->num_rx_queues; i++) {
266 struct igb_ring *ring = &(adapter->rx_ring[i]); 300 struct igb_ring *ring = &(adapter->rx_ring[i]);
301 ring->count = adapter->rx_ring_count;
267 ring->adapter = adapter; 302 ring->adapter = adapter;
268 ring->queue_index = i; 303 ring->queue_index = i;
269 ring->itr_register = E1000_ITR; 304 ring->itr_register = E1000_ITR;
@@ -271,6 +306,8 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
271 /* set a default napi handler for each rx_ring */ 306 /* set a default napi handler for each rx_ring */
272 netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64); 307 netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
273 } 308 }
309
310 igb_cache_ring_register(adapter);
274 return 0; 311 return 0;
275} 312}
276 313
@@ -311,36 +348,36 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
311 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 348 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
312 break; 349 break;
313 case e1000_82576: 350 case e1000_82576:
314 /* The 82576 uses a table-based method for assigning vectors. 351 /* 82576 uses a table-based method for assigning vectors.
315 Each queue has a single entry in the table to which we write 352 Each queue has a single entry in the table to which we write
316 a vector number along with a "valid" bit. Sadly, the layout 353 a vector number along with a "valid" bit. Sadly, the layout
317 of the table is somewhat counterintuitive. */ 354 of the table is somewhat counterintuitive. */
318 if (rx_queue > IGB_N0_QUEUE) { 355 if (rx_queue > IGB_N0_QUEUE) {
319 index = (rx_queue & 0x7); 356 index = (rx_queue >> 1);
320 ivar = array_rd32(E1000_IVAR0, index); 357 ivar = array_rd32(E1000_IVAR0, index);
321 if (rx_queue < 8) { 358 if (rx_queue & 0x1) {
322 /* vector goes into low byte of register */
323 ivar = ivar & 0xFFFFFF00;
324 ivar |= msix_vector | E1000_IVAR_VALID;
325 } else {
326 /* vector goes into third byte of register */ 359 /* vector goes into third byte of register */
327 ivar = ivar & 0xFF00FFFF; 360 ivar = ivar & 0xFF00FFFF;
328 ivar |= (msix_vector | E1000_IVAR_VALID) << 16; 361 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
362 } else {
363 /* vector goes into low byte of register */
364 ivar = ivar & 0xFFFFFF00;
365 ivar |= msix_vector | E1000_IVAR_VALID;
329 } 366 }
330 adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector; 367 adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
331 array_wr32(E1000_IVAR0, index, ivar); 368 array_wr32(E1000_IVAR0, index, ivar);
332 } 369 }
333 if (tx_queue > IGB_N0_QUEUE) { 370 if (tx_queue > IGB_N0_QUEUE) {
334 index = (tx_queue & 0x7); 371 index = (tx_queue >> 1);
335 ivar = array_rd32(E1000_IVAR0, index); 372 ivar = array_rd32(E1000_IVAR0, index);
336 if (tx_queue < 8) { 373 if (tx_queue & 0x1) {
337 /* vector goes into second byte of register */
338 ivar = ivar & 0xFFFF00FF;
339 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
340 } else {
341 /* vector goes into high byte of register */ 374 /* vector goes into high byte of register */
342 ivar = ivar & 0x00FFFFFF; 375 ivar = ivar & 0x00FFFFFF;
343 ivar |= (msix_vector | E1000_IVAR_VALID) << 24; 376 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
377 } else {
378 /* vector goes into second byte of register */
379 ivar = ivar & 0xFFFF00FF;
380 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
344 } 381 }
345 adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector; 382 adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
346 array_wr32(E1000_IVAR0, index, ivar); 383 array_wr32(E1000_IVAR0, index, ivar);
@@ -445,7 +482,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
445 482
446 for (i = 0; i < adapter->num_tx_queues; i++) { 483 for (i = 0; i < adapter->num_tx_queues; i++) {
447 struct igb_ring *ring = &(adapter->tx_ring[i]); 484 struct igb_ring *ring = &(adapter->tx_ring[i]);
448 sprintf(ring->name, "%s-tx%d", netdev->name, i); 485 sprintf(ring->name, "%s-tx-%d", netdev->name, i);
449 err = request_irq(adapter->msix_entries[vector].vector, 486 err = request_irq(adapter->msix_entries[vector].vector,
450 &igb_msix_tx, 0, ring->name, 487 &igb_msix_tx, 0, ring->name,
451 &(adapter->tx_ring[i])); 488 &(adapter->tx_ring[i]));
@@ -458,7 +495,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
458 for (i = 0; i < adapter->num_rx_queues; i++) { 495 for (i = 0; i < adapter->num_rx_queues; i++) {
459 struct igb_ring *ring = &(adapter->rx_ring[i]); 496 struct igb_ring *ring = &(adapter->rx_ring[i]);
460 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 497 if (strlen(netdev->name) < (IFNAMSIZ - 5))
461 sprintf(ring->name, "%s-rx%d", netdev->name, i); 498 sprintf(ring->name, "%s-rx-%d", netdev->name, i);
462 else 499 else
463 memcpy(ring->name, netdev->name, IFNAMSIZ); 500 memcpy(ring->name, netdev->name, IFNAMSIZ);
464 err = request_irq(adapter->msix_entries[vector].vector, 501 err = request_irq(adapter->msix_entries[vector].vector,
@@ -931,8 +968,7 @@ void igb_reset(struct igb_adapter *adapter)
931 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 968 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
932 969
933 igb_reset_adaptive(&adapter->hw); 970 igb_reset_adaptive(&adapter->hw);
934 if (adapter->hw.phy.ops.get_phy_info) 971 igb_get_phy_info(&adapter->hw);
935 adapter->hw.phy.ops.get_phy_info(&adapter->hw);
936} 972}
937 973
938/** 974/**
@@ -950,6 +986,25 @@ static int igb_is_need_ioport(struct pci_dev *pdev)
950 } 986 }
951} 987}
952 988
989static const struct net_device_ops igb_netdev_ops = {
990 .ndo_open = igb_open,
991 .ndo_stop = igb_close,
992 .ndo_start_xmit = igb_xmit_frame_adv,
993 .ndo_get_stats = igb_get_stats,
994 .ndo_set_multicast_list = igb_set_multi,
995 .ndo_set_mac_address = igb_set_mac,
996 .ndo_change_mtu = igb_change_mtu,
997 .ndo_do_ioctl = igb_ioctl,
998 .ndo_tx_timeout = igb_tx_timeout,
999 .ndo_validate_addr = eth_validate_addr,
1000 .ndo_vlan_rx_register = igb_vlan_rx_register,
1001 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1002 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
1003#ifdef CONFIG_NET_POLL_CONTROLLER
1004 .ndo_poll_controller = igb_netpoll,
1005#endif
1006};
1007
953/** 1008/**
954 * igb_probe - Device Initialization Routine 1009 * igb_probe - Device Initialization Routine
955 * @pdev: PCI device information struct 1010 * @pdev: PCI device information struct
@@ -1031,6 +1086,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1031 if (err) 1086 if (err)
1032 goto err_pci_reg; 1087 goto err_pci_reg;
1033 1088
1089 err = pci_enable_pcie_error_reporting(pdev);
1090 if (err) {
1091 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
1092 "0x%x\n", err);
1093 /* non-fatal, continue */
1094 }
1095
1034 pci_set_master(pdev); 1096 pci_set_master(pdev);
1035 pci_save_state(pdev); 1097 pci_save_state(pdev);
1036 1098
@@ -1059,23 +1121,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1059 if (!adapter->hw.hw_addr) 1121 if (!adapter->hw.hw_addr)
1060 goto err_ioremap; 1122 goto err_ioremap;
1061 1123
1062 netdev->open = &igb_open; 1124 netdev->netdev_ops = &igb_netdev_ops;
1063 netdev->stop = &igb_close;
1064 netdev->get_stats = &igb_get_stats;
1065 netdev->set_multicast_list = &igb_set_multi;
1066 netdev->set_mac_address = &igb_set_mac;
1067 netdev->change_mtu = &igb_change_mtu;
1068 netdev->do_ioctl = &igb_ioctl;
1069 igb_set_ethtool_ops(netdev); 1125 igb_set_ethtool_ops(netdev);
1070 netdev->tx_timeout = &igb_tx_timeout;
1071 netdev->watchdog_timeo = 5 * HZ; 1126 netdev->watchdog_timeo = 5 * HZ;
1072 netdev->vlan_rx_register = igb_vlan_rx_register;
1073 netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
1074 netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
1075#ifdef CONFIG_NET_POLL_CONTROLLER
1076 netdev->poll_controller = igb_netpoll;
1077#endif
1078 netdev->hard_start_xmit = &igb_xmit_frame_adv;
1079 1127
1080 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 1128 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1081 1129
@@ -1275,16 +1323,14 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1275 1323
1276 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 1324 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1277 /* print bus type/speed/width info */ 1325 /* print bus type/speed/width info */
1278 dev_info(&pdev->dev, 1326 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
1279 "%s: (PCIe:%s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
1280 netdev->name, 1327 netdev->name,
1281 ((hw->bus.speed == e1000_bus_speed_2500) 1328 ((hw->bus.speed == e1000_bus_speed_2500)
1282 ? "2.5Gb/s" : "unknown"), 1329 ? "2.5Gb/s" : "unknown"),
1283 ((hw->bus.width == e1000_bus_width_pcie_x4) 1330 ((hw->bus.width == e1000_bus_width_pcie_x4)
1284 ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1) 1331 ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
1285 ? "Width x1" : "unknown"), 1332 ? "Width x1" : "unknown"),
1286 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], 1333 netdev->dev_addr);
1287 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
1288 1334
1289 igb_read_part_num(hw, &part_num); 1335 igb_read_part_num(hw, &part_num);
1290 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name, 1336 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
@@ -1302,7 +1348,7 @@ err_register:
1302 igb_release_hw_control(adapter); 1348 igb_release_hw_control(adapter);
1303err_eeprom: 1349err_eeprom:
1304 if (!igb_check_reset_block(hw)) 1350 if (!igb_check_reset_block(hw))
1305 hw->phy.ops.reset_phy(hw); 1351 igb_reset_phy(hw);
1306 1352
1307 if (hw->flash_address) 1353 if (hw->flash_address)
1308 iounmap(hw->flash_address); 1354 iounmap(hw->flash_address);
@@ -1338,6 +1384,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1338#ifdef CONFIG_IGB_DCA 1384#ifdef CONFIG_IGB_DCA
1339 struct e1000_hw *hw = &adapter->hw; 1385 struct e1000_hw *hw = &adapter->hw;
1340#endif 1386#endif
1387 int err;
1341 1388
1342 /* flush_scheduled work may reschedule our watchdog task, so 1389 /* flush_scheduled work may reschedule our watchdog task, so
1343 * explicitly disable watchdog tasks from being rescheduled */ 1390 * explicitly disable watchdog tasks from being rescheduled */
@@ -1362,9 +1409,8 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1362 1409
1363 unregister_netdev(netdev); 1410 unregister_netdev(netdev);
1364 1411
1365 if (adapter->hw.phy.ops.reset_phy && 1412 if (!igb_check_reset_block(&adapter->hw))
1366 !igb_check_reset_block(&adapter->hw)) 1413 igb_reset_phy(&adapter->hw);
1367 adapter->hw.phy.ops.reset_phy(&adapter->hw);
1368 1414
1369 igb_remove_device(&adapter->hw); 1415 igb_remove_device(&adapter->hw);
1370 igb_reset_interrupt_capability(adapter); 1416 igb_reset_interrupt_capability(adapter);
@@ -1378,6 +1424,11 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1378 1424
1379 free_netdev(netdev); 1425 free_netdev(netdev);
1380 1426
1427 err = pci_disable_pcie_error_reporting(pdev);
1428 if (err)
1429 dev_err(&pdev->dev,
1430 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
1431
1381 pci_disable_device(pdev); 1432 pci_disable_device(pdev);
1382} 1433}
1383 1434
@@ -1397,6 +1448,8 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
1397 1448
1398 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); 1449 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1399 1450
1451 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1452 adapter->rx_ring_count = IGB_DEFAULT_RXD;
1400 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1453 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1401 adapter->rx_ps_hdr_size = 0; /* disable packet split */ 1454 adapter->rx_ps_hdr_size = 0; /* disable packet split */
1402 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1455 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
@@ -1558,8 +1611,7 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
1558 memset(tx_ring->buffer_info, 0, size); 1611 memset(tx_ring->buffer_info, 0, size);
1559 1612
1560 /* round up to nearest 4K */ 1613 /* round up to nearest 4K */
1561 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc) 1614 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
1562 + sizeof(u32);
1563 tx_ring->size = ALIGN(tx_ring->size, 4096); 1615 tx_ring->size = ALIGN(tx_ring->size, 4096);
1564 1616
1565 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 1617 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
@@ -1618,43 +1670,37 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1618 **/ 1670 **/
1619static void igb_configure_tx(struct igb_adapter *adapter) 1671static void igb_configure_tx(struct igb_adapter *adapter)
1620{ 1672{
1621 u64 tdba, tdwba; 1673 u64 tdba;
1622 struct e1000_hw *hw = &adapter->hw; 1674 struct e1000_hw *hw = &adapter->hw;
1623 u32 tctl; 1675 u32 tctl;
1624 u32 txdctl, txctrl; 1676 u32 txdctl, txctrl;
1625 int i; 1677 int i, j;
1626 1678
1627 for (i = 0; i < adapter->num_tx_queues; i++) { 1679 for (i = 0; i < adapter->num_tx_queues; i++) {
1628 struct igb_ring *ring = &(adapter->tx_ring[i]); 1680 struct igb_ring *ring = &(adapter->tx_ring[i]);
1629 1681 j = ring->reg_idx;
1630 wr32(E1000_TDLEN(i), 1682 wr32(E1000_TDLEN(j),
1631 ring->count * sizeof(struct e1000_tx_desc)); 1683 ring->count * sizeof(struct e1000_tx_desc));
1632 tdba = ring->dma; 1684 tdba = ring->dma;
1633 wr32(E1000_TDBAL(i), 1685 wr32(E1000_TDBAL(j),
1634 tdba & 0x00000000ffffffffULL); 1686 tdba & 0x00000000ffffffffULL);
1635 wr32(E1000_TDBAH(i), tdba >> 32); 1687 wr32(E1000_TDBAH(j), tdba >> 32);
1636
1637 tdwba = ring->dma + ring->count * sizeof(struct e1000_tx_desc);
1638 tdwba |= 1; /* enable head wb */
1639 wr32(E1000_TDWBAL(i),
1640 tdwba & 0x00000000ffffffffULL);
1641 wr32(E1000_TDWBAH(i), tdwba >> 32);
1642 1688
1643 ring->head = E1000_TDH(i); 1689 ring->head = E1000_TDH(j);
1644 ring->tail = E1000_TDT(i); 1690 ring->tail = E1000_TDT(j);
1645 writel(0, hw->hw_addr + ring->tail); 1691 writel(0, hw->hw_addr + ring->tail);
1646 writel(0, hw->hw_addr + ring->head); 1692 writel(0, hw->hw_addr + ring->head);
1647 txdctl = rd32(E1000_TXDCTL(i)); 1693 txdctl = rd32(E1000_TXDCTL(j));
1648 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 1694 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1649 wr32(E1000_TXDCTL(i), txdctl); 1695 wr32(E1000_TXDCTL(j), txdctl);
1650 1696
1651 /* Turn off Relaxed Ordering on head write-backs. The 1697 /* Turn off Relaxed Ordering on head write-backs. The
1652 * writebacks MUST be delivered in order or it will 1698 * writebacks MUST be delivered in order or it will
1653 * completely screw up our bookeeping. 1699 * completely screw up our bookeeping.
1654 */ 1700 */
1655 txctrl = rd32(E1000_DCA_TXCTRL(i)); 1701 txctrl = rd32(E1000_DCA_TXCTRL(j));
1656 txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; 1702 txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1657 wr32(E1000_DCA_TXCTRL(i), txctrl); 1703 wr32(E1000_DCA_TXCTRL(j), txctrl);
1658 } 1704 }
1659 1705
1660 1706
@@ -1771,14 +1817,14 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
1771 struct e1000_hw *hw = &adapter->hw; 1817 struct e1000_hw *hw = &adapter->hw;
1772 u32 rctl; 1818 u32 rctl;
1773 u32 srrctl = 0; 1819 u32 srrctl = 0;
1774 int i; 1820 int i, j;
1775 1821
1776 rctl = rd32(E1000_RCTL); 1822 rctl = rd32(E1000_RCTL);
1777 1823
1778 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 1824 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1825 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1779 1826
1780 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | 1827 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
1781 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1782 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1828 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1783 1829
1784 /* 1830 /*
@@ -1788,38 +1834,26 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
1788 */ 1834 */
1789 rctl |= E1000_RCTL_SECRC; 1835 rctl |= E1000_RCTL_SECRC;
1790 1836
1791 rctl &= ~E1000_RCTL_SBP; 1837 /*
1838 * disable store bad packets, long packet enable, and clear size bits.
1839 */
1840 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_LPE | E1000_RCTL_SZ_256);
1792 1841
1793 if (adapter->netdev->mtu <= ETH_DATA_LEN) 1842 if (adapter->netdev->mtu > ETH_DATA_LEN)
1794 rctl &= ~E1000_RCTL_LPE;
1795 else
1796 rctl |= E1000_RCTL_LPE; 1843 rctl |= E1000_RCTL_LPE;
1797 if (adapter->rx_buffer_len <= IGB_RXBUFFER_2048) { 1844
1798 /* Setup buffer sizes */ 1845 /* Setup buffer sizes */
1799 rctl &= ~E1000_RCTL_SZ_4096; 1846 switch (adapter->rx_buffer_len) {
1800 rctl |= E1000_RCTL_BSEX; 1847 case IGB_RXBUFFER_256:
1801 switch (adapter->rx_buffer_len) { 1848 rctl |= E1000_RCTL_SZ_256;
1802 case IGB_RXBUFFER_256: 1849 break;
1803 rctl |= E1000_RCTL_SZ_256; 1850 case IGB_RXBUFFER_512:
1804 rctl &= ~E1000_RCTL_BSEX; 1851 rctl |= E1000_RCTL_SZ_512;
1805 break; 1852 break;
1806 case IGB_RXBUFFER_512: 1853 default:
1807 rctl |= E1000_RCTL_SZ_512; 1854 srrctl = ALIGN(adapter->rx_buffer_len, 1024)
1808 rctl &= ~E1000_RCTL_BSEX; 1855 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
1809 break; 1856 break;
1810 case IGB_RXBUFFER_1024:
1811 rctl |= E1000_RCTL_SZ_1024;
1812 rctl &= ~E1000_RCTL_BSEX;
1813 break;
1814 case IGB_RXBUFFER_2048:
1815 default:
1816 rctl |= E1000_RCTL_SZ_2048;
1817 rctl &= ~E1000_RCTL_BSEX;
1818 break;
1819 }
1820 } else {
1821 rctl &= ~E1000_RCTL_BSEX;
1822 srrctl = adapter->rx_buffer_len >> E1000_SRRCTL_BSIZEPKT_SHIFT;
1823 } 1857 }
1824 1858
1825 /* 82575 and greater support packet-split where the protocol 1859 /* 82575 and greater support packet-split where the protocol
@@ -1841,8 +1875,10 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
1841 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 1875 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1842 } 1876 }
1843 1877
1844 for (i = 0; i < adapter->num_rx_queues; i++) 1878 for (i = 0; i < adapter->num_rx_queues; i++) {
1845 wr32(E1000_SRRCTL(i), srrctl); 1879 j = adapter->rx_ring[i].reg_idx;
1880 wr32(E1000_SRRCTL(j), srrctl);
1881 }
1846 1882
1847 wr32(E1000_RCTL, rctl); 1883 wr32(E1000_RCTL, rctl);
1848} 1884}
@@ -1859,7 +1895,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1859 struct e1000_hw *hw = &adapter->hw; 1895 struct e1000_hw *hw = &adapter->hw;
1860 u32 rctl, rxcsum; 1896 u32 rctl, rxcsum;
1861 u32 rxdctl; 1897 u32 rxdctl;
1862 int i; 1898 int i, j;
1863 1899
1864 /* disable receives while setting up the descriptors */ 1900 /* disable receives while setting up the descriptors */
1865 rctl = rd32(E1000_RCTL); 1901 rctl = rd32(E1000_RCTL);
@@ -1874,25 +1910,26 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1874 * the Base and Length of the Rx Descriptor Ring */ 1910 * the Base and Length of the Rx Descriptor Ring */
1875 for (i = 0; i < adapter->num_rx_queues; i++) { 1911 for (i = 0; i < adapter->num_rx_queues; i++) {
1876 struct igb_ring *ring = &(adapter->rx_ring[i]); 1912 struct igb_ring *ring = &(adapter->rx_ring[i]);
1913 j = ring->reg_idx;
1877 rdba = ring->dma; 1914 rdba = ring->dma;
1878 wr32(E1000_RDBAL(i), 1915 wr32(E1000_RDBAL(j),
1879 rdba & 0x00000000ffffffffULL); 1916 rdba & 0x00000000ffffffffULL);
1880 wr32(E1000_RDBAH(i), rdba >> 32); 1917 wr32(E1000_RDBAH(j), rdba >> 32);
1881 wr32(E1000_RDLEN(i), 1918 wr32(E1000_RDLEN(j),
1882 ring->count * sizeof(union e1000_adv_rx_desc)); 1919 ring->count * sizeof(union e1000_adv_rx_desc));
1883 1920
1884 ring->head = E1000_RDH(i); 1921 ring->head = E1000_RDH(j);
1885 ring->tail = E1000_RDT(i); 1922 ring->tail = E1000_RDT(j);
1886 writel(0, hw->hw_addr + ring->tail); 1923 writel(0, hw->hw_addr + ring->tail);
1887 writel(0, hw->hw_addr + ring->head); 1924 writel(0, hw->hw_addr + ring->head);
1888 1925
1889 rxdctl = rd32(E1000_RXDCTL(i)); 1926 rxdctl = rd32(E1000_RXDCTL(j));
1890 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 1927 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1891 rxdctl &= 0xFFF00000; 1928 rxdctl &= 0xFFF00000;
1892 rxdctl |= IGB_RX_PTHRESH; 1929 rxdctl |= IGB_RX_PTHRESH;
1893 rxdctl |= IGB_RX_HTHRESH << 8; 1930 rxdctl |= IGB_RX_HTHRESH << 8;
1894 rxdctl |= IGB_RX_WTHRESH << 16; 1931 rxdctl |= IGB_RX_WTHRESH << 16;
1895 wr32(E1000_RXDCTL(i), rxdctl); 1932 wr32(E1000_RXDCTL(j), rxdctl);
1896#ifdef CONFIG_IGB_LRO 1933#ifdef CONFIG_IGB_LRO
1897 /* Intitial LRO Settings */ 1934 /* Intitial LRO Settings */
1898 ring->lro_mgr.max_aggr = MAX_LRO_AGGR; 1935 ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
@@ -1922,7 +1959,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1922 shift = 6; 1959 shift = 6;
1923 for (j = 0; j < (32 * 4); j++) { 1960 for (j = 0; j < (32 * 4); j++) {
1924 reta.bytes[j & 3] = 1961 reta.bytes[j & 3] =
1925 (j % adapter->num_rx_queues) << shift; 1962 adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
1926 if ((j & 3) == 3) 1963 if ((j & 3) == 3)
1927 writel(reta.dword, 1964 writel(reta.dword,
1928 hw->hw_addr + E1000_RETA(0) + (j & ~3)); 1965 hw->hw_addr + E1000_RETA(0) + (j & ~3));
@@ -1984,7 +2021,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1984 * 2021 *
1985 * Free all transmit software resources 2022 * Free all transmit software resources
1986 **/ 2023 **/
1987static void igb_free_tx_resources(struct igb_ring *tx_ring) 2024void igb_free_tx_resources(struct igb_ring *tx_ring)
1988{ 2025{
1989 struct pci_dev *pdev = tx_ring->adapter->pdev; 2026 struct pci_dev *pdev = tx_ring->adapter->pdev;
1990 2027
@@ -2082,7 +2119,7 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2082 * 2119 *
2083 * Free all receive software resources 2120 * Free all receive software resources
2084 **/ 2121 **/
2085static void igb_free_rx_resources(struct igb_ring *rx_ring) 2122void igb_free_rx_resources(struct igb_ring *rx_ring)
2086{ 2123{
2087 struct pci_dev *pdev = rx_ring->adapter->pdev; 2124 struct pci_dev *pdev = rx_ring->adapter->pdev;
2088 2125
@@ -2274,8 +2311,7 @@ static void igb_set_multi(struct net_device *netdev)
2274static void igb_update_phy_info(unsigned long data) 2311static void igb_update_phy_info(unsigned long data)
2275{ 2312{
2276 struct igb_adapter *adapter = (struct igb_adapter *) data; 2313 struct igb_adapter *adapter = (struct igb_adapter *) data;
2277 if (adapter->hw.phy.ops.get_phy_info) 2314 igb_get_phy_info(&adapter->hw);
2278 adapter->hw.phy.ops.get_phy_info(&adapter->hw);
2279} 2315}
2280 2316
2281/** 2317/**
@@ -2330,9 +2366,10 @@ static void igb_watchdog_task(struct work_struct *work)
2330 &adapter->link_duplex); 2366 &adapter->link_duplex);
2331 2367
2332 ctrl = rd32(E1000_CTRL); 2368 ctrl = rd32(E1000_CTRL);
2333 dev_info(&adapter->pdev->dev, 2369 /* Links status message must follow this format */
2334 "NIC Link is Up %d Mbps %s, " 2370 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2335 "Flow Control: %s\n", 2371 "Flow Control: %s\n",
2372 netdev->name,
2336 adapter->link_speed, 2373 adapter->link_speed,
2337 adapter->link_duplex == FULL_DUPLEX ? 2374 adapter->link_duplex == FULL_DUPLEX ?
2338 "Full Duplex" : "Half Duplex", 2375 "Full Duplex" : "Half Duplex",
@@ -2367,7 +2404,9 @@ static void igb_watchdog_task(struct work_struct *work)
2367 if (netif_carrier_ok(netdev)) { 2404 if (netif_carrier_ok(netdev)) {
2368 adapter->link_speed = 0; 2405 adapter->link_speed = 0;
2369 adapter->link_duplex = 0; 2406 adapter->link_duplex = 0;
2370 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); 2407 /* Links status message must follow this format */
2408 printk(KERN_INFO "igb: %s NIC Link is Down\n",
2409 netdev->name);
2371 netif_carrier_off(netdev); 2410 netif_carrier_off(netdev);
2372 netif_tx_stop_all_queues(netdev); 2411 netif_tx_stop_all_queues(netdev);
2373 if (!test_bit(__IGB_DOWN, &adapter->state)) 2412 if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -2703,6 +2742,7 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
2703 context_desc->seqnum_seed = 0; 2742 context_desc->seqnum_seed = 0;
2704 2743
2705 buffer_info->time_stamp = jiffies; 2744 buffer_info->time_stamp = jiffies;
2745 buffer_info->next_to_watch = i;
2706 buffer_info->dma = 0; 2746 buffer_info->dma = 0;
2707 i++; 2747 i++;
2708 if (i == tx_ring->count) 2748 if (i == tx_ring->count)
@@ -2766,6 +2806,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2766 cpu_to_le32(tx_ring->queue_index << 4); 2806 cpu_to_le32(tx_ring->queue_index << 4);
2767 2807
2768 buffer_info->time_stamp = jiffies; 2808 buffer_info->time_stamp = jiffies;
2809 buffer_info->next_to_watch = i;
2769 buffer_info->dma = 0; 2810 buffer_info->dma = 0;
2770 2811
2771 i++; 2812 i++;
@@ -2784,8 +2825,8 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2784#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR) 2825#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
2785 2826
2786static inline int igb_tx_map_adv(struct igb_adapter *adapter, 2827static inline int igb_tx_map_adv(struct igb_adapter *adapter,
2787 struct igb_ring *tx_ring, 2828 struct igb_ring *tx_ring, struct sk_buff *skb,
2788 struct sk_buff *skb) 2829 unsigned int first)
2789{ 2830{
2790 struct igb_buffer *buffer_info; 2831 struct igb_buffer *buffer_info;
2791 unsigned int len = skb_headlen(skb); 2832 unsigned int len = skb_headlen(skb);
@@ -2799,6 +2840,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
2799 buffer_info->length = len; 2840 buffer_info->length = len;
2800 /* set time_stamp *before* dma to help avoid a possible race */ 2841 /* set time_stamp *before* dma to help avoid a possible race */
2801 buffer_info->time_stamp = jiffies; 2842 buffer_info->time_stamp = jiffies;
2843 buffer_info->next_to_watch = i;
2802 buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len, 2844 buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
2803 PCI_DMA_TODEVICE); 2845 PCI_DMA_TODEVICE);
2804 count++; 2846 count++;
@@ -2816,6 +2858,7 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
2816 BUG_ON(len >= IGB_MAX_DATA_PER_TXD); 2858 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
2817 buffer_info->length = len; 2859 buffer_info->length = len;
2818 buffer_info->time_stamp = jiffies; 2860 buffer_info->time_stamp = jiffies;
2861 buffer_info->next_to_watch = i;
2819 buffer_info->dma = pci_map_page(adapter->pdev, 2862 buffer_info->dma = pci_map_page(adapter->pdev,
2820 frag->page, 2863 frag->page,
2821 frag->page_offset, 2864 frag->page_offset,
@@ -2828,8 +2871,9 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
2828 i = 0; 2871 i = 0;
2829 } 2872 }
2830 2873
2831 i = (i == 0) ? tx_ring->count - 1 : i - 1; 2874 i = ((i == 0) ? tx_ring->count - 1 : i - 1);
2832 tx_ring->buffer_info[i].skb = skb; 2875 tx_ring->buffer_info[i].skb = skb;
2876 tx_ring->buffer_info[first].next_to_watch = i;
2833 2877
2834 return count; 2878 return count;
2835} 2879}
@@ -2936,6 +2980,7 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2936 struct igb_ring *tx_ring) 2980 struct igb_ring *tx_ring)
2937{ 2981{
2938 struct igb_adapter *adapter = netdev_priv(netdev); 2982 struct igb_adapter *adapter = netdev_priv(netdev);
2983 unsigned int first;
2939 unsigned int tx_flags = 0; 2984 unsigned int tx_flags = 0;
2940 unsigned int len; 2985 unsigned int len;
2941 u8 hdr_len = 0; 2986 u8 hdr_len = 0;
@@ -2972,6 +3017,8 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2972 if (skb->protocol == htons(ETH_P_IP)) 3017 if (skb->protocol == htons(ETH_P_IP))
2973 tx_flags |= IGB_TX_FLAGS_IPV4; 3018 tx_flags |= IGB_TX_FLAGS_IPV4;
2974 3019
3020 first = tx_ring->next_to_use;
3021
2975 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags, 3022 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
2976 &hdr_len) : 0; 3023 &hdr_len) : 0;
2977 3024
@@ -2987,7 +3034,7 @@ static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2987 tx_flags |= IGB_TX_FLAGS_CSUM; 3034 tx_flags |= IGB_TX_FLAGS_CSUM;
2988 3035
2989 igb_tx_queue_adv(adapter, tx_ring, tx_flags, 3036 igb_tx_queue_adv(adapter, tx_ring, tx_flags,
2990 igb_tx_map_adv(adapter, tx_ring, skb), 3037 igb_tx_map_adv(adapter, tx_ring, skb, first),
2991 skb->len, hdr_len); 3038 skb->len, hdr_len);
2992 3039
2993 netdev->trans_start = jiffies; 3040 netdev->trans_start = jiffies;
@@ -3249,7 +3296,7 @@ void igb_update_stats(struct igb_adapter *adapter)
3249 /* Phy Stats */ 3296 /* Phy Stats */
3250 if (hw->phy.media_type == e1000_media_type_copper) { 3297 if (hw->phy.media_type == e1000_media_type_copper) {
3251 if ((adapter->link_speed == SPEED_1000) && 3298 if ((adapter->link_speed == SPEED_1000) &&
3252 (!hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS, 3299 (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
3253 &phy_tmp))) { 3300 &phy_tmp))) {
3254 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 3301 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3255 adapter->phy_stats.idle_errors += phy_tmp; 3302 adapter->phy_stats.idle_errors += phy_tmp;
@@ -3332,7 +3379,6 @@ static void igb_write_itr(struct igb_ring *ring)
3332static irqreturn_t igb_msix_rx(int irq, void *data) 3379static irqreturn_t igb_msix_rx(int irq, void *data)
3333{ 3380{
3334 struct igb_ring *rx_ring = data; 3381 struct igb_ring *rx_ring = data;
3335 struct igb_adapter *adapter = rx_ring->adapter;
3336 3382
3337 /* Write the ITR value calculated at the end of the 3383 /* Write the ITR value calculated at the end of the
3338 * previous interrupt. 3384 * previous interrupt.
@@ -3340,11 +3386,11 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
3340 3386
3341 igb_write_itr(rx_ring); 3387 igb_write_itr(rx_ring);
3342 3388
3343 if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi)) 3389 if (netif_rx_schedule_prep(&rx_ring->napi))
3344 __netif_rx_schedule(adapter->netdev, &rx_ring->napi); 3390 __netif_rx_schedule(&rx_ring->napi);
3345 3391
3346#ifdef CONFIG_IGB_DCA 3392#ifdef CONFIG_IGB_DCA
3347 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3393 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
3348 igb_update_rx_dca(rx_ring); 3394 igb_update_rx_dca(rx_ring);
3349#endif 3395#endif
3350 return IRQ_HANDLED; 3396 return IRQ_HANDLED;
@@ -3357,7 +3403,7 @@ static void igb_update_rx_dca(struct igb_ring *rx_ring)
3357 struct igb_adapter *adapter = rx_ring->adapter; 3403 struct igb_adapter *adapter = rx_ring->adapter;
3358 struct e1000_hw *hw = &adapter->hw; 3404 struct e1000_hw *hw = &adapter->hw;
3359 int cpu = get_cpu(); 3405 int cpu = get_cpu();
3360 int q = rx_ring - adapter->rx_ring; 3406 int q = rx_ring->reg_idx;
3361 3407
3362 if (rx_ring->cpu != cpu) { 3408 if (rx_ring->cpu != cpu) {
3363 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); 3409 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
@@ -3384,7 +3430,7 @@ static void igb_update_tx_dca(struct igb_ring *tx_ring)
3384 struct igb_adapter *adapter = tx_ring->adapter; 3430 struct igb_adapter *adapter = tx_ring->adapter;
3385 struct e1000_hw *hw = &adapter->hw; 3431 struct e1000_hw *hw = &adapter->hw;
3386 int cpu = get_cpu(); 3432 int cpu = get_cpu();
3387 int q = tx_ring - adapter->tx_ring; 3433 int q = tx_ring->reg_idx;
3388 3434
3389 if (tx_ring->cpu != cpu) { 3435 if (tx_ring->cpu != cpu) {
3390 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); 3436 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
@@ -3493,7 +3539,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
3493 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3539 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3494 } 3540 }
3495 3541
3496 netif_rx_schedule(netdev, &adapter->rx_ring[0].napi); 3542 netif_rx_schedule(&adapter->rx_ring[0].napi);
3497 3543
3498 return IRQ_HANDLED; 3544 return IRQ_HANDLED;
3499} 3545}
@@ -3531,7 +3577,7 @@ static irqreturn_t igb_intr(int irq, void *data)
3531 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3577 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3532 } 3578 }
3533 3579
3534 netif_rx_schedule(netdev, &adapter->rx_ring[0].napi); 3580 netif_rx_schedule(&adapter->rx_ring[0].napi);
3535 3581
3536 return IRQ_HANDLED; 3582 return IRQ_HANDLED;
3537} 3583}
@@ -3566,7 +3612,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
3566 !netif_running(netdev)) { 3612 !netif_running(netdev)) {
3567 if (adapter->itr_setting & 3) 3613 if (adapter->itr_setting & 3)
3568 igb_set_itr(adapter); 3614 igb_set_itr(adapter);
3569 netif_rx_complete(netdev, napi); 3615 netif_rx_complete(napi);
3570 if (!test_bit(__IGB_DOWN, &adapter->state)) 3616 if (!test_bit(__IGB_DOWN, &adapter->state))
3571 igb_irq_enable(adapter); 3617 igb_irq_enable(adapter);
3572 return 0; 3618 return 0;
@@ -3592,7 +3638,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3592 3638
3593 /* If not enough Rx work done, exit the polling mode */ 3639 /* If not enough Rx work done, exit the polling mode */
3594 if ((work_done == 0) || !netif_running(netdev)) { 3640 if ((work_done == 0) || !netif_running(netdev)) {
3595 netif_rx_complete(netdev, napi); 3641 netif_rx_complete(napi);
3596 3642
3597 if (adapter->itr_setting & 3) { 3643 if (adapter->itr_setting & 3) {
3598 if (adapter->num_rx_queues == 1) 3644 if (adapter->num_rx_queues == 1)
@@ -3610,12 +3656,6 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3610 return 1; 3656 return 1;
3611} 3657}
3612 3658
3613static inline u32 get_head(struct igb_ring *tx_ring)
3614{
3615 void *end = (struct e1000_tx_desc *)tx_ring->desc + tx_ring->count;
3616 return le32_to_cpu(*(volatile __le32 *)end);
3617}
3618
3619/** 3659/**
3620 * igb_clean_tx_irq - Reclaim resources after transmit completes 3660 * igb_clean_tx_irq - Reclaim resources after transmit completes
3621 * @adapter: board private structure 3661 * @adapter: board private structure
@@ -3624,24 +3664,25 @@ static inline u32 get_head(struct igb_ring *tx_ring)
3624static bool igb_clean_tx_irq(struct igb_ring *tx_ring) 3664static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3625{ 3665{
3626 struct igb_adapter *adapter = tx_ring->adapter; 3666 struct igb_adapter *adapter = tx_ring->adapter;
3627 struct e1000_hw *hw = &adapter->hw;
3628 struct net_device *netdev = adapter->netdev; 3667 struct net_device *netdev = adapter->netdev;
3629 struct e1000_tx_desc *tx_desc; 3668 struct e1000_hw *hw = &adapter->hw;
3630 struct igb_buffer *buffer_info; 3669 struct igb_buffer *buffer_info;
3631 struct sk_buff *skb; 3670 struct sk_buff *skb;
3632 unsigned int i; 3671 union e1000_adv_tx_desc *tx_desc, *eop_desc;
3633 u32 head, oldhead;
3634 unsigned int count = 0;
3635 unsigned int total_bytes = 0, total_packets = 0; 3672 unsigned int total_bytes = 0, total_packets = 0;
3636 bool retval = true; 3673 unsigned int i, eop, count = 0;
3674 bool cleaned = false;
3637 3675
3638 rmb();
3639 head = get_head(tx_ring);
3640 i = tx_ring->next_to_clean; 3676 i = tx_ring->next_to_clean;
3641 while (1) { 3677 eop = tx_ring->buffer_info[i].next_to_watch;
3642 while (i != head) { 3678 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
3643 tx_desc = E1000_TX_DESC(*tx_ring, i); 3679
3680 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3681 (count < tx_ring->count)) {
3682 for (cleaned = false; !cleaned; count++) {
3683 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3644 buffer_info = &tx_ring->buffer_info[i]; 3684 buffer_info = &tx_ring->buffer_info[i];
3685 cleaned = (i == eop);
3645 skb = buffer_info->skb; 3686 skb = buffer_info->skb;
3646 3687
3647 if (skb) { 3688 if (skb) {
@@ -3656,25 +3697,17 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3656 } 3697 }
3657 3698
3658 igb_unmap_and_free_tx_resource(adapter, buffer_info); 3699 igb_unmap_and_free_tx_resource(adapter, buffer_info);
3700 tx_desc->wb.status = 0;
3659 3701
3660 i++; 3702 i++;
3661 if (i == tx_ring->count) 3703 if (i == tx_ring->count)
3662 i = 0; 3704 i = 0;
3663
3664 count++;
3665 if (count == IGB_MAX_TX_CLEAN) {
3666 retval = false;
3667 goto done_cleaning;
3668 }
3669 } 3705 }
3670 oldhead = head; 3706
3671 rmb(); 3707 eop = tx_ring->buffer_info[i].next_to_watch;
3672 head = get_head(tx_ring); 3708 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
3673 if (head == oldhead) 3709 }
3674 goto done_cleaning; 3710
3675 } /* while (1) */
3676
3677done_cleaning:
3678 tx_ring->next_to_clean = i; 3711 tx_ring->next_to_clean = i;
3679 3712
3680 if (unlikely(count && 3713 if (unlikely(count &&
@@ -3701,7 +3734,6 @@ done_cleaning:
3701 && !(rd32(E1000_STATUS) & 3734 && !(rd32(E1000_STATUS) &
3702 E1000_STATUS_TXOFF)) { 3735 E1000_STATUS_TXOFF)) {
3703 3736
3704 tx_desc = E1000_TX_DESC(*tx_ring, i);
3705 /* detected Tx unit hang */ 3737 /* detected Tx unit hang */
3706 dev_err(&adapter->pdev->dev, 3738 dev_err(&adapter->pdev->dev,
3707 "Detected Tx Unit Hang\n" 3739 "Detected Tx Unit Hang\n"
@@ -3710,9 +3742,9 @@ done_cleaning:
3710 " TDT <%x>\n" 3742 " TDT <%x>\n"
3711 " next_to_use <%x>\n" 3743 " next_to_use <%x>\n"
3712 " next_to_clean <%x>\n" 3744 " next_to_clean <%x>\n"
3713 " head (WB) <%x>\n"
3714 "buffer_info[next_to_clean]\n" 3745 "buffer_info[next_to_clean]\n"
3715 " time_stamp <%lx>\n" 3746 " time_stamp <%lx>\n"
3747 " next_to_watch <%x>\n"
3716 " jiffies <%lx>\n" 3748 " jiffies <%lx>\n"
3717 " desc.status <%x>\n", 3749 " desc.status <%x>\n",
3718 tx_ring->queue_index, 3750 tx_ring->queue_index,
@@ -3720,10 +3752,10 @@ done_cleaning:
3720 readl(adapter->hw.hw_addr + tx_ring->tail), 3752 readl(adapter->hw.hw_addr + tx_ring->tail),
3721 tx_ring->next_to_use, 3753 tx_ring->next_to_use,
3722 tx_ring->next_to_clean, 3754 tx_ring->next_to_clean,
3723 head,
3724 tx_ring->buffer_info[i].time_stamp, 3755 tx_ring->buffer_info[i].time_stamp,
3756 eop,
3725 jiffies, 3757 jiffies,
3726 tx_desc->upper.fields.status); 3758 eop_desc->wb.status);
3727 netif_stop_subqueue(netdev, tx_ring->queue_index); 3759 netif_stop_subqueue(netdev, tx_ring->queue_index);
3728 } 3760 }
3729 } 3761 }
@@ -3733,7 +3765,7 @@ done_cleaning:
3733 tx_ring->tx_stats.packets += total_packets; 3765 tx_ring->tx_stats.packets += total_packets;
3734 adapter->net_stats.tx_bytes += total_bytes; 3766 adapter->net_stats.tx_bytes += total_bytes;
3735 adapter->net_stats.tx_packets += total_packets; 3767 adapter->net_stats.tx_packets += total_packets;
3736 return retval; 3768 return (count < tx_ring->count);
3737} 3769}
3738 3770
3739#ifdef CONFIG_IGB_LRO 3771#ifdef CONFIG_IGB_LRO
@@ -3919,8 +3951,10 @@ send_up:
3919 next_buffer = &rx_ring->buffer_info[i]; 3951 next_buffer = &rx_ring->buffer_info[i];
3920 3952
3921 if (!(staterr & E1000_RXD_STAT_EOP)) { 3953 if (!(staterr & E1000_RXD_STAT_EOP)) {
3922 buffer_info->skb = xchg(&next_buffer->skb, skb); 3954 buffer_info->skb = next_buffer->skb;
3923 buffer_info->dma = xchg(&next_buffer->dma, 0); 3955 buffer_info->dma = next_buffer->dma;
3956 next_buffer->skb = skb;
3957 next_buffer->dma = 0;
3924 goto next_desc; 3958 goto next_desc;
3925 } 3959 }
3926 3960
@@ -3938,8 +3972,6 @@ send_up:
3938 3972
3939 igb_receive_skb(rx_ring, staterr, rx_desc, skb); 3973 igb_receive_skb(rx_ring, staterr, rx_desc, skb);
3940 3974
3941 netdev->last_rx = jiffies;
3942
3943next_desc: 3975next_desc:
3944 rx_desc->wb.upper.status_error = 0; 3976 rx_desc->wb.upper.status_error = 0;
3945 3977
@@ -4102,9 +4134,8 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4102 case SIOCGMIIREG: 4134 case SIOCGMIIREG:
4103 if (!capable(CAP_NET_ADMIN)) 4135 if (!capable(CAP_NET_ADMIN))
4104 return -EPERM; 4136 return -EPERM;
4105 if (adapter->hw.phy.ops.read_phy_reg(&adapter->hw, 4137 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
4106 data->reg_num 4138 &data->val_out))
4107 & 0x1F, &data->val_out))
4108 return -EIO; 4139 return -EIO;
4109 break; 4140 break;
4110 case SIOCSMIIREG: 4141 case SIOCSMIIREG:
@@ -4474,27 +4505,38 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
4474 struct net_device *netdev = pci_get_drvdata(pdev); 4505 struct net_device *netdev = pci_get_drvdata(pdev);
4475 struct igb_adapter *adapter = netdev_priv(netdev); 4506 struct igb_adapter *adapter = netdev_priv(netdev);
4476 struct e1000_hw *hw = &adapter->hw; 4507 struct e1000_hw *hw = &adapter->hw;
4508 pci_ers_result_t result;
4477 int err; 4509 int err;
4478 4510
4479 if (adapter->need_ioport) 4511 if (adapter->need_ioport)
4480 err = pci_enable_device(pdev); 4512 err = pci_enable_device(pdev);
4481 else 4513 else
4482 err = pci_enable_device_mem(pdev); 4514 err = pci_enable_device_mem(pdev);
4515
4483 if (err) { 4516 if (err) {
4484 dev_err(&pdev->dev, 4517 dev_err(&pdev->dev,
4485 "Cannot re-enable PCI device after reset.\n"); 4518 "Cannot re-enable PCI device after reset.\n");
4486 return PCI_ERS_RESULT_DISCONNECT; 4519 result = PCI_ERS_RESULT_DISCONNECT;
4487 } 4520 } else {
4488 pci_set_master(pdev); 4521 pci_set_master(pdev);
4489 pci_restore_state(pdev); 4522 pci_restore_state(pdev);
4490 4523
4491 pci_enable_wake(pdev, PCI_D3hot, 0); 4524 pci_enable_wake(pdev, PCI_D3hot, 0);
4492 pci_enable_wake(pdev, PCI_D3cold, 0); 4525 pci_enable_wake(pdev, PCI_D3cold, 0);
4493 4526
4494 igb_reset(adapter); 4527 igb_reset(adapter);
4495 wr32(E1000_WUS, ~0); 4528 wr32(E1000_WUS, ~0);
4529 result = PCI_ERS_RESULT_RECOVERED;
4530 }
4531
4532 err = pci_cleanup_aer_uncorrect_error_status(pdev);
4533 if (err) {
4534 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
4535 "failed 0x%0x\n", err);
4536 /* non-fatal, continue */
4537 }
4496 4538
4497 return PCI_ERS_RESULT_RECOVERED; 4539 return result;
4498} 4540}
4499 4541
4500/** 4542/**
@@ -4522,7 +4564,6 @@ static void igb_io_resume(struct pci_dev *pdev)
4522 /* let the f/w know that the h/w is now under the control of the 4564 /* let the f/w know that the h/w is now under the control of the
4523 * driver. */ 4565 * driver. */
4524 igb_get_hw_control(adapter); 4566 igb_get_hw_control(adapter);
4525
4526} 4567}
4527 4568
4528/* igb_main.c */ 4569/* igb_main.c */
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 1f25263dc7eb..170b12d1d70e 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -390,11 +390,8 @@ static int nic_init(struct ioc3 *ioc3)
390 } 390 }
391 391
392 printk("Found %s NIC", type); 392 printk("Found %s NIC", type);
393 if (type != unknown) { 393 if (type != unknown)
394 printk (" registration number %02x:%02x:%02x:%02x:%02x:%02x," 394 printk (" registration number %pM, CRC %02x", serial, crc);
395 " CRC %02x", serial[0], serial[1], serial[2],
396 serial[3], serial[4], serial[5], crc);
397 }
398 printk(".\n"); 395 printk(".\n");
399 396
400 return 0; 397 return 0;
@@ -443,12 +440,9 @@ static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
443 */ 440 */
444static void ioc3_get_eaddr(struct ioc3_private *ip) 441static void ioc3_get_eaddr(struct ioc3_private *ip)
445{ 442{
446 DECLARE_MAC_BUF(mac);
447
448 ioc3_get_eaddr_nic(ip); 443 ioc3_get_eaddr_nic(ip);
449 444
450 printk("Ethernet address is %s.\n", 445 printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr);
451 print_mac(mac, priv_netdev(ip)->dev_addr));
452} 446}
453 447
454static void __ioc3_set_mac_address(struct net_device *dev) 448static void __ioc3_set_mac_address(struct net_device *dev)
@@ -627,7 +621,6 @@ static inline void ioc3_rx(struct ioc3_private *ip)
627 rxb = (struct ioc3_erxbuf *) new_skb->data; 621 rxb = (struct ioc3_erxbuf *) new_skb->data;
628 skb_reserve(new_skb, RX_OFFSET); 622 skb_reserve(new_skb, RX_OFFSET);
629 623
630 priv_netdev(ip)->last_rx = jiffies;
631 ip->stats.rx_packets++; /* Statistics */ 624 ip->stats.rx_packets++; /* Statistics */
632 ip->stats.rx_bytes += len; 625 ip->stats.rx_bytes += len;
633 } else { 626 } else {
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 059369885be1..7b6d435a8468 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -1222,7 +1222,6 @@ static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
1222 skb->protocol = eth_type_trans(skb, dev); 1222 skb->protocol = eth_type_trans(skb, dev);
1223 skb->ip_summed = CHECKSUM_NONE; 1223 skb->ip_summed = CHECKSUM_NONE;
1224 netif_rx(skb); 1224 netif_rx(skb);
1225 dev->last_rx = jiffies;
1226 sp->rx_buff[entry] = NULL; 1225 sp->rx_buff[entry] = NULL;
1227} 1226}
1228 1227
@@ -1256,7 +1255,6 @@ static void ipg_nic_rx_with_start(struct net_device *dev,
1256 jumbo->skb = skb; 1255 jumbo->skb = skb;
1257 1256
1258 sp->rx_buff[entry] = NULL; 1257 sp->rx_buff[entry] = NULL;
1259 dev->last_rx = jiffies;
1260} 1258}
1261 1259
1262static void ipg_nic_rx_with_end(struct net_device *dev, 1260static void ipg_nic_rx_with_end(struct net_device *dev,
@@ -1292,7 +1290,6 @@ static void ipg_nic_rx_with_end(struct net_device *dev,
1292 } 1290 }
1293 } 1291 }
1294 1292
1295 dev->last_rx = jiffies;
1296 jumbo->found_start = 0; 1293 jumbo->found_start = 0;
1297 jumbo->current_size = 0; 1294 jumbo->current_size = 0;
1298 jumbo->skb = NULL; 1295 jumbo->skb = NULL;
@@ -1325,7 +1322,6 @@ static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
1325 skb->data, sp->rxfrag_size); 1322 skb->data, sp->rxfrag_size);
1326 } 1323 }
1327 } 1324 }
1328 dev->last_rx = jiffies;
1329 ipg_nic_rx_free_skb(dev); 1325 ipg_nic_rx_free_skb(dev);
1330 } 1326 }
1331 } else { 1327 } else {
@@ -1494,11 +1490,6 @@ static int ipg_nic_rx(struct net_device *dev)
1494 * when processing completes. 1490 * when processing completes.
1495 */ 1491 */
1496 netif_rx(skb); 1492 netif_rx(skb);
1497
1498 /* Record frame receive time (jiffies = Linux
1499 * kernel current time stamp).
1500 */
1501 dev->last_rx = jiffies;
1502 } 1493 }
1503 1494
1504 /* Assure RX buffer is not reused by IPG. */ 1495 /* Assure RX buffer is not reused by IPG. */
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 2ff181861d2d..3c58e67ef1e4 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -292,7 +292,7 @@ static int ali_ircc_open(int i, chipio_t *info)
292 return -ENOMEM; 292 return -ENOMEM;
293 } 293 }
294 294
295 self = dev->priv; 295 self = netdev_priv(dev);
296 self->netdev = dev; 296 self->netdev = dev;
297 spin_lock_init(&self->lock); 297 spin_lock_init(&self->lock);
298 298
@@ -665,7 +665,7 @@ static irqreturn_t ali_ircc_interrupt(int irq, void *dev_id)
665 665
666 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__); 666 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__);
667 667
668 self = dev->priv; 668 self = netdev_priv(dev);
669 669
670 spin_lock(&self->lock); 670 spin_lock(&self->lock);
671 671
@@ -1333,7 +1333,7 @@ static int ali_ircc_net_open(struct net_device *dev)
1333 1333
1334 IRDA_ASSERT(dev != NULL, return -1;); 1334 IRDA_ASSERT(dev != NULL, return -1;);
1335 1335
1336 self = (struct ali_ircc_cb *) dev->priv; 1336 self = netdev_priv(dev);
1337 1337
1338 IRDA_ASSERT(self != NULL, return 0;); 1338 IRDA_ASSERT(self != NULL, return 0;);
1339 1339
@@ -1396,7 +1396,7 @@ static int ali_ircc_net_close(struct net_device *dev)
1396 1396
1397 IRDA_ASSERT(dev != NULL, return -1;); 1397 IRDA_ASSERT(dev != NULL, return -1;);
1398 1398
1399 self = (struct ali_ircc_cb *) dev->priv; 1399 self = netdev_priv(dev);
1400 IRDA_ASSERT(self != NULL, return 0;); 1400 IRDA_ASSERT(self != NULL, return 0;);
1401 1401
1402 /* Stop device */ 1402 /* Stop device */
@@ -1436,7 +1436,7 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1436 1436
1437 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ ); 1437 IRDA_DEBUG(1, "%s(), ---------------- Start -----------------\n", __func__ );
1438 1438
1439 self = (struct ali_ircc_cb *) dev->priv; 1439 self = netdev_priv(dev);
1440 iobase = self->io.fir_base; 1440 iobase = self->io.fir_base;
1441 1441
1442 netif_stop_queue(dev); 1442 netif_stop_queue(dev);
@@ -1931,7 +1931,6 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1931 skb_reset_mac_header(skb); 1931 skb_reset_mac_header(skb);
1932 skb->protocol = htons(ETH_P_IRDA); 1932 skb->protocol = htons(ETH_P_IRDA);
1933 netif_rx(skb); 1933 netif_rx(skb);
1934 self->netdev->last_rx = jiffies;
1935 } 1934 }
1936 } 1935 }
1937 1936
@@ -1960,7 +1959,7 @@ static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
1960 1959
1961 IRDA_ASSERT(dev != NULL, return 0;); 1960 IRDA_ASSERT(dev != NULL, return 0;);
1962 1961
1963 self = (struct ali_ircc_cb *) dev->priv; 1962 self = netdev_priv(dev);
1964 IRDA_ASSERT(self != NULL, return 0;); 1963 IRDA_ASSERT(self != NULL, return 0;);
1965 1964
1966 iobase = self->io.sir_base; 1965 iobase = self->io.sir_base;
@@ -2028,7 +2027,7 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2028 2027
2029 IRDA_ASSERT(dev != NULL, return -1;); 2028 IRDA_ASSERT(dev != NULL, return -1;);
2030 2029
2031 self = dev->priv; 2030 self = netdev_priv(dev);
2032 2031
2033 IRDA_ASSERT(self != NULL, return -1;); 2032 IRDA_ASSERT(self != NULL, return -1;);
2034 2033
@@ -2114,7 +2113,7 @@ static int ali_ircc_is_receiving(struct ali_ircc_cb *self)
2114 2113
2115static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev) 2114static struct net_device_stats *ali_ircc_net_get_stats(struct net_device *dev)
2116{ 2115{
2117 struct ali_ircc_cb *self = (struct ali_ircc_cb *) dev->priv; 2116 struct ali_ircc_cb *self = netdev_priv(dev);
2118 2117
2119 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ ); 2118 IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __func__ );
2120 2119
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index a1e4508717c8..6c4b53ffbcac 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -620,7 +620,6 @@ static int au1k_irda_rx(struct net_device *dev)
620 /* next descriptor */ 620 /* next descriptor */
621 prxd = aup->rx_ring[aup->rx_head]; 621 prxd = aup->rx_ring[aup->rx_head];
622 flags = prxd->flags; 622 flags = prxd->flags;
623 dev->last_rx = jiffies;
624 623
625 } 624 }
626 return 0; 625 return 0;
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 69d16b30323b..687c2d53d4d2 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -979,7 +979,7 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
979 unsigned long flags; 979 unsigned long flags;
980 struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb; 980 struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb;
981 981
982 self = (struct toshoboe_cb *) dev->priv; 982 self = netdev_priv(dev);
983 983
984 IRDA_ASSERT (self != NULL, return 0; ); 984 IRDA_ASSERT (self != NULL, return 0; );
985 985
@@ -1384,7 +1384,7 @@ toshoboe_net_close (struct net_device *dev)
1384 IRDA_DEBUG (4, "%s()\n", __func__); 1384 IRDA_DEBUG (4, "%s()\n", __func__);
1385 1385
1386 IRDA_ASSERT (dev != NULL, return -1; ); 1386 IRDA_ASSERT (dev != NULL, return -1; );
1387 self = (struct toshoboe_cb *) dev->priv; 1387 self = netdev_priv(dev);
1388 1388
1389 /* Stop device */ 1389 /* Stop device */
1390 netif_stop_queue(dev); 1390 netif_stop_queue(dev);
@@ -1422,7 +1422,7 @@ toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1422 1422
1423 IRDA_ASSERT (dev != NULL, return -1; ); 1423 IRDA_ASSERT (dev != NULL, return -1; );
1424 1424
1425 self = dev->priv; 1425 self = netdev_priv(dev);
1426 1426
1427 IRDA_ASSERT (self != NULL, return -1; ); 1427 IRDA_ASSERT (self != NULL, return -1; );
1428 1428
@@ -1546,7 +1546,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1546 return -ENOMEM; 1546 return -ENOMEM;
1547 } 1547 }
1548 1548
1549 self = dev->priv; 1549 self = netdev_priv(dev);
1550 self->netdev = dev; 1550 self->netdev = dev;
1551 self->pdev = pci_dev; 1551 self->pdev = pci_dev;
1552 self->base = pci_resource_start(pci_dev,0); 1552 self->base = pci_resource_start(pci_dev,0);
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index b5d6b9ac162a..205e4e825a97 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -384,7 +384,7 @@ static void speed_bulk_callback(struct urb *urb)
384 */ 384 */
385static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev) 385static int irda_usb_hard_xmit(struct sk_buff *skb, struct net_device *netdev)
386{ 386{
387 struct irda_usb_cb *self = netdev->priv; 387 struct irda_usb_cb *self = netdev_priv(netdev);
388 struct urb *urb = self->tx_urb; 388 struct urb *urb = self->tx_urb;
389 unsigned long flags; 389 unsigned long flags;
390 s32 speed; 390 s32 speed;
@@ -628,7 +628,7 @@ static void write_bulk_callback(struct urb *urb)
628static void irda_usb_net_timeout(struct net_device *netdev) 628static void irda_usb_net_timeout(struct net_device *netdev)
629{ 629{
630 unsigned long flags; 630 unsigned long flags;
631 struct irda_usb_cb *self = netdev->priv; 631 struct irda_usb_cb *self = netdev_priv(netdev);
632 struct urb *urb; 632 struct urb *urb;
633 int done = 0; /* If we have made any progress */ 633 int done = 0; /* If we have made any progress */
634 634
@@ -929,7 +929,6 @@ static void irda_usb_receive(struct urb *urb)
929 /* Keep stats up to date */ 929 /* Keep stats up to date */
930 self->stats.rx_bytes += len; 930 self->stats.rx_bytes += len;
931 self->stats.rx_packets++; 931 self->stats.rx_packets++;
932 self->netdev->last_rx = jiffies;
933 932
934done: 933done:
935 /* Note : at this point, the URB we've just received (urb) 934 /* Note : at this point, the URB we've just received (urb)
@@ -1175,7 +1174,7 @@ static int irda_usb_net_open(struct net_device *netdev)
1175 IRDA_DEBUG(1, "%s()\n", __func__); 1174 IRDA_DEBUG(1, "%s()\n", __func__);
1176 1175
1177 IRDA_ASSERT(netdev != NULL, return -1;); 1176 IRDA_ASSERT(netdev != NULL, return -1;);
1178 self = (struct irda_usb_cb *) netdev->priv; 1177 self = netdev_priv(netdev);
1179 IRDA_ASSERT(self != NULL, return -1;); 1178 IRDA_ASSERT(self != NULL, return -1;);
1180 1179
1181 spin_lock_irqsave(&self->lock, flags); 1180 spin_lock_irqsave(&self->lock, flags);
@@ -1257,7 +1256,7 @@ static int irda_usb_net_close(struct net_device *netdev)
1257 IRDA_DEBUG(1, "%s()\n", __func__); 1256 IRDA_DEBUG(1, "%s()\n", __func__);
1258 1257
1259 IRDA_ASSERT(netdev != NULL, return -1;); 1258 IRDA_ASSERT(netdev != NULL, return -1;);
1260 self = (struct irda_usb_cb *) netdev->priv; 1259 self = netdev_priv(netdev);
1261 IRDA_ASSERT(self != NULL, return -1;); 1260 IRDA_ASSERT(self != NULL, return -1;);
1262 1261
1263 /* Clear this flag *before* unlinking the urbs and *before* 1262 /* Clear this flag *before* unlinking the urbs and *before*
@@ -1306,7 +1305,7 @@ static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1306 int ret = 0; 1305 int ret = 0;
1307 1306
1308 IRDA_ASSERT(dev != NULL, return -1;); 1307 IRDA_ASSERT(dev != NULL, return -1;);
1309 self = dev->priv; 1308 self = netdev_priv(dev);
1310 IRDA_ASSERT(self != NULL, return -1;); 1309 IRDA_ASSERT(self != NULL, return -1;);
1311 1310
1312 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd); 1311 IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
@@ -1348,7 +1347,7 @@ static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1348 */ 1347 */
1349static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev) 1348static struct net_device_stats *irda_usb_net_get_stats(struct net_device *dev)
1350{ 1349{
1351 struct irda_usb_cb *self = dev->priv; 1350 struct irda_usb_cb *self = netdev_priv(dev);
1352 return &self->stats; 1351 return &self->stats;
1353} 1352}
1354 1353
@@ -1641,7 +1640,7 @@ static int irda_usb_probe(struct usb_interface *intf,
1641 goto err_out; 1640 goto err_out;
1642 1641
1643 SET_NETDEV_DEV(net, &intf->dev); 1642 SET_NETDEV_DEV(net, &intf->dev);
1644 self = net->priv; 1643 self = netdev_priv(net);
1645 self->netdev = net; 1644 self->netdev = net;
1646 spin_lock_init(&self->lock); 1645 spin_lock_init(&self->lock);
1647 init_timer(&self->rx_defer_timer); 1646 init_timer(&self->rx_defer_timer);
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 6bcee01c684c..d53aa9582137 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -191,7 +191,7 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
191 tty = priv->tty; 191 tty = priv->tty;
192 if (!tty->ops->write) 192 if (!tty->ops->write)
193 return 0; 193 return 0;
194 tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); 194 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
195 writelen = tty_write_room(tty); 195 writelen = tty_write_room(tty);
196 if (writelen > len) 196 if (writelen > len)
197 writelen = len; 197 writelen = len;
@@ -263,8 +263,7 @@ static void irtty_write_wakeup(struct tty_struct *tty)
263 IRDA_ASSERT(priv != NULL, return;); 263 IRDA_ASSERT(priv != NULL, return;);
264 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;); 264 IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
265 265
266 tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 266 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
267
268 if (priv->dev) 267 if (priv->dev)
269 sirdev_write_complete(priv->dev); 268 sirdev_write_complete(priv->dev);
270} 269}
@@ -522,7 +521,7 @@ static void irtty_close(struct tty_struct *tty)
522 521
523 /* Stop tty */ 522 /* Stop tty */
524 irtty_stop_receiver(tty, TRUE); 523 irtty_stop_receiver(tty, TRUE);
525 tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 524 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
526 if (tty->ops->stop) 525 if (tty->ops->stop)
527 tty->ops->stop(tty); 526 tty->ops->stop(tty);
528 527
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index e1429fc6d050..c747c874d44d 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -235,7 +235,6 @@ static void kingsun_rcv_irq(struct urb *urb)
235 &kingsun->stats, 235 &kingsun->stats,
236 &kingsun->rx_buff, bytes[i]); 236 &kingsun->rx_buff, bytes[i]);
237 } 237 }
238 kingsun->netdev->last_rx = jiffies;
239 do_gettimeofday(&kingsun->rx_time); 238 do_gettimeofday(&kingsun->rx_time);
240 kingsun->receiving = 239 kingsun->receiving =
241 (kingsun->rx_buff.state != OUTSIDE_FRAME) 240 (kingsun->rx_buff.state != OUTSIDE_FRAME)
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 2e67ae015d91..600d96f9cdb7 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -474,7 +474,6 @@ static void ks959_rcv_irq(struct urb *urb)
474 bytes[i]); 474 bytes[i]);
475 } 475 }
476 } 476 }
477 kingsun->netdev->last_rx = jiffies;
478 do_gettimeofday(&kingsun->rx_time); 477 do_gettimeofday(&kingsun->rx_time);
479 kingsun->receiving = 478 kingsun->receiving =
480 (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0; 479 (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 3843b5faba8b..0e7f89337b25 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -371,7 +371,6 @@ static void ksdazzle_rcv_irq(struct urb *urb)
371 async_unwrap_char(kingsun->netdev, &kingsun->stats, 371 async_unwrap_char(kingsun->netdev, &kingsun->stats,
372 &kingsun->rx_unwrap_buff, bytes[i]); 372 &kingsun->rx_unwrap_buff, bytes[i]);
373 } 373 }
374 kingsun->netdev->last_rx = jiffies;
375 kingsun->receiving = 374 kingsun->receiving =
376 (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0; 375 (kingsun->rx_unwrap_buff.state != OUTSIDE_FRAME) ? 1 : 0;
377 } 376 }
diff --git a/drivers/net/irda/ma600-sir.c b/drivers/net/irda/ma600-sir.c
index 1ceed9cfb7c4..e91216452379 100644
--- a/drivers/net/irda/ma600-sir.c
+++ b/drivers/net/irda/ma600-sir.c
@@ -236,7 +236,7 @@ static int ma600_change_speed(struct sir_dev *dev, unsigned speed)
236 * avoid the state machine complexity before we get things working 236 * avoid the state machine complexity before we get things working
237 */ 237 */
238 238
239int ma600_reset(struct sir_dev *dev) 239static int ma600_reset(struct sir_dev *dev)
240{ 240{
241 IRDA_DEBUG(2, "%s()\n", __func__); 241 IRDA_DEBUG(2, "%s()\n", __func__);
242 242
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index ad92d3ff1c40..904c9610c0dd 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -806,7 +806,6 @@ static void mcs_receive_irq(struct urb *urb)
806 mcs_unwrap_fir(mcs, urb->transfer_buffer, 806 mcs_unwrap_fir(mcs, urb->transfer_buffer,
807 urb->actual_length); 807 urb->actual_length);
808 } 808 }
809 mcs->netdev->last_rx = jiffies;
810 do_gettimeofday(&mcs->rx_time); 809 do_gettimeofday(&mcs->rx_time);
811 } 810 }
812 811
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 8583d951a6ad..2c6bf2d11bb1 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -373,7 +373,7 @@ static int __init nsc_ircc_open(chipio_t *info)
373 return -ENOMEM; 373 return -ENOMEM;
374 } 374 }
375 375
376 self = dev->priv; 376 self = netdev_priv(dev);
377 self->netdev = dev; 377 self->netdev = dev;
378 spin_lock_init(&self->lock); 378 spin_lock_init(&self->lock);
379 379
@@ -1354,7 +1354,7 @@ static int nsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
1354 __s32 speed; 1354 __s32 speed;
1355 __u8 bank; 1355 __u8 bank;
1356 1356
1357 self = (struct nsc_ircc_cb *) dev->priv; 1357 self = netdev_priv(dev);
1358 1358
1359 IRDA_ASSERT(self != NULL, return 0;); 1359 IRDA_ASSERT(self != NULL, return 0;);
1360 1360
@@ -1427,7 +1427,7 @@ static int nsc_ircc_hard_xmit_fir(struct sk_buff *skb, struct net_device *dev)
1427 __u8 bank; 1427 __u8 bank;
1428 int mtt, diff; 1428 int mtt, diff;
1429 1429
1430 self = (struct nsc_ircc_cb *) dev->priv; 1430 self = netdev_priv(dev);
1431 iobase = self->io.fir_base; 1431 iobase = self->io.fir_base;
1432 1432
1433 netif_stop_queue(dev); 1433 netif_stop_queue(dev);
@@ -1896,7 +1896,6 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
1896 skb_reset_mac_header(skb); 1896 skb_reset_mac_header(skb);
1897 skb->protocol = htons(ETH_P_IRDA); 1897 skb->protocol = htons(ETH_P_IRDA);
1898 netif_rx(skb); 1898 netif_rx(skb);
1899 self->netdev->last_rx = jiffies;
1900 } 1899 }
1901 } 1900 }
1902 /* Restore bank register */ 1901 /* Restore bank register */
@@ -2085,7 +2084,7 @@ static irqreturn_t nsc_ircc_interrupt(int irq, void *dev_id)
2085 __u8 bsr, eir; 2084 __u8 bsr, eir;
2086 int iobase; 2085 int iobase;
2087 2086
2088 self = dev->priv; 2087 self = netdev_priv(dev);
2089 2088
2090 spin_lock(&self->lock); 2089 spin_lock(&self->lock);
2091 2090
@@ -2166,7 +2165,7 @@ static int nsc_ircc_net_open(struct net_device *dev)
2166 IRDA_DEBUG(4, "%s()\n", __func__); 2165 IRDA_DEBUG(4, "%s()\n", __func__);
2167 2166
2168 IRDA_ASSERT(dev != NULL, return -1;); 2167 IRDA_ASSERT(dev != NULL, return -1;);
2169 self = (struct nsc_ircc_cb *) dev->priv; 2168 self = netdev_priv(dev);
2170 2169
2171 IRDA_ASSERT(self != NULL, return 0;); 2170 IRDA_ASSERT(self != NULL, return 0;);
2172 2171
@@ -2229,7 +2228,7 @@ static int nsc_ircc_net_close(struct net_device *dev)
2229 2228
2230 IRDA_ASSERT(dev != NULL, return -1;); 2229 IRDA_ASSERT(dev != NULL, return -1;);
2231 2230
2232 self = (struct nsc_ircc_cb *) dev->priv; 2231 self = netdev_priv(dev);
2233 IRDA_ASSERT(self != NULL, return 0;); 2232 IRDA_ASSERT(self != NULL, return 0;);
2234 2233
2235 /* Stop device */ 2234 /* Stop device */
@@ -2275,7 +2274,7 @@ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2275 2274
2276 IRDA_ASSERT(dev != NULL, return -1;); 2275 IRDA_ASSERT(dev != NULL, return -1;);
2277 2276
2278 self = dev->priv; 2277 self = netdev_priv(dev);
2279 2278
2280 IRDA_ASSERT(self != NULL, return -1;); 2279 IRDA_ASSERT(self != NULL, return -1;);
2281 2280
@@ -2310,7 +2309,7 @@ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2310 2309
2311static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev) 2310static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev)
2312{ 2311{
2313 struct nsc_ircc_cb *self = (struct nsc_ircc_cb *) dev->priv; 2312 struct nsc_ircc_cb *self = netdev_priv(dev);
2314 2313
2315 return &self->stats; 2314 return &self->stats;
2316} 2315}
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index c5b02b66f756..a0ee05318155 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -225,7 +225,6 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
225 } 225 }
226 lsr = STLSR; 226 lsr = STLSR;
227 } 227 }
228 dev->last_rx = jiffies;
229 si->last_oscr = OSCR; 228 si->last_oscr = OSCR;
230 break; 229 break;
231 230
@@ -237,7 +236,6 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
237 si->stats.rx_bytes++; 236 si->stats.rx_bytes++;
238 async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR); 237 async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR);
239 } while (STLSR & LSR_DR); 238 } while (STLSR & LSR_DR);
240 dev->last_rx = jiffies;
241 si->last_oscr = OSCR; 239 si->last_oscr = OSCR;
242 break; 240 break;
243 241
@@ -397,8 +395,6 @@ static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, in
397 395
398 si->stats.rx_packets++; 396 si->stats.rx_packets++;
399 si->stats.rx_bytes += len; 397 si->stats.rx_bytes += len;
400
401 dev->last_rx = jiffies;
402 } 398 }
403} 399}
404 400
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index a95188948de7..ccde5829ba21 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -298,7 +298,7 @@ static int sa1100_irda_suspend(struct platform_device *pdev, pm_message_t state)
298 if (!dev) 298 if (!dev)
299 return 0; 299 return 0;
300 300
301 si = dev->priv; 301 si = netdev_priv(dev);
302 if (si->open) { 302 if (si->open) {
303 /* 303 /*
304 * Stop the transmit queue 304 * Stop the transmit queue
@@ -323,7 +323,7 @@ static int sa1100_irda_resume(struct platform_device *pdev)
323 if (!dev) 323 if (!dev)
324 return 0; 324 return 0;
325 325
326 si = dev->priv; 326 si = netdev_priv(dev);
327 if (si->open) { 327 if (si->open) {
328 /* 328 /*
329 * If we missed a speed change, initialise at the new speed 329 * If we missed a speed change, initialise at the new speed
@@ -359,7 +359,7 @@ static int sa1100_irda_resume(struct platform_device *pdev)
359 */ 359 */
360static void sa1100_irda_hpsir_irq(struct net_device *dev) 360static void sa1100_irda_hpsir_irq(struct net_device *dev)
361{ 361{
362 struct sa1100_irda *si = dev->priv; 362 struct sa1100_irda *si = netdev_priv(dev);
363 int status; 363 int status;
364 364
365 status = Ser2UTSR0; 365 status = Ser2UTSR0;
@@ -410,7 +410,6 @@ static void sa1100_irda_hpsir_irq(struct net_device *dev)
410 Ser2UTDR); 410 Ser2UTDR);
411 } while (Ser2UTSR1 & UTSR1_RNE); 411 } while (Ser2UTSR1 & UTSR1_RNE);
412 412
413 dev->last_rx = jiffies;
414 } 413 }
415 414
416 if (status & UTSR0_TFS && si->tx_buff.len) { 415 if (status & UTSR0_TFS && si->tx_buff.len) {
@@ -515,7 +514,6 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
515 sa1100_irda_rx_alloc(si); 514 sa1100_irda_rx_alloc(si);
516 515
517 netif_rx(skb); 516 netif_rx(skb);
518 dev->last_rx = jiffies;
519 } else { 517 } else {
520 /* 518 /*
521 * Remap the buffer. 519 * Remap the buffer.
@@ -534,7 +532,7 @@ static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev
534 */ 532 */
535static void sa1100_irda_fir_irq(struct net_device *dev) 533static void sa1100_irda_fir_irq(struct net_device *dev)
536{ 534{
537 struct sa1100_irda *si = dev->priv; 535 struct sa1100_irda *si = netdev_priv(dev);
538 536
539 /* 537 /*
540 * Stop RX DMA 538 * Stop RX DMA
@@ -582,7 +580,7 @@ static void sa1100_irda_fir_irq(struct net_device *dev)
582static irqreturn_t sa1100_irda_irq(int irq, void *dev_id) 580static irqreturn_t sa1100_irda_irq(int irq, void *dev_id)
583{ 581{
584 struct net_device *dev = dev_id; 582 struct net_device *dev = dev_id;
585 if (IS_FIR(((struct sa1100_irda *)dev->priv))) 583 if (IS_FIR(((struct sa1100_irda *)netdev_priv(dev))))
586 sa1100_irda_fir_irq(dev); 584 sa1100_irda_fir_irq(dev);
587 else 585 else
588 sa1100_irda_hpsir_irq(dev); 586 sa1100_irda_hpsir_irq(dev);
@@ -595,7 +593,7 @@ static irqreturn_t sa1100_irda_irq(int irq, void *dev_id)
595static void sa1100_irda_txdma_irq(void *id) 593static void sa1100_irda_txdma_irq(void *id)
596{ 594{
597 struct net_device *dev = id; 595 struct net_device *dev = id;
598 struct sa1100_irda *si = dev->priv; 596 struct sa1100_irda *si = netdev_priv(dev);
599 struct sk_buff *skb = si->txskb; 597 struct sk_buff *skb = si->txskb;
600 598
601 si->txskb = NULL; 599 si->txskb = NULL;
@@ -649,7 +647,7 @@ static void sa1100_irda_txdma_irq(void *id)
649 647
650static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) 648static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
651{ 649{
652 struct sa1100_irda *si = dev->priv; 650 struct sa1100_irda *si = netdev_priv(dev);
653 int speed = irda_get_next_speed(skb); 651 int speed = irda_get_next_speed(skb);
654 652
655 /* 653 /*
@@ -724,7 +722,7 @@ static int
724sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd) 722sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
725{ 723{
726 struct if_irda_req *rq = (struct if_irda_req *)ifreq; 724 struct if_irda_req *rq = (struct if_irda_req *)ifreq;
727 struct sa1100_irda *si = dev->priv; 725 struct sa1100_irda *si = netdev_priv(dev);
728 int ret = -EOPNOTSUPP; 726 int ret = -EOPNOTSUPP;
729 727
730 switch (cmd) { 728 switch (cmd) {
@@ -766,13 +764,13 @@ sa1100_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
766 764
767static struct net_device_stats *sa1100_irda_stats(struct net_device *dev) 765static struct net_device_stats *sa1100_irda_stats(struct net_device *dev)
768{ 766{
769 struct sa1100_irda *si = dev->priv; 767 struct sa1100_irda *si = netdev_priv(dev);
770 return &si->stats; 768 return &si->stats;
771} 769}
772 770
773static int sa1100_irda_start(struct net_device *dev) 771static int sa1100_irda_start(struct net_device *dev)
774{ 772{
775 struct sa1100_irda *si = dev->priv; 773 struct sa1100_irda *si = netdev_priv(dev);
776 int err; 774 int err;
777 775
778 si->speed = 9600; 776 si->speed = 9600;
@@ -835,7 +833,7 @@ err_irq:
835 833
836static int sa1100_irda_stop(struct net_device *dev) 834static int sa1100_irda_stop(struct net_device *dev)
837{ 835{
838 struct sa1100_irda *si = dev->priv; 836 struct sa1100_irda *si = netdev_priv(dev);
839 837
840 disable_irq(dev->irq); 838 disable_irq(dev->irq);
841 sa1100_irda_shutdown(si); 839 sa1100_irda_shutdown(si);
@@ -908,7 +906,7 @@ static int sa1100_irda_probe(struct platform_device *pdev)
908 if (!dev) 906 if (!dev)
909 goto err_mem_4; 907 goto err_mem_4;
910 908
911 si = dev->priv; 909 si = netdev_priv(dev);
912 si->dev = &pdev->dev; 910 si->dev = &pdev->dev;
913 si->pdata = pdev->dev.platform_data; 911 si->pdata = pdev->dev.platform_data;
914 912
@@ -987,7 +985,7 @@ static int sa1100_irda_remove(struct platform_device *pdev)
987 struct net_device *dev = platform_get_drvdata(pdev); 985 struct net_device *dev = platform_get_drvdata(pdev);
988 986
989 if (dev) { 987 if (dev) {
990 struct sa1100_irda *si = dev->priv; 988 struct sa1100_irda *si = netdev_priv(dev);
991 unregister_netdev(dev); 989 unregister_netdev(dev);
992 kfree(si->tx_buff.head); 990 kfree(si->tx_buff.head);
993 kfree(si->rx_buff.head); 991 kfree(si->rx_buff.head);
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index 3f32909c24c8..ceef040aa76d 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -584,14 +584,14 @@ EXPORT_SYMBOL(sirdev_receive);
584 584
585static struct net_device_stats *sirdev_get_stats(struct net_device *ndev) 585static struct net_device_stats *sirdev_get_stats(struct net_device *ndev)
586{ 586{
587 struct sir_dev *dev = ndev->priv; 587 struct sir_dev *dev = netdev_priv(ndev);
588 588
589 return (dev) ? &dev->stats : NULL; 589 return (dev) ? &dev->stats : NULL;
590} 590}
591 591
592static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev) 592static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
593{ 593{
594 struct sir_dev *dev = ndev->priv; 594 struct sir_dev *dev = netdev_priv(ndev);
595 unsigned long flags; 595 unsigned long flags;
596 int actual = 0; 596 int actual = 0;
597 int err; 597 int err;
@@ -683,7 +683,7 @@ static int sirdev_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
683static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 683static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
684{ 684{
685 struct if_irda_req *irq = (struct if_irda_req *) rq; 685 struct if_irda_req *irq = (struct if_irda_req *) rq;
686 struct sir_dev *dev = ndev->priv; 686 struct sir_dev *dev = netdev_priv(ndev);
687 int ret = 0; 687 int ret = 0;
688 688
689 IRDA_ASSERT(dev != NULL, return -1;); 689 IRDA_ASSERT(dev != NULL, return -1;);
@@ -795,7 +795,7 @@ static void sirdev_free_buffers(struct sir_dev *dev)
795 795
796static int sirdev_open(struct net_device *ndev) 796static int sirdev_open(struct net_device *ndev)
797{ 797{
798 struct sir_dev *dev = ndev->priv; 798 struct sir_dev *dev = netdev_priv(ndev);
799 const struct sir_driver *drv = dev->drv; 799 const struct sir_driver *drv = dev->drv;
800 800
801 if (!drv) 801 if (!drv)
@@ -840,7 +840,7 @@ errout_dec:
840 840
841static int sirdev_close(struct net_device *ndev) 841static int sirdev_close(struct net_device *ndev)
842{ 842{
843 struct sir_dev *dev = ndev->priv; 843 struct sir_dev *dev = netdev_priv(ndev);
844 const struct sir_driver *drv; 844 const struct sir_driver *drv;
845 845
846// IRDA_DEBUG(0, "%s\n", __func__); 846// IRDA_DEBUG(0, "%s\n", __func__);
@@ -896,7 +896,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
896 IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__); 896 IRDA_ERROR("%s - Can't allocate memory for IrDA control block!\n", __func__);
897 goto out; 897 goto out;
898 } 898 }
899 dev = ndev->priv; 899 dev = netdev_priv(ndev);
900 900
901 irda_init_max_qos_capabilies(&dev->qos); 901 irda_init_max_qos_capabilies(&dev->qos);
902 dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; 902 dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index b5360fe99d3a..5d09e157e15b 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -872,7 +872,7 @@ static void smsc_ircc_timeout(struct net_device *dev)
872 * waits until the next transmit interrupt, and continues until the 872 * waits until the next transmit interrupt, and continues until the
873 * frame is transmitted. 873 * frame is transmitted.
874 */ 874 */
875int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev) 875static int smsc_ircc_hard_xmit_sir(struct sk_buff *skb, struct net_device *dev)
876{ 876{
877 struct smsc_ircc_cb *self; 877 struct smsc_ircc_cb *self;
878 unsigned long flags; 878 unsigned long flags;
@@ -1128,7 +1128,7 @@ static void smsc_ircc_change_speed(struct smsc_ircc_cb *self, u32 speed)
1128 * Set speed of IrDA port to specified baudrate 1128 * Set speed of IrDA port to specified baudrate
1129 * 1129 *
1130 */ 1130 */
1131void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed) 1131static void smsc_ircc_set_sir_speed(struct smsc_ircc_cb *self, __u32 speed)
1132{ 1132{
1133 int iobase; 1133 int iobase;
1134 int fcr; /* FIFO control reg */ 1134 int fcr; /* FIFO control reg */
@@ -1894,7 +1894,7 @@ static void __exit smsc_ircc_cleanup(void)
1894 * This function *must* be called with spinlock held, because it may 1894 * This function *must* be called with spinlock held, because it may
1895 * be called from the irq handler (via smsc_ircc_change_speed()). - Jean II 1895 * be called from the irq handler (via smsc_ircc_change_speed()). - Jean II
1896 */ 1896 */
1897void smsc_ircc_sir_start(struct smsc_ircc_cb *self) 1897static void smsc_ircc_sir_start(struct smsc_ircc_cb *self)
1898{ 1898{
1899 struct net_device *dev; 1899 struct net_device *dev;
1900 int fir_base, sir_base; 1900 int fir_base, sir_base;
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 3575804fd7c6..ca4cd9266e55 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -824,7 +824,6 @@ static void stir_rcv_irq(struct urb *urb)
824 unwrap_chars(stir, urb->transfer_buffer, 824 unwrap_chars(stir, urb->transfer_buffer,
825 urb->actual_length); 825 urb->actual_length);
826 826
827 stir->netdev->last_rx = jiffies;
828 do_gettimeofday(&stir->rx_time); 827 do_gettimeofday(&stir->rx_time);
829 } 828 }
830 829
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 84e609ea5fbb..74c78cf7a333 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -334,7 +334,7 @@ static __devinit int via_ircc_open(int i, chipio_t * info, unsigned int id)
334 if (dev == NULL) 334 if (dev == NULL)
335 return -ENOMEM; 335 return -ENOMEM;
336 336
337 self = dev->priv; 337 self = netdev_priv(dev);
338 self->netdev = dev; 338 self->netdev = dev;
339 spin_lock_init(&self->lock); 339 spin_lock_init(&self->lock);
340 340
@@ -824,7 +824,7 @@ static int via_ircc_hard_xmit_sir(struct sk_buff *skb,
824 u16 iobase; 824 u16 iobase;
825 __u32 speed; 825 __u32 speed;
826 826
827 self = (struct via_ircc_cb *) dev->priv; 827 self = netdev_priv(dev);
828 IRDA_ASSERT(self != NULL, return 0;); 828 IRDA_ASSERT(self != NULL, return 0;);
829 iobase = self->io.fir_base; 829 iobase = self->io.fir_base;
830 830
@@ -896,7 +896,7 @@ static int via_ircc_hard_xmit_fir(struct sk_buff *skb,
896 __u32 speed; 896 __u32 speed;
897 unsigned long flags; 897 unsigned long flags;
898 898
899 self = (struct via_ircc_cb *) dev->priv; 899 self = netdev_priv(dev);
900 iobase = self->io.fir_base; 900 iobase = self->io.fir_base;
901 901
902 if (self->st_fifo.len) 902 if (self->st_fifo.len)
@@ -1349,7 +1349,7 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
1349static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id) 1349static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1350{ 1350{
1351 struct net_device *dev = dev_id; 1351 struct net_device *dev = dev_id;
1352 struct via_ircc_cb *self = dev->priv; 1352 struct via_ircc_cb *self = netdev_priv(dev);
1353 int iobase; 1353 int iobase;
1354 u8 iHostIntType, iRxIntType, iTxIntType; 1354 u8 iHostIntType, iRxIntType, iTxIntType;
1355 1355
@@ -1522,7 +1522,7 @@ static int via_ircc_net_open(struct net_device *dev)
1522 IRDA_DEBUG(3, "%s()\n", __func__); 1522 IRDA_DEBUG(3, "%s()\n", __func__);
1523 1523
1524 IRDA_ASSERT(dev != NULL, return -1;); 1524 IRDA_ASSERT(dev != NULL, return -1;);
1525 self = (struct via_ircc_cb *) dev->priv; 1525 self = netdev_priv(dev);
1526 self->stats.rx_packets = 0; 1526 self->stats.rx_packets = 0;
1527 IRDA_ASSERT(self != NULL, return 0;); 1527 IRDA_ASSERT(self != NULL, return 0;);
1528 iobase = self->io.fir_base; 1528 iobase = self->io.fir_base;
@@ -1589,7 +1589,7 @@ static int via_ircc_net_close(struct net_device *dev)
1589 IRDA_DEBUG(3, "%s()\n", __func__); 1589 IRDA_DEBUG(3, "%s()\n", __func__);
1590 1590
1591 IRDA_ASSERT(dev != NULL, return -1;); 1591 IRDA_ASSERT(dev != NULL, return -1;);
1592 self = (struct via_ircc_cb *) dev->priv; 1592 self = netdev_priv(dev);
1593 IRDA_ASSERT(self != NULL, return 0;); 1593 IRDA_ASSERT(self != NULL, return 0;);
1594 1594
1595 /* Stop device */ 1595 /* Stop device */
@@ -1628,7 +1628,7 @@ static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1628 int ret = 0; 1628 int ret = 0;
1629 1629
1630 IRDA_ASSERT(dev != NULL, return -1;); 1630 IRDA_ASSERT(dev != NULL, return -1;);
1631 self = dev->priv; 1631 self = netdev_priv(dev);
1632 IRDA_ASSERT(self != NULL, return -1;); 1632 IRDA_ASSERT(self != NULL, return -1;);
1633 IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, 1633 IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
1634 cmd); 1634 cmd);
@@ -1663,7 +1663,7 @@ static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1663static struct net_device_stats *via_ircc_net_get_stats(struct net_device 1663static struct net_device_stats *via_ircc_net_get_stats(struct net_device
1664 *dev) 1664 *dev)
1665{ 1665{
1666 struct via_ircc_cb *self = (struct via_ircc_cb *) dev->priv; 1666 struct via_ircc_cb *self = netdev_priv(dev);
1667 1667
1668 return &self->stats; 1668 return &self->stats;
1669} 1669}
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 9c926d205de9..0d30f8d659a1 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -178,7 +178,7 @@ static void vlsi_proc_pdev(struct seq_file *seq, struct pci_dev *pdev)
178 178
179static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev) 179static void vlsi_proc_ndev(struct seq_file *seq, struct net_device *ndev)
180{ 180{
181 vlsi_irda_dev_t *idev = ndev->priv; 181 vlsi_irda_dev_t *idev = netdev_priv(ndev);
182 u8 byte; 182 u8 byte;
183 u16 word; 183 u16 word;
184 unsigned delta1, delta2; 184 unsigned delta1, delta2;
@@ -346,7 +346,7 @@ static void vlsi_proc_ring(struct seq_file *seq, struct vlsi_ring *r)
346static int vlsi_seq_show(struct seq_file *seq, void *v) 346static int vlsi_seq_show(struct seq_file *seq, void *v)
347{ 347{
348 struct net_device *ndev = seq->private; 348 struct net_device *ndev = seq->private;
349 vlsi_irda_dev_t *idev = ndev->priv; 349 vlsi_irda_dev_t *idev = netdev_priv(ndev);
350 unsigned long flags; 350 unsigned long flags;
351 351
352 seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION); 352 seq_printf(seq, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION);
@@ -543,7 +543,7 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
543 struct sk_buff *skb; 543 struct sk_buff *skb;
544 int ret = 0; 544 int ret = 0;
545 struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev); 545 struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev);
546 vlsi_irda_dev_t *idev = ndev->priv; 546 vlsi_irda_dev_t *idev = netdev_priv(ndev);
547 547
548 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir); 548 pci_dma_sync_single_for_cpu(r->pdev, rd_get_addr(rd), r->len, r->dir);
549 /* dma buffer now owned by the CPU */ 549 /* dma buffer now owned by the CPU */
@@ -600,7 +600,6 @@ static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
600 netif_rx(skb); 600 netif_rx(skb);
601 else 601 else
602 netif_rx_ni(skb); 602 netif_rx_ni(skb);
603 ndev->last_rx = jiffies;
604 603
605done: 604done:
606 rd_set_status(rd, 0); 605 rd_set_status(rd, 0);
@@ -638,7 +637,7 @@ static void vlsi_fill_rx(struct vlsi_ring *r)
638 637
639static void vlsi_rx_interrupt(struct net_device *ndev) 638static void vlsi_rx_interrupt(struct net_device *ndev)
640{ 639{
641 vlsi_irda_dev_t *idev = ndev->priv; 640 vlsi_irda_dev_t *idev = netdev_priv(ndev);
642 struct vlsi_ring *r = idev->rx_ring; 641 struct vlsi_ring *r = idev->rx_ring;
643 struct ring_descr *rd; 642 struct ring_descr *rd;
644 int ret; 643 int ret;
@@ -856,7 +855,7 @@ static int vlsi_set_baud(vlsi_irda_dev_t *idev, unsigned iobase)
856 855
857static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) 856static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
858{ 857{
859 vlsi_irda_dev_t *idev = ndev->priv; 858 vlsi_irda_dev_t *idev = netdev_priv(ndev);
860 struct vlsi_ring *r = idev->tx_ring; 859 struct vlsi_ring *r = idev->tx_ring;
861 struct ring_descr *rd; 860 struct ring_descr *rd;
862 unsigned long flags; 861 unsigned long flags;
@@ -1063,7 +1062,7 @@ drop:
1063 1062
1064static void vlsi_tx_interrupt(struct net_device *ndev) 1063static void vlsi_tx_interrupt(struct net_device *ndev)
1065{ 1064{
1066 vlsi_irda_dev_t *idev = ndev->priv; 1065 vlsi_irda_dev_t *idev = netdev_priv(ndev);
1067 struct vlsi_ring *r = idev->tx_ring; 1066 struct vlsi_ring *r = idev->tx_ring;
1068 struct ring_descr *rd; 1067 struct ring_descr *rd;
1069 unsigned iobase; 1068 unsigned iobase;
@@ -1262,7 +1261,7 @@ static inline void vlsi_clear_regs(unsigned iobase)
1262static int vlsi_init_chip(struct pci_dev *pdev) 1261static int vlsi_init_chip(struct pci_dev *pdev)
1263{ 1262{
1264 struct net_device *ndev = pci_get_drvdata(pdev); 1263 struct net_device *ndev = pci_get_drvdata(pdev);
1265 vlsi_irda_dev_t *idev = ndev->priv; 1264 vlsi_irda_dev_t *idev = netdev_priv(ndev);
1266 unsigned iobase; 1265 unsigned iobase;
1267 u16 ptr; 1266 u16 ptr;
1268 1267
@@ -1376,14 +1375,14 @@ static int vlsi_stop_hw(vlsi_irda_dev_t *idev)
1376 1375
1377static struct net_device_stats * vlsi_get_stats(struct net_device *ndev) 1376static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
1378{ 1377{
1379 vlsi_irda_dev_t *idev = ndev->priv; 1378 vlsi_irda_dev_t *idev = netdev_priv(ndev);
1380 1379
1381 return &idev->stats; 1380 return &idev->stats;
1382} 1381}
1383 1382
1384static void vlsi_tx_timeout(struct net_device *ndev) 1383static void vlsi_tx_timeout(struct net_device *ndev)
1385{ 1384{
1386 vlsi_irda_dev_t *idev = ndev->priv; 1385 vlsi_irda_dev_t *idev = netdev_priv(ndev);
1387 1386
1388 1387
1389 vlsi_reg_debug(ndev->base_addr, __func__); 1388 vlsi_reg_debug(ndev->base_addr, __func__);
@@ -1408,7 +1407,7 @@ static void vlsi_tx_timeout(struct net_device *ndev)
1408 1407
1409static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 1408static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1410{ 1409{
1411 vlsi_irda_dev_t *idev = ndev->priv; 1410 vlsi_irda_dev_t *idev = netdev_priv(ndev);
1412 struct if_irda_req *irq = (struct if_irda_req *) rq; 1411 struct if_irda_req *irq = (struct if_irda_req *) rq;
1413 unsigned long flags; 1412 unsigned long flags;
1414 u16 fifocnt; 1413 u16 fifocnt;
@@ -1458,7 +1457,7 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1458static irqreturn_t vlsi_interrupt(int irq, void *dev_instance) 1457static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
1459{ 1458{
1460 struct net_device *ndev = dev_instance; 1459 struct net_device *ndev = dev_instance;
1461 vlsi_irda_dev_t *idev = ndev->priv; 1460 vlsi_irda_dev_t *idev = netdev_priv(ndev);
1462 unsigned iobase; 1461 unsigned iobase;
1463 u8 irintr; 1462 u8 irintr;
1464 int boguscount = 5; 1463 int boguscount = 5;
@@ -1499,7 +1498,7 @@ static irqreturn_t vlsi_interrupt(int irq, void *dev_instance)
1499 1498
1500static int vlsi_open(struct net_device *ndev) 1499static int vlsi_open(struct net_device *ndev)
1501{ 1500{
1502 vlsi_irda_dev_t *idev = ndev->priv; 1501 vlsi_irda_dev_t *idev = netdev_priv(ndev);
1503 int err = -EAGAIN; 1502 int err = -EAGAIN;
1504 char hwname[32]; 1503 char hwname[32];
1505 1504
@@ -1558,7 +1557,7 @@ errout:
1558 1557
1559static int vlsi_close(struct net_device *ndev) 1558static int vlsi_close(struct net_device *ndev)
1560{ 1559{
1561 vlsi_irda_dev_t *idev = ndev->priv; 1560 vlsi_irda_dev_t *idev = netdev_priv(ndev);
1562 1561
1563 netif_stop_queue(ndev); 1562 netif_stop_queue(ndev);
1564 1563
@@ -1581,7 +1580,7 @@ static int vlsi_close(struct net_device *ndev)
1581 1580
1582static int vlsi_irda_init(struct net_device *ndev) 1581static int vlsi_irda_init(struct net_device *ndev)
1583{ 1582{
1584 vlsi_irda_dev_t *idev = ndev->priv; 1583 vlsi_irda_dev_t *idev = netdev_priv(ndev);
1585 struct pci_dev *pdev = idev->pdev; 1584 struct pci_dev *pdev = idev->pdev;
1586 1585
1587 ndev->irq = pdev->irq; 1586 ndev->irq = pdev->irq;
@@ -1656,7 +1655,7 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1656 goto out_disable; 1655 goto out_disable;
1657 } 1656 }
1658 1657
1659 idev = ndev->priv; 1658 idev = netdev_priv(ndev);
1660 1659
1661 spin_lock_init(&idev->lock); 1660 spin_lock_init(&idev->lock);
1662 mutex_init(&idev->mtx); 1661 mutex_init(&idev->mtx);
@@ -1713,7 +1712,7 @@ static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
1713 1712
1714 unregister_netdev(ndev); 1713 unregister_netdev(ndev);
1715 1714
1716 idev = ndev->priv; 1715 idev = netdev_priv(ndev);
1717 mutex_lock(&idev->mtx); 1716 mutex_lock(&idev->mtx);
1718 if (idev->proc_entry) { 1717 if (idev->proc_entry) {
1719 remove_proc_entry(ndev->name, vlsi_proc_root); 1718 remove_proc_entry(ndev->name, vlsi_proc_root);
@@ -1748,7 +1747,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1748 __func__, pci_name(pdev)); 1747 __func__, pci_name(pdev));
1749 return 0; 1748 return 0;
1750 } 1749 }
1751 idev = ndev->priv; 1750 idev = netdev_priv(ndev);
1752 mutex_lock(&idev->mtx); 1751 mutex_lock(&idev->mtx);
1753 if (pdev->current_state != 0) { /* already suspended */ 1752 if (pdev->current_state != 0) { /* already suspended */
1754 if (state.event > pdev->current_state) { /* simply go deeper */ 1753 if (state.event > pdev->current_state) { /* simply go deeper */
@@ -1787,7 +1786,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1787 __func__, pci_name(pdev)); 1786 __func__, pci_name(pdev));
1788 return 0; 1787 return 0;
1789 } 1788 }
1790 idev = ndev->priv; 1789 idev = netdev_priv(ndev);
1791 mutex_lock(&idev->mtx); 1790 mutex_lock(&idev->mtx);
1792 if (pdev->current_state == 0) { 1791 if (pdev->current_state == 0) {
1793 mutex_unlock(&idev->mtx); 1792 mutex_unlock(&idev->mtx);
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 002a6d769f21..30ec9131c5ce 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -147,8 +147,8 @@ static void __exit w83977af_cleanup(void)
147 * Open driver instance 147 * Open driver instance
148 * 148 *
149 */ 149 */
150int w83977af_open(int i, unsigned int iobase, unsigned int irq, 150static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
151 unsigned int dma) 151 unsigned int dma)
152{ 152{
153 struct net_device *dev; 153 struct net_device *dev;
154 struct w83977af_ir *self; 154 struct w83977af_ir *self;
@@ -178,7 +178,7 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
178 goto err_out; 178 goto err_out;
179 } 179 }
180 180
181 self = dev->priv; 181 self = netdev_priv(dev);
182 spin_lock_init(&self->lock); 182 spin_lock_init(&self->lock);
183 183
184 184
@@ -310,7 +310,7 @@ static int w83977af_close(struct w83977af_ir *self)
310 return 0; 310 return 0;
311} 311}
312 312
313int w83977af_probe( int iobase, int irq, int dma) 313static int w83977af_probe(int iobase, int irq, int dma)
314{ 314{
315 int version; 315 int version;
316 int i; 316 int i;
@@ -409,7 +409,7 @@ int w83977af_probe( int iobase, int irq, int dma)
409 return -1; 409 return -1;
410} 410}
411 411
412void w83977af_change_speed(struct w83977af_ir *self, __u32 speed) 412static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
413{ 413{
414 int ir_mode = HCR_SIR; 414 int ir_mode = HCR_SIR;
415 int iobase; 415 int iobase;
@@ -489,7 +489,7 @@ void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
489 * Sets up a DMA transfer to send the current frame. 489 * Sets up a DMA transfer to send the current frame.
490 * 490 *
491 */ 491 */
492int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev) 492static int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
493{ 493{
494 struct w83977af_ir *self; 494 struct w83977af_ir *self;
495 __s32 speed; 495 __s32 speed;
@@ -497,7 +497,7 @@ int w83977af_hard_xmit(struct sk_buff *skb, struct net_device *dev)
497 __u8 set; 497 __u8 set;
498 int mtt; 498 int mtt;
499 499
500 self = (struct w83977af_ir *) dev->priv; 500 self = netdev_priv(dev);
501 501
502 iobase = self->io.fir_base; 502 iobase = self->io.fir_base;
503 503
@@ -731,7 +731,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
731 * if it starts to receive a frame. 731 * if it starts to receive a frame.
732 * 732 *
733 */ 733 */
734int w83977af_dma_receive(struct w83977af_ir *self) 734static int w83977af_dma_receive(struct w83977af_ir *self)
735{ 735{
736 int iobase; 736 int iobase;
737 __u8 set; 737 __u8 set;
@@ -803,7 +803,7 @@ int w83977af_dma_receive(struct w83977af_ir *self)
803 * Finished with receiving a frame 803 * Finished with receiving a frame
804 * 804 *
805 */ 805 */
806int w83977af_dma_receive_complete(struct w83977af_ir *self) 806static int w83977af_dma_receive_complete(struct w83977af_ir *self)
807{ 807{
808 struct sk_buff *skb; 808 struct sk_buff *skb;
809 struct st_fifo *st_fifo; 809 struct st_fifo *st_fifo;
@@ -923,7 +923,6 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self)
923 skb_reset_mac_header(skb); 923 skb_reset_mac_header(skb);
924 skb->protocol = htons(ETH_P_IRDA); 924 skb->protocol = htons(ETH_P_IRDA);
925 netif_rx(skb); 925 netif_rx(skb);
926 self->netdev->last_rx = jiffies;
927 } 926 }
928 } 927 }
929 /* Restore set register */ 928 /* Restore set register */
@@ -1119,7 +1118,7 @@ static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
1119 __u8 set, icr, isr; 1118 __u8 set, icr, isr;
1120 int iobase; 1119 int iobase;
1121 1120
1122 self = dev->priv; 1121 self = netdev_priv(dev);
1123 1122
1124 iobase = self->io.fir_base; 1123 iobase = self->io.fir_base;
1125 1124
@@ -1192,7 +1191,7 @@ static int w83977af_net_open(struct net_device *dev)
1192 IRDA_DEBUG(0, "%s()\n", __func__ ); 1191 IRDA_DEBUG(0, "%s()\n", __func__ );
1193 1192
1194 IRDA_ASSERT(dev != NULL, return -1;); 1193 IRDA_ASSERT(dev != NULL, return -1;);
1195 self = (struct w83977af_ir *) dev->priv; 1194 self = netdev_priv(dev);
1196 1195
1197 IRDA_ASSERT(self != NULL, return 0;); 1196 IRDA_ASSERT(self != NULL, return 0;);
1198 1197
@@ -1256,7 +1255,7 @@ static int w83977af_net_close(struct net_device *dev)
1256 1255
1257 IRDA_ASSERT(dev != NULL, return -1;); 1256 IRDA_ASSERT(dev != NULL, return -1;);
1258 1257
1259 self = (struct w83977af_ir *) dev->priv; 1258 self = netdev_priv(dev);
1260 1259
1261 IRDA_ASSERT(self != NULL, return 0;); 1260 IRDA_ASSERT(self != NULL, return 0;);
1262 1261
@@ -1303,7 +1302,7 @@ static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1303 1302
1304 IRDA_ASSERT(dev != NULL, return -1;); 1303 IRDA_ASSERT(dev != NULL, return -1;);
1305 1304
1306 self = dev->priv; 1305 self = netdev_priv(dev);
1307 1306
1308 IRDA_ASSERT(self != NULL, return -1;); 1307 IRDA_ASSERT(self != NULL, return -1;);
1309 1308
@@ -1339,7 +1338,7 @@ out:
1339 1338
1340static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev) 1339static struct net_device_stats *w83977af_net_get_stats(struct net_device *dev)
1341{ 1340{
1342 struct w83977af_ir *self = (struct w83977af_ir *) dev->priv; 1341 struct w83977af_ir *self = netdev_priv(dev);
1343 1342
1344 return &self->stats; 1343 return &self->stats;
1345} 1344}
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c
index d6ff26af37b3..3126678bdd3c 100644
--- a/drivers/net/isa-skeleton.c
+++ b/drivers/net/isa-skeleton.c
@@ -192,7 +192,6 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr)
192 static unsigned version_printed; 192 static unsigned version_printed;
193 int i; 193 int i;
194 int err = -ENODEV; 194 int err = -ENODEV;
195 DECLARE_MAC_BUF(mac);
196 195
197 /* Grab the region so that no one else tries to probe our ioports. */ 196 /* Grab the region so that no one else tries to probe our ioports. */
198 if (!request_region(ioaddr, NETCARD_IO_EXTENT, cardname)) 197 if (!request_region(ioaddr, NETCARD_IO_EXTENT, cardname))
@@ -220,7 +219,7 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr)
220 for (i = 0; i < 6; i++) 219 for (i = 0; i < 6; i++)
221 dev->dev_addr[i] = inb(ioaddr + i); 220 dev->dev_addr[i] = inb(ioaddr + i);
222 221
223 printk("%s", print_mac(mac, dev->dev_addr)); 222 printk("%pM", dev->dev_addr);
224 223
225 err = -EAGAIN; 224 err = -EAGAIN;
226#ifdef jumpered_interrupts 225#ifdef jumpered_interrupts
@@ -584,7 +583,6 @@ net_rx(struct net_device *dev)
584 insw(ioaddr, skb->data, (pkt_len + 1) >> 1); 583 insw(ioaddr, skb->data, (pkt_len + 1) >> 1);
585 584
586 netif_rx(skb); 585 netif_rx(skb);
587 dev->last_rx = jiffies;
588 lp->stats.rx_packets++; 586 lp->stats.rx_packets++;
589 lp->stats.rx_bytes += pkt_len; 587 lp->stats.rx_bytes += pkt_len;
590 } 588 }
@@ -711,15 +709,3 @@ cleanup_module(void)
711} 709}
712 710
713#endif /* MODULE */ 711#endif /* MODULE */
714
715/*
716 * Local variables:
717 * compile-command:
718 * gcc -D__KERNEL__ -Wall -Wstrict-prototypes -Wwrite-strings
719 * -Wredundant-decls -O2 -m486 -c skeleton.c
720 * version-control: t
721 * kept-new-versions: 5
722 * tab-width: 4
723 * c-indent-level: 4
724 * End:
725 */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index c46864d626b2..c7457f97259d 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -952,7 +952,7 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
952 952
953static void veth_set_multicast_list(struct net_device *dev) 953static void veth_set_multicast_list(struct net_device *dev)
954{ 954{
955 struct veth_port *port = (struct veth_port *) dev->priv; 955 struct veth_port *port = netdev_priv(dev);
956 unsigned long flags; 956 unsigned long flags;
957 957
958 write_lock_irqsave(&port->mcast_gate, flags); 958 write_lock_irqsave(&port->mcast_gate, flags);
@@ -1044,7 +1044,7 @@ static struct net_device *veth_probe_one(int vlan,
1044 return NULL; 1044 return NULL;
1045 } 1045 }
1046 1046
1047 port = (struct veth_port *) dev->priv; 1047 port = netdev_priv(dev);
1048 1048
1049 spin_lock_init(&port->queue_lock); 1049 spin_lock_init(&port->queue_lock);
1050 rwlock_init(&port->mcast_gate); 1050 rwlock_init(&port->mcast_gate);
@@ -1102,7 +1102,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
1102 struct net_device *dev) 1102 struct net_device *dev)
1103{ 1103{
1104 struct veth_lpar_connection *cnx = veth_cnx[rlp]; 1104 struct veth_lpar_connection *cnx = veth_cnx[rlp];
1105 struct veth_port *port = (struct veth_port *) dev->priv; 1105 struct veth_port *port = netdev_priv(dev);
1106 HvLpEvent_Rc rc; 1106 HvLpEvent_Rc rc;
1107 struct veth_msg *msg = NULL; 1107 struct veth_msg *msg = NULL;
1108 unsigned long flags; 1108 unsigned long flags;
@@ -1191,7 +1191,7 @@ static void veth_transmit_to_many(struct sk_buff *skb,
1191static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev) 1191static int veth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1192{ 1192{
1193 unsigned char *frame = skb->data; 1193 unsigned char *frame = skb->data;
1194 struct veth_port *port = (struct veth_port *) dev->priv; 1194 struct veth_port *port = netdev_priv(dev);
1195 HvLpIndexMap lpmask; 1195 HvLpIndexMap lpmask;
1196 1196
1197 if (! (frame[0] & 0x01)) { 1197 if (! (frame[0] & 0x01)) {
@@ -1255,7 +1255,7 @@ static void veth_wake_queues(struct veth_lpar_connection *cnx)
1255 if (! dev) 1255 if (! dev)
1256 continue; 1256 continue;
1257 1257
1258 port = (struct veth_port *)dev->priv; 1258 port = netdev_priv(dev);
1259 1259
1260 if (! (port->lpar_map & (1<<cnx->remote_lp))) 1260 if (! (port->lpar_map & (1<<cnx->remote_lp)))
1261 continue; 1261 continue;
@@ -1284,7 +1284,7 @@ static void veth_stop_queues(struct veth_lpar_connection *cnx)
1284 if (! dev) 1284 if (! dev)
1285 continue; 1285 continue;
1286 1286
1287 port = (struct veth_port *)dev->priv; 1287 port = netdev_priv(dev);
1288 1288
1289 /* If this cnx is not on the vlan for this port, continue */ 1289 /* If this cnx is not on the vlan for this port, continue */
1290 if (! (port->lpar_map & (1 << cnx->remote_lp))) 1290 if (! (port->lpar_map & (1 << cnx->remote_lp)))
@@ -1506,7 +1506,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
1506 continue; 1506 continue;
1507 } 1507 }
1508 1508
1509 port = (struct veth_port *)dev->priv; 1509 port = netdev_priv(dev);
1510 dest = *((u64 *) skb->data) & 0xFFFFFFFFFFFF0000; 1510 dest = *((u64 *) skb->data) & 0xFFFFFFFFFFFF0000;
1511 1511
1512 if ((vlan > HVMAXARCHITECTEDVIRTUALLANS) || !port) { 1512 if ((vlan > HVMAXARCHITECTEDVIRTUALLANS) || !port) {
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index be3c7dc96f63..eee28d395682 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -321,6 +321,24 @@ ixgb_reset(struct ixgb_adapter *adapter)
321 } 321 }
322} 322}
323 323
324static const struct net_device_ops ixgb_netdev_ops = {
325 .ndo_open = ixgb_open,
326 .ndo_stop = ixgb_close,
327 .ndo_start_xmit = ixgb_xmit_frame,
328 .ndo_get_stats = ixgb_get_stats,
329 .ndo_set_multicast_list = ixgb_set_multi,
330 .ndo_validate_addr = eth_validate_addr,
331 .ndo_set_mac_address = ixgb_set_mac,
332 .ndo_change_mtu = ixgb_change_mtu,
333 .ndo_tx_timeout = ixgb_tx_timeout,
334 .ndo_vlan_rx_register = ixgb_vlan_rx_register,
335 .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
336 .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
337#ifdef CONFIG_NET_POLL_CONTROLLER
338 .ndo_poll_controller = ixgb_netpoll,
339#endif
340};
341
324/** 342/**
325 * ixgb_probe - Device Initialization Routine 343 * ixgb_probe - Device Initialization Routine
326 * @pdev: PCI device information struct 344 * @pdev: PCI device information struct
@@ -381,8 +399,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
381 adapter->hw.back = adapter; 399 adapter->hw.back = adapter;
382 adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT); 400 adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
383 401
384 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0), 402 adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
385 pci_resource_len(pdev, BAR_0));
386 if (!adapter->hw.hw_addr) { 403 if (!adapter->hw.hw_addr) {
387 err = -EIO; 404 err = -EIO;
388 goto err_ioremap; 405 goto err_ioremap;
@@ -397,23 +414,10 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
397 } 414 }
398 } 415 }
399 416
400 netdev->open = &ixgb_open; 417 netdev->netdev_ops = &ixgb_netdev_ops;
401 netdev->stop = &ixgb_close;
402 netdev->hard_start_xmit = &ixgb_xmit_frame;
403 netdev->get_stats = &ixgb_get_stats;
404 netdev->set_multicast_list = &ixgb_set_multi;
405 netdev->set_mac_address = &ixgb_set_mac;
406 netdev->change_mtu = &ixgb_change_mtu;
407 ixgb_set_ethtool_ops(netdev); 418 ixgb_set_ethtool_ops(netdev);
408 netdev->tx_timeout = &ixgb_tx_timeout;
409 netdev->watchdog_timeo = 5 * HZ; 419 netdev->watchdog_timeo = 5 * HZ;
410 netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64); 420 netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
411 netdev->vlan_rx_register = ixgb_vlan_rx_register;
412 netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
413 netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
414#ifdef CONFIG_NET_POLL_CONTROLLER
415 netdev->poll_controller = ixgb_netpoll;
416#endif
417 421
418 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 422 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
419 423
@@ -1106,8 +1110,15 @@ ixgb_watchdog(unsigned long data)
1106 1110
1107 if (adapter->hw.link_up) { 1111 if (adapter->hw.link_up) {
1108 if (!netif_carrier_ok(netdev)) { 1112 if (!netif_carrier_ok(netdev)) {
1109 DPRINTK(LINK, INFO, 1113 printk(KERN_INFO "ixgb: %s NIC Link is Up 10 Gbps "
1110 "NIC Link is Up 10000 Mbps Full Duplex\n"); 1114 "Full Duplex, Flow Control: %s\n",
1115 netdev->name,
1116 (adapter->hw.fc.type == ixgb_fc_full) ?
1117 "RX/TX" :
1118 ((adapter->hw.fc.type == ixgb_fc_rx_pause) ?
1119 "RX" :
1120 ((adapter->hw.fc.type == ixgb_fc_tx_pause) ?
1121 "TX" : "None")));
1111 adapter->link_speed = 10000; 1122 adapter->link_speed = 10000;
1112 adapter->link_duplex = FULL_DUPLEX; 1123 adapter->link_duplex = FULL_DUPLEX;
1113 netif_carrier_on(netdev); 1124 netif_carrier_on(netdev);
@@ -1117,7 +1128,8 @@ ixgb_watchdog(unsigned long data)
1117 if (netif_carrier_ok(netdev)) { 1128 if (netif_carrier_ok(netdev)) {
1118 adapter->link_speed = 0; 1129 adapter->link_speed = 0;
1119 adapter->link_duplex = 0; 1130 adapter->link_duplex = 0;
1120 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 1131 printk(KERN_INFO "ixgb: %s NIC Link is Down\n",
1132 netdev->name);
1121 netif_carrier_off(netdev); 1133 netif_carrier_off(netdev);
1122 netif_stop_queue(netdev); 1134 netif_stop_queue(netdev);
1123 1135
@@ -1709,14 +1721,14 @@ ixgb_intr(int irq, void *data)
1709 if (!test_bit(__IXGB_DOWN, &adapter->flags)) 1721 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1710 mod_timer(&adapter->watchdog_timer, jiffies); 1722 mod_timer(&adapter->watchdog_timer, jiffies);
1711 1723
1712 if (netif_rx_schedule_prep(netdev, &adapter->napi)) { 1724 if (netif_rx_schedule_prep(&adapter->napi)) {
1713 1725
1714 /* Disable interrupts and register for poll. The flush 1726 /* Disable interrupts and register for poll. The flush
1715 of the posted write is intentionally left out. 1727 of the posted write is intentionally left out.
1716 */ 1728 */
1717 1729
1718 IXGB_WRITE_REG(&adapter->hw, IMC, ~0); 1730 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1719 __netif_rx_schedule(netdev, &adapter->napi); 1731 __netif_rx_schedule(&adapter->napi);
1720 } 1732 }
1721 return IRQ_HANDLED; 1733 return IRQ_HANDLED;
1722} 1734}
@@ -1730,7 +1742,6 @@ static int
1730ixgb_clean(struct napi_struct *napi, int budget) 1742ixgb_clean(struct napi_struct *napi, int budget)
1731{ 1743{
1732 struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi); 1744 struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
1733 struct net_device *netdev = adapter->netdev;
1734 int work_done = 0; 1745 int work_done = 0;
1735 1746
1736 ixgb_clean_tx_irq(adapter); 1747 ixgb_clean_tx_irq(adapter);
@@ -1738,7 +1749,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
1738 1749
1739 /* If budget not fully consumed, exit the polling mode */ 1750 /* If budget not fully consumed, exit the polling mode */
1740 if (work_done < budget) { 1751 if (work_done < budget) {
1741 netif_rx_complete(netdev, napi); 1752 netif_rx_complete(napi);
1742 if (!test_bit(__IXGB_DOWN, &adapter->flags)) 1753 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1743 ixgb_irq_enable(adapter); 1754 ixgb_irq_enable(adapter);
1744 } 1755 }
@@ -1981,7 +1992,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1981 } else { 1992 } else {
1982 netif_receive_skb(skb); 1993 netif_receive_skb(skb);
1983 } 1994 }
1984 netdev->last_rx = jiffies;
1985 1995
1986rxdesc_done: 1996rxdesc_done:
1987 /* clean up descriptor, might be written over by hw */ 1997 /* clean up descriptor, might be written over by hw */
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index ccd83d9f579e..6e7ef765bcd8 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,3 +34,5 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82598.o ixgbe_phy.o 36 ixgbe_82598.o ixgbe_phy.o
37
38ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index e116d340dcc6..e112008f39c1 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -32,10 +32,11 @@
32#include <linux/pci.h> 32#include <linux/pci.h>
33#include <linux/netdevice.h> 33#include <linux/netdevice.h>
34#include <linux/inet_lro.h> 34#include <linux/inet_lro.h>
35#include <linux/aer.h>
35 36
36#include "ixgbe_type.h" 37#include "ixgbe_type.h"
37#include "ixgbe_common.h" 38#include "ixgbe_common.h"
38 39#include "ixgbe_dcb.h"
39#ifdef CONFIG_IXGBE_DCA 40#ifdef CONFIG_IXGBE_DCA
40#include <linux/dca.h> 41#include <linux/dca.h>
41#endif 42#endif
@@ -84,6 +85,7 @@
84#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2) 85#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
85#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3) 86#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
86#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 87#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
88#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
87#define IXGBE_TX_FLAGS_VLAN_SHIFT 16 89#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
88 90
89#define IXGBE_MAX_LRO_DESCRIPTORS 8 91#define IXGBE_MAX_LRO_DESCRIPTORS 8
@@ -134,7 +136,7 @@ struct ixgbe_ring {
134 136
135 u16 reg_idx; /* holds the special value that gets the hardware register 137 u16 reg_idx; /* holds the special value that gets the hardware register
136 * offset associated with this ring, which is different 138 * offset associated with this ring, which is different
137 * for DCE and RSS modes */ 139 * for DCB and RSS modes */
138 140
139#ifdef CONFIG_IXGBE_DCA 141#ifdef CONFIG_IXGBE_DCA
140 /* cpu for tx queue */ 142 /* cpu for tx queue */
@@ -152,8 +154,10 @@ struct ixgbe_ring {
152 u16 rx_buf_len; 154 u16 rx_buf_len;
153}; 155};
154 156
157#define RING_F_DCB 0
155#define RING_F_VMDQ 1 158#define RING_F_VMDQ 1
156#define RING_F_RSS 2 159#define RING_F_RSS 2
160#define IXGBE_MAX_DCB_INDICES 8
157#define IXGBE_MAX_RSS_INDICES 16 161#define IXGBE_MAX_RSS_INDICES 16
158#define IXGBE_MAX_VMDQ_INDICES 16 162#define IXGBE_MAX_VMDQ_INDICES 16
159struct ixgbe_ring_feature { 163struct ixgbe_ring_feature {
@@ -164,6 +168,10 @@ struct ixgbe_ring_feature {
164#define MAX_RX_QUEUES 64 168#define MAX_RX_QUEUES 64
165#define MAX_TX_QUEUES 32 169#define MAX_TX_QUEUES 32
166 170
171#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
172 ? 8 : 1)
173#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
174
167/* MAX_MSIX_Q_VECTORS of these are allocated, 175/* MAX_MSIX_Q_VECTORS of these are allocated,
168 * but we only use one per queue-specific vector. 176 * but we only use one per queue-specific vector.
169 */ 177 */
@@ -215,6 +223,9 @@ struct ixgbe_adapter {
215 struct work_struct reset_task; 223 struct work_struct reset_task;
216 struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS]; 224 struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
217 char name[MAX_MSIX_COUNT][IFNAMSIZ + 5]; 225 char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
226 struct ixgbe_dcb_config dcb_cfg;
227 struct ixgbe_dcb_config temp_dcb_cfg;
228 u8 dcb_set_bitmap;
218 229
219 /* Interrupt Throttle Rate */ 230 /* Interrupt Throttle Rate */
220 u32 itr_setting; 231 u32 itr_setting;
@@ -267,8 +278,10 @@ struct ixgbe_adapter {
267#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17) 278#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
268#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18) 279#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
269#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19) 280#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
281#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
270#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22) 282#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
271#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23) 283#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
284#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 24)
272 285
273/* default to trying for four seconds */ 286/* default to trying for four seconds */
274#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) 287#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
@@ -299,12 +312,15 @@ struct ixgbe_adapter {
299 unsigned long link_check_timeout; 312 unsigned long link_check_timeout;
300 313
301 struct work_struct watchdog_task; 314 struct work_struct watchdog_task;
315 struct work_struct sfp_task;
316 struct timer_list sfp_timer;
302}; 317};
303 318
304enum ixbge_state_t { 319enum ixbge_state_t {
305 __IXGBE_TESTING, 320 __IXGBE_TESTING,
306 __IXGBE_RESETTING, 321 __IXGBE_RESETTING,
307 __IXGBE_DOWN 322 __IXGBE_DOWN,
323 __IXGBE_SFP_MODULE_NOT_FOUND
308}; 324};
309 325
310enum ixgbe_boards { 326enum ixgbe_boards {
@@ -312,6 +328,12 @@ enum ixgbe_boards {
312}; 328};
313 329
314extern struct ixgbe_info ixgbe_82598_info; 330extern struct ixgbe_info ixgbe_82598_info;
331#ifdef CONFIG_IXGBE_DCB
332extern struct dcbnl_rtnl_ops dcbnl_ops;
333extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
334 struct ixgbe_dcb_config *dst_dcb_cfg,
335 int tc_max);
336#endif
315 337
316extern char ixgbe_driver_name[]; 338extern char ixgbe_driver_name[];
317extern const char ixgbe_driver_version[]; 339extern const char ixgbe_driver_version[];
@@ -326,5 +348,9 @@ extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *)
326extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 348extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
327extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 349extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
328extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 350extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
351extern void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter);
352extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
353void ixgbe_napi_add_all(struct ixgbe_adapter *adapter);
354void ixgbe_napi_del_all(struct ixgbe_adapter *adapter);
329 355
330#endif /* _IXGBE_H_ */ 356#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 7cddcfba809e..ad5699d9ab0d 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -46,6 +46,8 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
46 ixgbe_link_speed speed, 46 ixgbe_link_speed speed,
47 bool autoneg, 47 bool autoneg,
48 bool autoneg_wait_to_complete); 48 bool autoneg_wait_to_complete);
49static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
50 u8 *eeprom_data);
49 51
50/** 52/**
51 */ 53 */
@@ -53,12 +55,40 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
53{ 55{
54 struct ixgbe_mac_info *mac = &hw->mac; 56 struct ixgbe_mac_info *mac = &hw->mac;
55 struct ixgbe_phy_info *phy = &hw->phy; 57 struct ixgbe_phy_info *phy = &hw->phy;
58 s32 ret_val = 0;
59 u16 list_offset, data_offset;
56 60
57 /* Call PHY identify routine to get the phy type */ 61 /* Call PHY identify routine to get the phy type */
58 ixgbe_identify_phy_generic(hw); 62 ixgbe_identify_phy_generic(hw);
59 63
60 /* PHY Init */ 64 /* PHY Init */
61 switch (phy->type) { 65 switch (phy->type) {
66 case ixgbe_phy_tn:
67 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
68 phy->ops.get_firmware_version =
69 &ixgbe_get_phy_firmware_version_tnx;
70 break;
71 case ixgbe_phy_nl:
72 phy->ops.reset = &ixgbe_reset_phy_nl;
73
74 /* Call SFP+ identify routine to get the SFP+ module type */
75 ret_val = phy->ops.identify_sfp(hw);
76 if (ret_val != 0)
77 goto out;
78 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
79 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
80 goto out;
81 }
82
83 /* Check to see if SFP+ module is supported */
84 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
85 &list_offset,
86 &data_offset);
87 if (ret_val != 0) {
88 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
89 goto out;
90 }
91 break;
62 default: 92 default:
63 break; 93 break;
64 } 94 }
@@ -77,7 +107,8 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
77 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; 107 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
78 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; 108 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
79 109
80 return 0; 110out:
111 return ret_val;
81} 112}
82 113
83/** 114/**
@@ -146,9 +177,9 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
146 * 177 *
147 * Determines the link capabilities by reading the AUTOC register. 178 * Determines the link capabilities by reading the AUTOC register.
148 **/ 179 **/
149s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, 180static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
150 ixgbe_link_speed *speed, 181 ixgbe_link_speed *speed,
151 bool *autoneg) 182 bool *autoneg)
152{ 183{
153 s32 status = IXGBE_ERR_LINK_SETUP; 184 s32 status = IXGBE_ERR_LINK_SETUP;
154 u16 speed_ability; 185 u16 speed_ability;
@@ -186,9 +217,15 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
186 case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 217 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
187 case IXGBE_DEV_ID_82598EB_CX4: 218 case IXGBE_DEV_ID_82598EB_CX4:
188 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: 219 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
220 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
221 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
189 case IXGBE_DEV_ID_82598EB_XF_LR: 222 case IXGBE_DEV_ID_82598EB_XF_LR:
223 case IXGBE_DEV_ID_82598EB_SFP_LOM:
190 media_type = ixgbe_media_type_fiber; 224 media_type = ixgbe_media_type_fiber;
191 break; 225 break;
226 case IXGBE_DEV_ID_82598AT:
227 media_type = ixgbe_media_type_copper;
228 break;
192 default: 229 default:
193 media_type = ixgbe_media_type_unknown; 230 media_type = ixgbe_media_type_unknown;
194 break; 231 break;
@@ -205,7 +242,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
205 * Configures the flow control settings based on SW configuration. This 242 * Configures the flow control settings based on SW configuration. This
206 * function is used for 802.3x flow control configuration only. 243 * function is used for 802.3x flow control configuration only.
207 **/ 244 **/
208s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num) 245static s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
209{ 246{
210 u32 frctl_reg; 247 u32 frctl_reg;
211 u32 rmcs_reg; 248 u32 rmcs_reg;
@@ -391,6 +428,46 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
391{ 428{
392 u32 links_reg; 429 u32 links_reg;
393 u32 i; 430 u32 i;
431 u16 link_reg, adapt_comp_reg;
432
433 /*
434 * SERDES PHY requires us to read link status from register 0xC79F.
435 * Bit 0 set indicates link is up/ready; clear indicates link down.
436 * 0xC00C is read to check that the XAUI lanes are active. Bit 0
437 * clear indicates active; set indicates inactive.
438 */
439 if (hw->phy.type == ixgbe_phy_nl) {
440 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
441 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
442 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
443 &adapt_comp_reg);
444 if (link_up_wait_to_complete) {
445 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
446 if ((link_reg & 1) &&
447 ((adapt_comp_reg & 1) == 0)) {
448 *link_up = true;
449 break;
450 } else {
451 *link_up = false;
452 }
453 msleep(100);
454 hw->phy.ops.read_reg(hw, 0xC79F,
455 IXGBE_TWINAX_DEV,
456 &link_reg);
457 hw->phy.ops.read_reg(hw, 0xC00C,
458 IXGBE_TWINAX_DEV,
459 &adapt_comp_reg);
460 }
461 } else {
462 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
463 *link_up = true;
464 else
465 *link_up = false;
466 }
467
468 if (*link_up == false)
469 goto out;
470 }
394 471
395 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 472 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
396 if (link_up_wait_to_complete) { 473 if (link_up_wait_to_complete) {
@@ -416,6 +493,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
416 else 493 else
417 *speed = IXGBE_LINK_SPEED_1GB_FULL; 494 *speed = IXGBE_LINK_SPEED_1GB_FULL;
418 495
496out:
419 return 0; 497 return 0;
420} 498}
421 499
@@ -648,7 +726,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
648 * @rar: receive address register index to associate with a VMDq index 726 * @rar: receive address register index to associate with a VMDq index
649 * @vmdq: VMDq set index 727 * @vmdq: VMDq set index
650 **/ 728 **/
651s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 729static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
652{ 730{
653 u32 rar_high; 731 u32 rar_high;
654 732
@@ -692,8 +770,8 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
692 * 770 *
693 * Turn on/off specified VLAN in the VLAN filter table. 771 * Turn on/off specified VLAN in the VLAN filter table.
694 **/ 772 **/
695s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, 773static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
696 bool vlan_on) 774 bool vlan_on)
697{ 775{
698 u32 regindex; 776 u32 regindex;
699 u32 bitindex; 777 u32 bitindex;
@@ -816,7 +894,7 @@ static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index)
816 * 894 *
817 * Performs read operation to Atlas analog register specified. 895 * Performs read operation to Atlas analog register specified.
818 **/ 896 **/
819s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) 897static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
820{ 898{
821 u32 atlas_ctl; 899 u32 atlas_ctl;
822 900
@@ -838,7 +916,7 @@ s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
838 * 916 *
839 * Performs write operation to Atlas analog register specified. 917 * Performs write operation to Atlas analog register specified.
840 **/ 918 **/
841s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) 919static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
842{ 920{
843 u32 atlas_ctl; 921 u32 atlas_ctl;
844 922
@@ -851,12 +929,75 @@ s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
851} 929}
852 930
853/** 931/**
932 * ixgbe_read_i2c_eeprom_82598 - Read 8 bit EEPROM word of an SFP+ module
933 * over I2C interface through an intermediate phy.
934 * @hw: pointer to hardware structure
935 * @byte_offset: EEPROM byte offset to read
936 * @eeprom_data: value read
937 *
938 * Performs byte read operation to SFP module's EEPROM over I2C interface.
939 **/
940static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
941 u8 *eeprom_data)
942{
943 s32 status = 0;
944 u16 sfp_addr = 0;
945 u16 sfp_data = 0;
946 u16 sfp_stat = 0;
947 u32 i;
948
949 if (hw->phy.type == ixgbe_phy_nl) {
950 /*
951 * phy SDA/SCL registers are at addresses 0xC30A to
952 * 0xC30D. These registers are used to talk to the SFP+
953 * module's EEPROM through the SDA/SCL (I2C) interface.
954 */
955 sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
956 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
957 hw->phy.ops.write_reg(hw,
958 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
959 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
960 sfp_addr);
961
962 /* Poll status */
963 for (i = 0; i < 100; i++) {
964 hw->phy.ops.read_reg(hw,
965 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
966 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
967 &sfp_stat);
968 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
969 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
970 break;
971 msleep(10);
972 }
973
974 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
975 hw_dbg(hw, "EEPROM read did not pass.\n");
976 status = IXGBE_ERR_SFP_NOT_PRESENT;
977 goto out;
978 }
979
980 /* Read data */
981 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
982 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
983
984 *eeprom_data = (u8)(sfp_data >> 8);
985 } else {
986 status = IXGBE_ERR_PHY;
987 goto out;
988 }
989
990out:
991 return status;
992}
993
994/**
854 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type 995 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
855 * @hw: pointer to hardware structure 996 * @hw: pointer to hardware structure
856 * 997 *
857 * Determines physical layer capabilities of the current configuration. 998 * Determines physical layer capabilities of the current configuration.
858 **/ 999 **/
859s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) 1000static s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
860{ 1001{
861 s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 1002 s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
862 1003
@@ -865,13 +1006,39 @@ s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
865 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: 1006 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
866 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; 1007 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
867 break; 1008 break;
1009 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1010 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1011 break;
868 case IXGBE_DEV_ID_82598AF_DUAL_PORT: 1012 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
869 case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 1013 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1014 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
870 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; 1015 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
871 break; 1016 break;
872 case IXGBE_DEV_ID_82598EB_XF_LR: 1017 case IXGBE_DEV_ID_82598EB_XF_LR:
873 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 1018 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
874 break; 1019 break;
1020 case IXGBE_DEV_ID_82598AT:
1021 physical_layer = (IXGBE_PHYSICAL_LAYER_10GBASE_T |
1022 IXGBE_PHYSICAL_LAYER_1000BASE_T);
1023 break;
1024 case IXGBE_DEV_ID_82598EB_SFP_LOM:
1025 hw->phy.ops.identify_sfp(hw);
1026
1027 switch (hw->phy.sfp_type) {
1028 case ixgbe_sfp_type_da_cu:
1029 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1030 break;
1031 case ixgbe_sfp_type_sr:
1032 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1033 break;
1034 case ixgbe_sfp_type_lr:
1035 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1036 break;
1037 default:
1038 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1039 break;
1040 }
1041 break;
875 1042
876 default: 1043 default:
877 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 1044 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
@@ -923,12 +1090,13 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
923 1090
924static struct ixgbe_phy_operations phy_ops_82598 = { 1091static struct ixgbe_phy_operations phy_ops_82598 = {
925 .identify = &ixgbe_identify_phy_generic, 1092 .identify = &ixgbe_identify_phy_generic,
926 /* .identify_sfp = &ixgbe_identify_sfp_module_generic, */ 1093 .identify_sfp = &ixgbe_identify_sfp_module_generic,
927 .reset = &ixgbe_reset_phy_generic, 1094 .reset = &ixgbe_reset_phy_generic,
928 .read_reg = &ixgbe_read_phy_reg_generic, 1095 .read_reg = &ixgbe_read_phy_reg_generic,
929 .write_reg = &ixgbe_write_phy_reg_generic, 1096 .write_reg = &ixgbe_write_phy_reg_generic,
930 .setup_link = &ixgbe_setup_phy_link_generic, 1097 .setup_link = &ixgbe_setup_phy_link_generic,
931 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, 1098 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
1099 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
932}; 1100};
933 1101
934struct ixgbe_info ixgbe_82598_info = { 1102struct ixgbe_info ixgbe_82598_info = {
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
new file mode 100644
index 000000000000..e2e28ac63dec
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -0,0 +1,332 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29
30#include "ixgbe.h"
31#include "ixgbe_type.h"
32#include "ixgbe_dcb.h"
33#include "ixgbe_dcb_82598.h"
34
35/**
36 * ixgbe_dcb_config - Struct containing DCB settings.
37 * @dcb_config: Pointer to DCB config structure
38 *
39 * This function checks DCB rules for DCB settings.
40 * The following rules are checked:
41 * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
42 * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
43 * Group must total 100.
44 * 3. A Traffic Class should not be set to both Link Strict Priority
45 * and Group Strict Priority.
46 * 4. Link strict Bandwidth Groups can only have link strict traffic classes
47 * with zero bandwidth.
48 */
49s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *dcb_config)
50{
51 struct tc_bw_alloc *p;
52 s32 ret_val = 0;
53 u8 i, j, bw = 0, bw_id;
54 u8 bw_sum[2][MAX_BW_GROUP];
55 bool link_strict[2][MAX_BW_GROUP];
56
57 memset(bw_sum, 0, sizeof(bw_sum));
58 memset(link_strict, 0, sizeof(link_strict));
59
60 /* First Tx, then Rx */
61 for (i = 0; i < 2; i++) {
62 /* Check each traffic class for rule violation */
63 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
64 p = &dcb_config->tc_config[j].path[i];
65
66 bw = p->bwg_percent;
67 bw_id = p->bwg_id;
68
69 if (bw_id >= MAX_BW_GROUP) {
70 ret_val = DCB_ERR_CONFIG;
71 goto err_config;
72 }
73 if (p->prio_type == prio_link) {
74 link_strict[i][bw_id] = true;
75 /* Link strict should have zero bandwidth */
76 if (bw) {
77 ret_val = DCB_ERR_LS_BW_NONZERO;
78 goto err_config;
79 }
80 } else if (!bw) {
81 /*
82 * Traffic classes without link strict
83 * should have non-zero bandwidth.
84 */
85 ret_val = DCB_ERR_TC_BW_ZERO;
86 goto err_config;
87 }
88 bw_sum[i][bw_id] += bw;
89 }
90
91 bw = 0;
92
93 /* Check each bandwidth group for rule violation */
94 for (j = 0; j < MAX_BW_GROUP; j++) {
95 bw += dcb_config->bw_percentage[i][j];
96 /*
97 * Sum of bandwidth percentages of all traffic classes
98 * within a Bandwidth Group must total 100 except for
99 * link strict group (zero bandwidth).
100 */
101 if (link_strict[i][j]) {
102 if (bw_sum[i][j]) {
103 /*
104 * Link strict group should have zero
105 * bandwidth.
106 */
107 ret_val = DCB_ERR_LS_BWG_NONZERO;
108 goto err_config;
109 }
110 } else if (bw_sum[i][j] != BW_PERCENT &&
111 bw_sum[i][j] != 0) {
112 ret_val = DCB_ERR_TC_BW;
113 goto err_config;
114 }
115 }
116
117 if (bw != BW_PERCENT) {
118 ret_val = DCB_ERR_BW_GROUP;
119 goto err_config;
120 }
121 }
122
123err_config:
124 return ret_val;
125}
126
127/**
128 * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
129 * @ixgbe_dcb_config: Struct containing DCB settings.
130 * @direction: Configuring either Tx or Rx.
131 *
132 * This function calculates the credits allocated to each traffic class.
133 * It should be called only after the rules are checked by
134 * ixgbe_dcb_check_config().
135 */
136s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
137 u8 direction)
138{
139 struct tc_bw_alloc *p;
140 s32 ret_val = 0;
141 /* Initialization values default for Tx settings */
142 u32 credit_refill = 0;
143 u32 credit_max = 0;
144 u16 link_percentage = 0;
145 u8 bw_percent = 0;
146 u8 i;
147
148 if (dcb_config == NULL) {
149 ret_val = DCB_ERR_CONFIG;
150 goto out;
151 }
152
153 /* Find out the link percentage for each TC first */
154 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
155 p = &dcb_config->tc_config[i].path[direction];
156 bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
157
158 link_percentage = p->bwg_percent;
159 /* Must be careful of integer division for very small nums */
160 link_percentage = (link_percentage * bw_percent) / 100;
161 if (p->bwg_percent > 0 && link_percentage == 0)
162 link_percentage = 1;
163
164 /* Save link_percentage for reference */
165 p->link_percent = (u8)link_percentage;
166
167 /* Calculate credit refill and save it */
168 credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
169 p->data_credits_refill = (u16)credit_refill;
170
171 /* Calculate maximum credit for the TC */
172 credit_max = (link_percentage * MAX_CREDIT) / 100;
173
174 /*
175 * Adjustment based on rule checking, if the percentage
176 * of a TC is too small, the maximum credit may not be
177 * enough to send out a jumbo frame in data plane arbitration.
178 */
179 if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO))
180 credit_max = MINIMUM_CREDIT_FOR_JUMBO;
181
182 if (direction == DCB_TX_CONFIG) {
183 /*
184 * Adjustment based on rule checking, if the
185 * percentage of a TC is too small, the maximum
186 * credit may not be enough to send out a TSO
187 * packet in descriptor plane arbitration.
188 */
189 if (credit_max &&
190 (credit_max < MINIMUM_CREDIT_FOR_TSO))
191 credit_max = MINIMUM_CREDIT_FOR_TSO;
192
193 dcb_config->tc_config[i].desc_credits_max =
194 (u16)credit_max;
195 }
196
197 p->data_credits_max = (u16)credit_max;
198 }
199
200out:
201 return ret_val;
202}
203
204/**
205 * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
206 * @hw: pointer to hardware structure
207 * @stats: pointer to statistics structure
208 * @tc_count: Number of elements in bwg_array.
209 *
210 * This function returns the status data for each of the Traffic Classes in use.
211 */
212s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
213 u8 tc_count)
214{
215 s32 ret = 0;
216 if (hw->mac.type == ixgbe_mac_82598EB)
217 ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
218 return ret;
219}
220
221/**
222 * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
223 * hw - pointer to hardware structure
224 * stats - pointer to statistics structure
225 * tc_count - Number of elements in bwg_array.
226 *
227 * This function returns the CBFC status data for each of the Traffic Classes.
228 */
229s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
230 u8 tc_count)
231{
232 s32 ret = 0;
233 if (hw->mac.type == ixgbe_mac_82598EB)
234 ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
235 return ret;
236}
237
238/**
239 * ixgbe_dcb_config_rx_arbiter - Config Rx arbiter
240 * @hw: pointer to hardware structure
241 * @dcb_config: pointer to ixgbe_dcb_config structure
242 *
243 * Configure Rx Data Arbiter and credits for each traffic class.
244 */
245s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *hw,
246 struct ixgbe_dcb_config *dcb_config)
247{
248 s32 ret = 0;
249 if (hw->mac.type == ixgbe_mac_82598EB)
250 ret = ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
251 return ret;
252}
253
254/**
255 * ixgbe_dcb_config_tx_desc_arbiter - Config Tx Desc arbiter
256 * @hw: pointer to hardware structure
257 * @dcb_config: pointer to ixgbe_dcb_config structure
258 *
259 * Configure Tx Descriptor Arbiter and credits for each traffic class.
260 */
261s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *hw,
262 struct ixgbe_dcb_config *dcb_config)
263{
264 s32 ret = 0;
265 if (hw->mac.type == ixgbe_mac_82598EB)
266 ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
267 return ret;
268}
269
270/**
271 * ixgbe_dcb_config_tx_data_arbiter - Config Tx data arbiter
272 * @hw: pointer to hardware structure
273 * @dcb_config: pointer to ixgbe_dcb_config structure
274 *
275 * Configure Tx Data Arbiter and credits for each traffic class.
276 */
277s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *hw,
278 struct ixgbe_dcb_config *dcb_config)
279{
280 s32 ret = 0;
281 if (hw->mac.type == ixgbe_mac_82598EB)
282 ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
283 return ret;
284}
285
286/**
287 * ixgbe_dcb_config_pfc - Config priority flow control
288 * @hw: pointer to hardware structure
289 * @dcb_config: pointer to ixgbe_dcb_config structure
290 *
291 * Configure Priority Flow Control for each traffic class.
292 */
293s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw,
294 struct ixgbe_dcb_config *dcb_config)
295{
296 s32 ret = 0;
297 if (hw->mac.type == ixgbe_mac_82598EB)
298 ret = ixgbe_dcb_config_pfc_82598(hw, dcb_config);
299 return ret;
300}
301
302/**
303 * ixgbe_dcb_config_tc_stats - Config traffic class statistics
304 * @hw: pointer to hardware structure
305 *
306 * Configure queue statistics registers, all queues belonging to same traffic
307 * class uses a single set of queue statistics counters.
308 */
309s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
310{
311 s32 ret = 0;
312 if (hw->mac.type == ixgbe_mac_82598EB)
313 ret = ixgbe_dcb_config_tc_stats_82598(hw);
314 return ret;
315}
316
317/**
318 * ixgbe_dcb_hw_config - Config and enable DCB
319 * @hw: pointer to hardware structure
320 * @dcb_config: pointer to ixgbe_dcb_config structure
321 *
322 * Configure dcb settings and enable dcb mode.
323 */
324s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
325 struct ixgbe_dcb_config *dcb_config)
326{
327 s32 ret = 0;
328 if (hw->mac.type == ixgbe_mac_82598EB)
329 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
330 return ret;
331}
332
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
new file mode 100644
index 000000000000..75f6efe1e369
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -0,0 +1,184 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _DCB_CONFIG_H_
30#define _DCB_CONFIG_H_
31
32#include "ixgbe_type.h"
33
34/* DCB data structures */
35
36#define IXGBE_MAX_PACKET_BUFFERS 8
37#define MAX_USER_PRIORITY 8
38#define MAX_TRAFFIC_CLASS 8
39#define MAX_BW_GROUP 8
40#define BW_PERCENT 100
41
42#define DCB_TX_CONFIG 0
43#define DCB_RX_CONFIG 1
44
45/* DCB error Codes */
46#define DCB_SUCCESS 0
47#define DCB_ERR_CONFIG -1
48#define DCB_ERR_PARAM -2
49
50/* Transmit and receive Errors */
51/* Error in bandwidth group allocation */
52#define DCB_ERR_BW_GROUP -3
53/* Error in traffic class bandwidth allocation */
54#define DCB_ERR_TC_BW -4
55/* Traffic class has both link strict and group strict enabled */
56#define DCB_ERR_LS_GS -5
57/* Link strict traffic class has non zero bandwidth */
58#define DCB_ERR_LS_BW_NONZERO -6
59/* Link strict bandwidth group has non zero bandwidth */
60#define DCB_ERR_LS_BWG_NONZERO -7
61/* Traffic class has zero bandwidth */
62#define DCB_ERR_TC_BW_ZERO -8
63
64#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF
65
/* Per-TC pause debug snapshot (diagnostic use) */
struct dcb_pfc_tc_debug {
	u8  tc;           /* Traffic class this entry describes */
	u8  pause_status; /* Current pause state of the TC */
	u64 pause_quanta; /* Accumulated pause quanta */
};

/* Arbitration priority applied to a traffic class */
enum strict_prio_type {
	prio_none = 0, /* Weighted round robin within the BWG */
	prio_group,    /* Group Strict Priority (GSP) */
	prio_link      /* Link Strict Priority (LSP) */
};

/* Traffic class bandwidth allocation per direction */
struct tc_bw_alloc {
	u8 bwg_id;		  /* Bandwidth Group (BWG) ID */
	u8 bwg_percent;		  /* % of BWG's bandwidth */
	u8 link_percent;	  /* % of link bandwidth */
	u8 up_to_tc_bitmap;	  /* User Priority to Traffic Class mapping */
	u16 data_credits_refill;  /* Credit refill amount in 64B granularity */
	u16 data_credits_max;	  /* Max credits for a configured packet buffer
				   * in 64B granularity.*/
	enum strict_prio_type prio_type; /* Link or Group Strict Priority */
};

/* Priority Flow Control mode for one traffic class */
enum dcb_pfc_type {
	pfc_disabled = 0, /* No PFC on this TC */
	pfc_enabled_full, /* PFC in both directions */
	pfc_enabled_tx,   /* Generate PFC frames only */
	pfc_enabled_rx    /* Honor received PFC frames only */
};

/* Traffic class configuration */
struct tc_configuration {
	struct tc_bw_alloc path[2]; /* One each for Tx/Rx */
	enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */

	u16 desc_credits_max; /* For Tx Descriptor arbitration */
	u8 tc; /* Traffic class (TC) */
};

/* Rx packet-buffer partitioning scheme */
enum dcb_rx_pba_cfg {
	pba_equal,     /* PBA[0-7] each use 64KB FIFO */
	pba_80_48      /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
};
110
/*
 * This structure contains many values encoded as fixed-point
 * numbers, meaning that some of bits are dedicated to the
 * magnitude and others to the fraction part. In the comments
 * this is shown as f=n, where n is the number of fraction bits.
 * These fraction bits are always the low-order bits. The size
 * of the magnitude is not specified.
 */
/* Backward Congestion Notification (BCN) rate-protocol parameters */
struct bcn_config {
	u32 rp_admin_mode[MAX_TRAFFIC_CLASS]; /* BCN enabled, per TC */
	u32 bcna_option[2]; /* BCNA Port + MAC Addr */
	u32 rp_w;        /* Derivative Weight, f=3 */
	u32 rp_gi;       /* Increase Gain, f=12 */
	u32 rp_gd;       /* Decrease Gain, f=12 */
	u32 rp_ru;       /* Rate Unit */
	u32 rp_alpha;    /* Max Decrease Factor, f=12 */
	u32 rp_beta;     /* Max Increase Factor, f=12 */
	u32 rp_ri;       /* Initial Rate */
	u32 rp_td;       /* Drift Interval Timer */
	u32 rp_rd;       /* Drift Increase */
	u32 rp_tmax;     /* Severe Congestion Backoff Timer Range */
	u32 rp_rmin;     /* Severe Congestion Restart Rate */
	u32 rp_wrtt;     /* RTT Moving Average Weight */
};

/* Top-level DCB configuration: one per adapter */
struct ixgbe_dcb_config {
	struct bcn_config bcn; /* Congestion-notification parameters */

	struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
	u8     bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */

	bool  round_robin_enable; /* Use RR instead of deficit fixed priority */

	enum dcb_rx_pba_cfg rx_pba_cfg; /* Rx packet-buffer split scheme */

	u32  dcb_cfg_version; /* Not used...OS-specific? */
	u32  link_speed; /* For bandwidth allocation validation purpose */
};
149
150/* DCB driver APIs */
151
152/* DCB rule checking function.*/
153s32 ixgbe_dcb_check_config(struct ixgbe_dcb_config *config);
154
155/* DCB credits calculation */
156s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8);
157
158/* DCB PFC functions */
159s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, struct ixgbe_dcb_config *g);
160s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
161
162/* DCB traffic class stats */
163s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
164s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
165
166/* DCB config arbiters */
167s32 ixgbe_dcb_config_tx_desc_arbiter(struct ixgbe_hw *,
168 struct ixgbe_dcb_config *);
169s32 ixgbe_dcb_config_tx_data_arbiter(struct ixgbe_hw *,
170 struct ixgbe_dcb_config *);
171s32 ixgbe_dcb_config_rx_arbiter(struct ixgbe_hw *, struct ixgbe_dcb_config *);
172
173/* DCB hw initialization */
174s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
175
176/* DCB definitions for credit calculation */
177#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
178#define MINIMUM_CREDIT_REFILL 5 /* 5*64B = 320B */
179#define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
180#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */
181#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
182#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */
183
184#endif /* _DCB_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
new file mode 100644
index 000000000000..2c046b0b5d28
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -0,0 +1,398 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "ixgbe.h"
30#include "ixgbe_type.h"
31#include "ixgbe_dcb.h"
32#include "ixgbe_dcb_82598.h"
33
/**
 * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes to read; must not exceed
 *            MAX_TRAFFIC_CLASS
 *
 * This function returns the status data for each of the Traffic Classes in use.
 * Counters are accumulated (+=) into @stats, so repeated calls keep a
 * running total across register read-to-clear cycles.
 */
s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
                                 struct ixgbe_hw_stats *stats,
                                 u8 tc_count)
{
	int tc;

	if (tc_count > MAX_TRAFFIC_CLASS)
		return DCB_ERR_PARAM;

	/* Statistics pertaining to each traffic class */
	for (tc = 0; tc < tc_count; tc++) {
		/* Transmitted Packets */
		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
		/* Transmitted Bytes */
		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
		/* Received Packets */
		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
		/* Received Bytes */
		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
	}

	return 0;
}
65
/**
 * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes to read; must not exceed
 *            MAX_TRAFFIC_CLASS
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 * XOFF counters are accumulated (+=) into @stats rather than overwritten.
 */
s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
                                  struct ixgbe_hw_stats *stats,
                                  u8 tc_count)
{
	int tc;

	if (tc_count > MAX_TRAFFIC_CLASS)
		return DCB_ERR_PARAM;

	for (tc = 0; tc < tc_count; tc++) {
		/* Priority XOFF Transmitted */
		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
		/* Priority XOFF Received */
		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
	}

	return 0;
}
92
/**
 * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure packet buffers for DCB mode.  Rx buffers are sized either
 * equally (8 x 64KB) or split 4 x 80KB / 4 x 48KB depending on
 * @dcb_config->rx_pba_cfg; Tx buffers are always 8 x 40KB.
 */
static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
                                                 struct ixgbe_dcb_config *dcb_config)
{
	s32 ret_val = 0;
	u32 value = IXGBE_RXPBSIZE_64KB;
	u8  i = 0;

	/* Setup Rx packet buffer sizes */
	switch (dcb_config->rx_pba_cfg) {
	case pba_80_48:
		/* Setup the first four at 80KB */
		value = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
		/* Setup the last four at 48KB...don't re-init i */
		value = IXGBE_RXPBSIZE_48KB;
		/* Fall Through */
	case pba_equal:
	default:
		/* Intentional fall-through: finish remaining Rx buffers
		 * at whatever size 'value' currently holds. */
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);

		/* Setup Tx packet buffer sizes */
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
			                IXGBE_TXPBSIZE_40KB);
		}
		break;
	}

	return ret_val;
}
132
/**
 * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Rx Data Arbiter and credits for each traffic class.
 * Uses the Rx-direction (DCB_RX_CONFIG) credit values from @dcb_config.
 */
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
                                      struct ixgbe_dcb_config *dcb_config)
{
	struct tc_bw_alloc    *p;
	u32    reg           = 0;
	u32    credit_refill = 0;
	u32    credit_max    = 0;
	u8     i             = 0;

	/* Enable user-priority-to-queue mapping */
	reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
	IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	/* Enable Arbiter */
	reg &= ~IXGBE_RMCS_ARBDIS;
	/* Enable Receive Recycle within the BWG */
	reg |= IXGBE_RMCS_RRM;
	/* Enable Deficit Fixed Priority arbitration*/
	reg |= IXGBE_RMCS_DFP;

	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
		credit_refill = p->data_credits_refill;
		credit_max    = p->data_credits_max;

		reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);

		/* Link-strict TCs get the LSP bit */
		if (p->prio_type == prio_link)
			reg |= IXGBE_RT2CR_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
	}

	/* Enable DMA for multiple packet buffers / multiple cores (RSS) */
	reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	reg |= IXGBE_RDRXCTL_RDMTS_1_2;
	reg |= IXGBE_RDRXCTL_MPBEN;
	reg |= IXGBE_RDRXCTL_MCEN;
	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	/* Make sure there is enough descriptors before arbitration */
	reg &= ~IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);

	return 0;
}
189
/**
 * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 * Uses the Tx-direction (DCB_TX_CONFIG) refill credits plus the per-TC
 * descriptor credit maximum from @dcb_config.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
                                           struct ixgbe_dcb_config *dcb_config)
{
	struct tc_bw_alloc *p;
	u32    reg, max_credits;
	u8     i;

	reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);

	/* Enable arbiter */
	reg &= ~IXGBE_DPMCS_ARBDIS;
	if (!(dcb_config->round_robin_enable)) {
		/* Enable DFP and Recycle mode */
		reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
	}
	reg |= IXGBE_DPMCS_TSOEF;
	/* Configure Max TSO packet size 34KB including payload and headers */
	reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);

	IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
		max_credits = dcb_config->tc_config[i].desc_credits_max;
		reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
		reg |= p->data_credits_refill;
		reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;

		/* Group- or link-strict priority flags */
		if (p->prio_type == prio_group)
			reg |= IXGBE_TDTQ2TCCR_GSP;

		if (p->prio_type == prio_link)
			reg |= IXGBE_TDTQ2TCCR_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
	}

	return 0;
}
237
/**
 * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Tx Data Arbiter and credits for each traffic class.
 * Uses the Tx-direction (DCB_TX_CONFIG) data credit values from
 * @dcb_config, then enables Tx packet buffer division.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
                                           struct ixgbe_dcb_config *dcb_config)
{
	struct tc_bw_alloc *p;
	u32 reg;
	u8  i;

	reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
	/* Enable Data Plane Arbiter */
	reg &= ~IXGBE_PDPMCS_ARBDIS;
	/* Enable DFP and Transmit Recycle Mode */
	reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);

	IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
		reg = p->data_credits_refill;
		reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
		reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;

		/* Group- or link-strict priority flags */
		if (p->prio_type == prio_group)
			reg |= IXGBE_TDPT2TCCR_GSP;

		if (p->prio_type == prio_link)
			reg |= IXGBE_TDPT2TCCR_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
	}

	/* Enable Tx packet buffer division */
	reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	reg |= IXGBE_DTXCTL_ENDBUBD;
	IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);

	return 0;
}
283
/**
 * ixgbe_dcb_config_pfc_82598 - Config priority flow control
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure Priority Flow Control for each traffic class.
 * Switches the MAC from 802.3x link flow control to per-priority flow
 * control and programs XON/XOFF water marks sized from the per-TC Rx
 * packet-buffer allocation.
 */
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
                               struct ixgbe_dcb_config *dcb_config)
{
	u32 reg, rx_pba_size;
	u8  i;

	/* Enable Transmit Priority Flow Control */
	reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
	reg &= ~IXGBE_RMCS_TFCE_802_3X;
	/* correct the reporting of our flow control status */
	hw->fc.type = ixgbe_fc_none;
	reg |= IXGBE_RMCS_TFCE_PRIORITY;
	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);

	/* Enable Receive Priority Flow Control */
	reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg &= ~IXGBE_FCTRL_RFCE;
	reg |= IXGBE_FCTRL_RPFCE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);

	/*
	 * Configure flow control thresholds and enable priority flow control
	 * for each traffic class.
	 */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		/* Per-TC buffer size follows the packet-buffer split */
		if (dcb_config->rx_pba_cfg == pba_equal) {
			rx_pba_size = IXGBE_RXPBSIZE_64KB;
		} else {
			rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
			                      : IXGBE_RXPBSIZE_48KB;
		}

		/* XON (low) water mark: 1/32 of the TC's buffer */
		reg = ((rx_pba_size >> 5) &  0xFFF0);
		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
			reg |= IXGBE_FCRTL_XONE;

		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);

		/* XOFF (high) water mark: 1/4 of the TC's buffer */
		reg = ((rx_pba_size >> 2) & 0xFFF0);
		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
			reg |= IXGBE_FCRTH_FCEN;

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
	}

	/* Configure pause time (two TCs share one 32-bit register) */
	for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400);

	return 0;
}
347
/**
 * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
 * @hw: pointer to hardware structure
 *
 * Configure queue statistics registers, all queues belonging to same traffic
 * class uses a single set of queue statistics counters.  Two RQSMR
 * registers are written per TC (16 registers covering 8 TCs), and one
 * TQSMR register per TC for transmit.
 */
s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
	u32 reg = 0;
	u8  i   = 0;
	u8  j   = 0;

	/* Receive Queues stats setting -  8 queues per statistics reg */
	for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
		/* Replicate TC index j into each byte of the mapping pair */
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
		reg |= ((0x1010101) * j);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
		reg |= ((0x1010101) * j);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
	}
	/* Transmit Queues stats setting -  4 queues per statistics reg */
	for (i = 0; i < 8; i++) {
		reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
		reg |= ((0x1010101) * i);
		IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
	}

	return 0;
}
379
/**
 * ixgbe_dcb_hw_config_82598 - Config and enable DCB
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure dcb settings and enable dcb mode.  Runs the full 82598
 * DCB bring-up sequence: packet buffers first, then the Rx/Tx arbiters,
 * PFC, and finally per-TC statistics mapping.
 */
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
                              struct ixgbe_dcb_config *dcb_config)
{
	ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
	ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
	ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
	ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
	ixgbe_dcb_config_pfc_82598(hw, dcb_config);
	ixgbe_dcb_config_tc_stats_82598(hw);

	return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
new file mode 100644
index 000000000000..1e6a313719d7
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -0,0 +1,94 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#ifndef _DCB_82598_CONFIG_H_
30#define _DCB_82598_CONFIG_H_
31
32/* DCB register definitions */
33
34#define IXGBE_DPMCS_MTSOS_SHIFT 16
35#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */
36#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */
37#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */
38#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
39
40#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */
41
42#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */
43#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */
44
45#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */
46#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */
47
48#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12
49#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9
50#define IXGBE_TDTQ2TCCR_GSP 0x40000000
51#define IXGBE_TDTQ2TCCR_LSP 0x80000000
52
53#define IXGBE_TDPT2TCCR_MCL_SHIFT 12
54#define IXGBE_TDPT2TCCR_BWG_SHIFT 9
55#define IXGBE_TDPT2TCCR_GSP 0x40000000
56#define IXGBE_TDPT2TCCR_LSP 0x80000000
57
58#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */
59#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */
60#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */
61
62#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */
63
64#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
65#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
66#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
67#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
68
69#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000
70
/* DCB hardware-specific driver APIs */

/* DCB PFC functions */
s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *,
                                  u8);

/* DCB traffic class stats */
s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, struct ixgbe_hw_stats *,
                                 u8);

/* DCB config arbiters */
s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *,
                                           struct ixgbe_dcb_config *);
s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *,
                                           struct ixgbe_dcb_config *);
s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *,
                                      struct ixgbe_dcb_config *);

/* DCB hw initialization */
s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);

#endif /* _DCB_82598_CONFIG_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
new file mode 100644
index 000000000000..4129976953f5
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -0,0 +1,641 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "ixgbe.h"
30#include <linux/dcbnl.h>
31
32/* Callbacks for DCB netlink in the kernel */
33#define BIT_DCB_MODE 0x01
34#define BIT_PFC 0x02
35#define BIT_PG_RX 0x04
36#define BIT_PG_TX 0x08
37#define BIT_BCN 0x10
38
39int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
40 struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max)
41{
42 struct tc_configuration *src_tc_cfg = NULL;
43 struct tc_configuration *dst_tc_cfg = NULL;
44 int i;
45
46 if (!src_dcb_cfg || !dst_dcb_cfg)
47 return -EINVAL;
48
49 for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) {
50 src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
51 dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0];
52
53 dst_tc_cfg->path[DCB_TX_CONFIG].prio_type =
54 src_tc_cfg->path[DCB_TX_CONFIG].prio_type;
55
56 dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id =
57 src_tc_cfg->path[DCB_TX_CONFIG].bwg_id;
58
59 dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent =
60 src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent;
61
62 dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap =
63 src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap;
64
65 dst_tc_cfg->path[DCB_RX_CONFIG].prio_type =
66 src_tc_cfg->path[DCB_RX_CONFIG].prio_type;
67
68 dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id =
69 src_tc_cfg->path[DCB_RX_CONFIG].bwg_id;
70
71 dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent =
72 src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent;
73
74 dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap =
75 src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap;
76 }
77
78 for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) {
79 dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG]
80 [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
81 [DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
82 dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG]
83 [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage
84 [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0];
85 }
86
87 for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) {
88 dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc =
89 src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc;
90 }
91
92 for (i = DCB_BCN_ATTR_RP_0; i < DCB_BCN_ATTR_RP_ALL; i++) {
93 dst_dcb_cfg->bcn.rp_admin_mode[i - DCB_BCN_ATTR_RP_0] =
94 src_dcb_cfg->bcn.rp_admin_mode[i - DCB_BCN_ATTR_RP_0];
95 }
96 dst_dcb_cfg->bcn.bcna_option[0] = src_dcb_cfg->bcn.bcna_option[0];
97 dst_dcb_cfg->bcn.bcna_option[1] = src_dcb_cfg->bcn.bcna_option[1];
98 dst_dcb_cfg->bcn.rp_alpha = src_dcb_cfg->bcn.rp_alpha;
99 dst_dcb_cfg->bcn.rp_beta = src_dcb_cfg->bcn.rp_beta;
100 dst_dcb_cfg->bcn.rp_gd = src_dcb_cfg->bcn.rp_gd;
101 dst_dcb_cfg->bcn.rp_gi = src_dcb_cfg->bcn.rp_gi;
102 dst_dcb_cfg->bcn.rp_tmax = src_dcb_cfg->bcn.rp_tmax;
103 dst_dcb_cfg->bcn.rp_td = src_dcb_cfg->bcn.rp_td;
104 dst_dcb_cfg->bcn.rp_rmin = src_dcb_cfg->bcn.rp_rmin;
105 dst_dcb_cfg->bcn.rp_w = src_dcb_cfg->bcn.rp_w;
106 dst_dcb_cfg->bcn.rp_rd = src_dcb_cfg->bcn.rp_rd;
107 dst_dcb_cfg->bcn.rp_ru = src_dcb_cfg->bcn.rp_ru;
108 dst_dcb_cfg->bcn.rp_wrtt = src_dcb_cfg->bcn.rp_wrtt;
109 dst_dcb_cfg->bcn.rp_ri = src_dcb_cfg->bcn.rp_ri;
110
111 return 0;
112}
113
114static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
115{
116 struct ixgbe_adapter *adapter = netdev_priv(netdev);
117
118 DPRINTK(DRV, INFO, "Get DCB Admin Mode.\n");
119
120 return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
121}
122
123static u16 ixgbe_dcb_select_queue(struct net_device *dev, struct sk_buff *skb)
124{
125 /* All traffic should default to class 0 */
126 return 0;
127}
128
/* dcbnl callback: enable or disable DCB mode.  Enabling requires MSI-X;
 * either transition tears down the rings and interrupt scheme, flips the
 * DCB/RSS flags, and rebuilds them.  Returns 0 on success, 1 on failure. */
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
	u8 err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	DPRINTK(DRV, INFO, "Set DCB Admin Mode.\n");

	if (state > 0) {
		/* Turn on DCB */
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
			goto out;

		/* DCB needs MSI-X vectors for its per-TC queues */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
			DPRINTK(DRV, ERR, "Enable failed, needs MSI-X\n");
			err = 1;
			goto out;
		}

		/* Quiesce the device, then free rings and NAPI state so the
		 * interrupt scheme can be rebuilt for DCB queueing. */
		if (netif_running(netdev))
			netdev->netdev_ops->ndo_stop(netdev);
		ixgbe_reset_interrupt_capability(adapter);
		ixgbe_napi_del_all(adapter);
		INIT_LIST_HEAD(&netdev->napi_list);
		kfree(adapter->tx_ring);
		kfree(adapter->rx_ring);
		adapter->tx_ring = NULL;
		adapter->rx_ring = NULL;
		netdev->select_queue = &ixgbe_dcb_select_queue;

		/* DCB and RSS are mutually exclusive here */
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
		ixgbe_init_interrupt_scheme(adapter);
		if (netif_running(netdev))
			netdev->netdev_ops->ndo_open(netdev);
	} else {
		/* Turn off DCB */
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			/* Mirror of the enable path: quiesce, free, switch
			 * flags back to RSS, rebuild, restart. */
			if (netif_running(netdev))
				netdev->netdev_ops->ndo_stop(netdev);
			ixgbe_reset_interrupt_capability(adapter);
			ixgbe_napi_del_all(adapter);
			INIT_LIST_HEAD(&netdev->napi_list);
			kfree(adapter->tx_ring);
			kfree(adapter->rx_ring);
			adapter->tx_ring = NULL;
			adapter->rx_ring = NULL;
			netdev->select_queue = NULL;

			adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
			adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
			ixgbe_init_interrupt_scheme(adapter);
			if (netif_running(netdev))
				netdev->netdev_ops->ndo_open(netdev);
		}
	}
out:
	return err;
}
187
188static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
189 u8 *perm_addr)
190{
191 struct ixgbe_adapter *adapter = netdev_priv(netdev);
192 int i;
193
194 for (i = 0; i < netdev->addr_len; i++)
195 perm_addr[i] = adapter->hw.mac.perm_addr[i];
196}
197
198static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
199 u8 prio, u8 bwg_id, u8 bw_pct,
200 u8 up_map)
201{
202 struct ixgbe_adapter *adapter = netdev_priv(netdev);
203
204 if (prio != DCB_ATTR_VALUE_UNDEFINED)
205 adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
206 if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
207 adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id;
208 if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
209 adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent =
210 bw_pct;
211 if (up_map != DCB_ATTR_VALUE_UNDEFINED)
212 adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap =
213 up_map;
214
215 if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type !=
216 adapter->dcb_cfg.tc_config[tc].path[0].prio_type) ||
217 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id !=
218 adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) ||
219 (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
220 adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
221 (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
222 adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
223 adapter->dcb_set_bitmap |= BIT_PG_TX;
224}
225
226static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
227 u8 bw_pct)
228{
229 struct ixgbe_adapter *adapter = netdev_priv(netdev);
230
231 adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
232
233 if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
234 adapter->dcb_cfg.bw_percentage[0][bwg_id])
235 adapter->dcb_set_bitmap |= BIT_PG_RX;
236}
237
238static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
239 u8 prio, u8 bwg_id, u8 bw_pct,
240 u8 up_map)
241{
242 struct ixgbe_adapter *adapter = netdev_priv(netdev);
243
244 if (prio != DCB_ATTR_VALUE_UNDEFINED)
245 adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
246 if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
247 adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id;
248 if (bw_pct != DCB_ATTR_VALUE_UNDEFINED)
249 adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent =
250 bw_pct;
251 if (up_map != DCB_ATTR_VALUE_UNDEFINED)
252 adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap =
253 up_map;
254
255 if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type !=
256 adapter->dcb_cfg.tc_config[tc].path[1].prio_type) ||
257 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id !=
258 adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) ||
259 (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
260 adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
261 (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
262 adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
263 adapter->dcb_set_bitmap |= BIT_PG_RX;
264}
265
266static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
267 u8 bw_pct)
268{
269 struct ixgbe_adapter *adapter = netdev_priv(netdev);
270
271 adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
272
273 if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
274 adapter->dcb_cfg.bw_percentage[1][bwg_id])
275 adapter->dcb_set_bitmap |= BIT_PG_RX;
276}
277
278static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
279 u8 *prio, u8 *bwg_id, u8 *bw_pct,
280 u8 *up_map)
281{
282 struct ixgbe_adapter *adapter = netdev_priv(netdev);
283
284 *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type;
285 *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id;
286 *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent;
287 *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
288}
289
290static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
291 u8 *bw_pct)
292{
293 struct ixgbe_adapter *adapter = netdev_priv(netdev);
294
295 *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id];
296}
297
298static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
299 u8 *prio, u8 *bwg_id, u8 *bw_pct,
300 u8 *up_map)
301{
302 struct ixgbe_adapter *adapter = netdev_priv(netdev);
303
304 *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type;
305 *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id;
306 *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent;
307 *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap;
308}
309
310static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
311 u8 *bw_pct)
312{
313 struct ixgbe_adapter *adapter = netdev_priv(netdev);
314
315 *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id];
316}
317
318static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
319 u8 setting)
320{
321 struct ixgbe_adapter *adapter = netdev_priv(netdev);
322
323 adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting;
324 if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc !=
325 adapter->dcb_cfg.tc_config[priority].dcb_pfc)
326 adapter->dcb_set_bitmap |= BIT_PFC;
327}
328
329static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
330 u8 *setting)
331{
332 struct ixgbe_adapter *adapter = netdev_priv(netdev);
333
334 *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
335}
336
337static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
338{
339 struct ixgbe_adapter *adapter = netdev_priv(netdev);
340 int ret;
341
342 adapter->dcb_set_bitmap &= ~BIT_BCN; /* no set for BCN */
343 if (!adapter->dcb_set_bitmap)
344 return 1;
345
346 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
347 msleep(1);
348
349 if (netif_running(netdev))
350 ixgbe_down(adapter);
351
352 ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
353 adapter->ring_feature[RING_F_DCB].indices);
354 if (ret) {
355 clear_bit(__IXGBE_RESETTING, &adapter->state);
356 return ret;
357 }
358
359 if (netif_running(netdev))
360 ixgbe_up(adapter);
361
362 adapter->dcb_set_bitmap = 0x00;
363 clear_bit(__IXGBE_RESETTING, &adapter->state);
364 return ret;
365}
366
367static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
368{
369 struct ixgbe_adapter *adapter = netdev_priv(netdev);
370 u8 rval = 0;
371
372 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
373 switch (capid) {
374 case DCB_CAP_ATTR_PG:
375 *cap = true;
376 break;
377 case DCB_CAP_ATTR_PFC:
378 *cap = true;
379 break;
380 case DCB_CAP_ATTR_UP2TC:
381 *cap = false;
382 break;
383 case DCB_CAP_ATTR_PG_TCS:
384 *cap = 0x80;
385 break;
386 case DCB_CAP_ATTR_PFC_TCS:
387 *cap = 0x80;
388 break;
389 case DCB_CAP_ATTR_GSP:
390 *cap = true;
391 break;
392 case DCB_CAP_ATTR_BCN:
393 *cap = false;
394 break;
395 default:
396 rval = -EINVAL;
397 break;
398 }
399 } else {
400 rval = -EINVAL;
401 }
402
403 return rval;
404}
405
406static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
407{
408 struct ixgbe_adapter *adapter = netdev_priv(netdev);
409 u8 rval = 0;
410
411 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
412 switch (tcid) {
413 case DCB_NUMTCS_ATTR_PG:
414 *num = MAX_TRAFFIC_CLASS;
415 break;
416 case DCB_NUMTCS_ATTR_PFC:
417 *num = MAX_TRAFFIC_CLASS;
418 break;
419 default:
420 rval = -EINVAL;
421 break;
422 }
423 } else {
424 rval = -EINVAL;
425 }
426
427 return rval;
428}
429
/* Changing the number of traffic classes is not supported by this driver;
 * unconditionally reject the request.
 */
static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
{
	return -EINVAL;
}
434
435static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev)
436{
437 struct ixgbe_adapter *adapter = netdev_priv(netdev);
438
439 return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED);
440}
441
/* Intentionally a no-op: this driver does not allow the PFC state to be
 * toggled through this callback (see ixgbe_dcbnl_getpfcstate, which derives
 * the reported state from the DCB-enabled flag).
 */
static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
	return;
}
446
447static void ixgbe_dcbnl_getbcnrp(struct net_device *netdev, int priority,
448 u8 *setting)
449{
450 struct ixgbe_adapter *adapter = netdev_priv(netdev);
451
452 *setting = adapter->dcb_cfg.bcn.rp_admin_mode[priority];
453}
454
455
456static void ixgbe_dcbnl_getbcncfg(struct net_device *netdev, int enum_index,
457 u32 *setting)
458{
459 struct ixgbe_adapter *adapter = netdev_priv(netdev);
460
461 switch (enum_index) {
462 case DCB_BCN_ATTR_BCNA_0:
463 *setting = adapter->dcb_cfg.bcn.bcna_option[0];
464 break;
465 case DCB_BCN_ATTR_BCNA_1:
466 *setting = adapter->dcb_cfg.bcn.bcna_option[1];
467 break;
468 case DCB_BCN_ATTR_ALPHA:
469 *setting = adapter->dcb_cfg.bcn.rp_alpha;
470 break;
471 case DCB_BCN_ATTR_BETA:
472 *setting = adapter->dcb_cfg.bcn.rp_beta;
473 break;
474 case DCB_BCN_ATTR_GD:
475 *setting = adapter->dcb_cfg.bcn.rp_gd;
476 break;
477 case DCB_BCN_ATTR_GI:
478 *setting = adapter->dcb_cfg.bcn.rp_gi;
479 break;
480 case DCB_BCN_ATTR_TMAX:
481 *setting = adapter->dcb_cfg.bcn.rp_tmax;
482 break;
483 case DCB_BCN_ATTR_TD:
484 *setting = adapter->dcb_cfg.bcn.rp_td;
485 break;
486 case DCB_BCN_ATTR_RMIN:
487 *setting = adapter->dcb_cfg.bcn.rp_rmin;
488 break;
489 case DCB_BCN_ATTR_W:
490 *setting = adapter->dcb_cfg.bcn.rp_w;
491 break;
492 case DCB_BCN_ATTR_RD:
493 *setting = adapter->dcb_cfg.bcn.rp_rd;
494 break;
495 case DCB_BCN_ATTR_RU:
496 *setting = adapter->dcb_cfg.bcn.rp_ru;
497 break;
498 case DCB_BCN_ATTR_WRTT:
499 *setting = adapter->dcb_cfg.bcn.rp_wrtt;
500 break;
501 case DCB_BCN_ATTR_RI:
502 *setting = adapter->dcb_cfg.bcn.rp_ri;
503 break;
504 default:
505 *setting = -1;
506 }
507}
508
509static void ixgbe_dcbnl_setbcnrp(struct net_device *netdev, int priority,
510 u8 setting)
511{
512 struct ixgbe_adapter *adapter = netdev_priv(netdev);
513
514 adapter->temp_dcb_cfg.bcn.rp_admin_mode[priority] = setting;
515
516 if (adapter->temp_dcb_cfg.bcn.rp_admin_mode[priority] !=
517 adapter->dcb_cfg.bcn.rp_admin_mode[priority])
518 adapter->dcb_set_bitmap |= BIT_BCN;
519}
520
/* Stage one BCN parameter in temp_dcb_cfg, marking the BCN feature dirty
 * in dcb_set_bitmap when the new value differs from the active dcb_cfg.
 * Note that ixgbe_dcbnl_set_all() currently masks BIT_BCN out ("no set for
 * BCN"), so the dirty bit is recorded but never acted upon there.
 * Every case below follows the same stage-then-compare pattern; unknown
 * attributes are silently ignored.
 */
static void ixgbe_dcbnl_setbcncfg(struct net_device *netdev, int enum_index,
				  u32 setting)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	switch (enum_index) {
	case DCB_BCN_ATTR_BCNA_0:
		adapter->temp_dcb_cfg.bcn.bcna_option[0] = setting;
		if (adapter->temp_dcb_cfg.bcn.bcna_option[0] !=
			adapter->dcb_cfg.bcn.bcna_option[0])
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_BCNA_1:
		adapter->temp_dcb_cfg.bcn.bcna_option[1] = setting;
		if (adapter->temp_dcb_cfg.bcn.bcna_option[1] !=
			adapter->dcb_cfg.bcn.bcna_option[1])
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_ALPHA:
		adapter->temp_dcb_cfg.bcn.rp_alpha = setting;
		if (adapter->temp_dcb_cfg.bcn.rp_alpha !=
		    adapter->dcb_cfg.bcn.rp_alpha)
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_BETA:
		adapter->temp_dcb_cfg.bcn.rp_beta = setting;
		if (adapter->temp_dcb_cfg.bcn.rp_beta !=
		    adapter->dcb_cfg.bcn.rp_beta)
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_GD:
		adapter->temp_dcb_cfg.bcn.rp_gd = setting;
		if (adapter->temp_dcb_cfg.bcn.rp_gd !=
		    adapter->dcb_cfg.bcn.rp_gd)
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_GI:
		adapter->temp_dcb_cfg.bcn.rp_gi = setting;
		if (adapter->temp_dcb_cfg.bcn.rp_gi !=
		    adapter->dcb_cfg.bcn.rp_gi)
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_TMAX:
		adapter->temp_dcb_cfg.bcn.rp_tmax = setting;
		if (adapter->temp_dcb_cfg.bcn.rp_tmax !=
		    adapter->dcb_cfg.bcn.rp_tmax)
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_TD:
		adapter->temp_dcb_cfg.bcn.rp_td = setting;
		if (adapter->temp_dcb_cfg.bcn.rp_td !=
		    adapter->dcb_cfg.bcn.rp_td)
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_RMIN:
		adapter->temp_dcb_cfg.bcn.rp_rmin = setting;
		if (adapter->temp_dcb_cfg.bcn.rp_rmin !=
		    adapter->dcb_cfg.bcn.rp_rmin)
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_W:
		adapter->temp_dcb_cfg.bcn.rp_w = setting;
		if (adapter->temp_dcb_cfg.bcn.rp_w !=
		    adapter->dcb_cfg.bcn.rp_w)
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_RD:
		adapter->temp_dcb_cfg.bcn.rp_rd = setting;
		if (adapter->temp_dcb_cfg.bcn.rp_rd !=
		    adapter->dcb_cfg.bcn.rp_rd)
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_RU:
		adapter->temp_dcb_cfg.bcn.rp_ru = setting;
		if (adapter->temp_dcb_cfg.bcn.rp_ru !=
		    adapter->dcb_cfg.bcn.rp_ru)
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_WRTT:
		adapter->temp_dcb_cfg.bcn.rp_wrtt = setting;
		if (adapter->temp_dcb_cfg.bcn.rp_wrtt !=
		    adapter->dcb_cfg.bcn.rp_wrtt)
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	case DCB_BCN_ATTR_RI:
		adapter->temp_dcb_cfg.bcn.rp_ri = setting;
		if (adapter->temp_dcb_cfg.bcn.rp_ri !=
		    adapter->dcb_cfg.bcn.rp_ri)
			adapter->dcb_set_bitmap |= BIT_BCN;
		break;
	default:
		break;
	}
}
615
/* DCB netlink operations table exported to the dcbnl core.  The
 * .getstate/.setstate handlers are defined earlier in this file (outside
 * this hunk).  Setters stage values into adapter->temp_dcb_cfg and set
 * bits in dcb_set_bitmap; .setall (ixgbe_dcbnl_set_all) commits the
 * staged configuration to hardware.
 */
struct dcbnl_rtnl_ops dcbnl_ops = {
	.getstate	= ixgbe_dcbnl_get_state,
	.setstate	= ixgbe_dcbnl_set_state,
	.getpermhwaddr	= ixgbe_dcbnl_get_perm_hw_addr,
	.setpgtccfgtx	= ixgbe_dcbnl_set_pg_tc_cfg_tx,
	.setpgbwgcfgtx	= ixgbe_dcbnl_set_pg_bwg_cfg_tx,
	.setpgtccfgrx	= ixgbe_dcbnl_set_pg_tc_cfg_rx,
	.setpgbwgcfgrx	= ixgbe_dcbnl_set_pg_bwg_cfg_rx,
	.getpgtccfgtx	= ixgbe_dcbnl_get_pg_tc_cfg_tx,
	.getpgbwgcfgtx	= ixgbe_dcbnl_get_pg_bwg_cfg_tx,
	.getpgtccfgrx	= ixgbe_dcbnl_get_pg_tc_cfg_rx,
	.getpgbwgcfgrx	= ixgbe_dcbnl_get_pg_bwg_cfg_rx,
	.setpfccfg	= ixgbe_dcbnl_set_pfc_cfg,
	.getpfccfg	= ixgbe_dcbnl_get_pfc_cfg,
	.setall		= ixgbe_dcbnl_set_all,
	.getcap		= ixgbe_dcbnl_getcap,
	.getnumtcs	= ixgbe_dcbnl_getnumtcs,
	.setnumtcs	= ixgbe_dcbnl_setnumtcs,
	.getpfcstate	= ixgbe_dcbnl_getpfcstate,
	.setpfcstate	= ixgbe_dcbnl_setpfcstate,
	.getbcncfg	= ixgbe_dcbnl_getbcncfg,
	.getbcnrp	= ixgbe_dcbnl_getbcnrp,
	.setbcncfg	= ixgbe_dcbnl_setbcncfg,
	.setbcnrp	= ixgbe_dcbnl_setbcnrp
};
641
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 81a9c4b86726..67f87a79154d 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -94,12 +94,21 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
94}; 94};
95 95
96#define IXGBE_QUEUE_STATS_LEN \ 96#define IXGBE_QUEUE_STATS_LEN \
97 ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \ 97 ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
98 ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \ 98 ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
99 (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 99 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
100#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
101#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 100#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
102#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) 101#define IXGBE_PB_STATS_LEN ( \
102 (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
103 IXGBE_FLAG_DCB_ENABLED) ? \
104 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
105 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
106 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
107 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
108 / sizeof(u64) : 0)
109#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
110 IXGBE_PB_STATS_LEN + \
111 IXGBE_QUEUE_STATS_LEN)
103 112
104static int ixgbe_get_settings(struct net_device *netdev, 113static int ixgbe_get_settings(struct net_device *netdev,
105 struct ethtool_cmd *ecmd) 114 struct ethtool_cmd *ecmd)
@@ -149,6 +158,8 @@ static int ixgbe_set_settings(struct net_device *netdev,
149{ 158{
150 struct ixgbe_adapter *adapter = netdev_priv(netdev); 159 struct ixgbe_adapter *adapter = netdev_priv(netdev);
151 struct ixgbe_hw *hw = &adapter->hw; 160 struct ixgbe_hw *hw = &adapter->hw;
161 u32 advertised, old;
162 s32 err;
152 163
153 switch (hw->phy.media_type) { 164 switch (hw->phy.media_type) {
154 case ixgbe_media_type_fiber: 165 case ixgbe_media_type_fiber:
@@ -157,6 +168,31 @@ static int ixgbe_set_settings(struct net_device *netdev,
157 return -EINVAL; 168 return -EINVAL;
158 /* in this case we currently only support 10Gb/FULL */ 169 /* in this case we currently only support 10Gb/FULL */
159 break; 170 break;
171 case ixgbe_media_type_copper:
172 /* 10000/copper and 1000/copper must autoneg
173 * this function does not support any duplex forcing, but can
174 * limit the advertising of the adapter to only 10000 or 1000 */
175 if (ecmd->autoneg == AUTONEG_DISABLE)
176 return -EINVAL;
177
178 old = hw->phy.autoneg_advertised;
179 advertised = 0;
180 if (ecmd->advertising & ADVERTISED_10000baseT_Full)
181 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
182
183 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
184 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
185
186 if (old == advertised)
187 break;
188 /* this sets the link speed and restarts auto-neg */
189 err = hw->mac.ops.setup_link_speed(hw, advertised, true, true);
190 if (err) {
191 DPRINTK(PROBE, INFO,
192 "setup link failed with code %d\n", err);
193 hw->mac.ops.setup_link_speed(hw, old, true, true);
194 }
195 break;
160 default: 196 default:
161 break; 197 break;
162 } 198 }
@@ -676,30 +712,15 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
676 return 0; 712 return 0;
677 } 713 }
678 714
679 if (adapter->num_tx_queues > adapter->num_rx_queues) 715 temp_ring = kcalloc(adapter->num_tx_queues,
680 temp_ring = vmalloc(adapter->num_tx_queues * 716 sizeof(struct ixgbe_ring), GFP_KERNEL);
681 sizeof(struct ixgbe_ring));
682 else
683 temp_ring = vmalloc(adapter->num_rx_queues *
684 sizeof(struct ixgbe_ring));
685 if (!temp_ring) 717 if (!temp_ring)
686 return -ENOMEM; 718 return -ENOMEM;
687 719
688 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 720 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
689 msleep(1); 721 msleep(1);
690 722
691 if (netif_running(netdev))
692 ixgbe_down(adapter);
693
694 /*
695 * We can't just free everything and then setup again,
696 * because the ISRs in MSI-X mode get passed pointers
697 * to the tx and rx ring structs.
698 */
699 if (new_tx_count != adapter->tx_ring->count) { 723 if (new_tx_count != adapter->tx_ring->count) {
700 memcpy(temp_ring, adapter->tx_ring,
701 adapter->num_tx_queues * sizeof(struct ixgbe_ring));
702
703 for (i = 0; i < adapter->num_tx_queues; i++) { 724 for (i = 0; i < adapter->num_tx_queues; i++) {
704 temp_ring[i].count = new_tx_count; 725 temp_ring[i].count = new_tx_count;
705 err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]); 726 err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
@@ -711,21 +732,28 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
711 } 732 }
712 goto err_setup; 733 goto err_setup;
713 } 734 }
735 temp_ring[i].v_idx = adapter->tx_ring[i].v_idx;
714 } 736 }
715 737 if (netif_running(netdev))
716 for (i = 0; i < adapter->num_tx_queues; i++) 738 netdev->netdev_ops->ndo_stop(netdev);
717 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]); 739 ixgbe_reset_interrupt_capability(adapter);
718 740 ixgbe_napi_del_all(adapter);
719 memcpy(adapter->tx_ring, temp_ring, 741 INIT_LIST_HEAD(&netdev->napi_list);
720 adapter->num_tx_queues * sizeof(struct ixgbe_ring)); 742 kfree(adapter->tx_ring);
721 743 adapter->tx_ring = temp_ring;
744 temp_ring = NULL;
722 adapter->tx_ring_count = new_tx_count; 745 adapter->tx_ring_count = new_tx_count;
723 } 746 }
724 747
725 if (new_rx_count != adapter->rx_ring->count) { 748 temp_ring = kcalloc(adapter->num_rx_queues,
726 memcpy(temp_ring, adapter->rx_ring, 749 sizeof(struct ixgbe_ring), GFP_KERNEL);
727 adapter->num_rx_queues * sizeof(struct ixgbe_ring)); 750 if (!temp_ring) {
751 if (netif_running(netdev))
752 netdev->netdev_ops->ndo_open(netdev);
753 return -ENOMEM;
754 }
728 755
756 if (new_rx_count != adapter->rx_ring->count) {
729 for (i = 0; i < adapter->num_rx_queues; i++) { 757 for (i = 0; i < adapter->num_rx_queues; i++) {
730 temp_ring[i].count = new_rx_count; 758 temp_ring[i].count = new_rx_count;
731 err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); 759 err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
@@ -737,13 +765,16 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
737 } 765 }
738 goto err_setup; 766 goto err_setup;
739 } 767 }
768 temp_ring[i].v_idx = adapter->rx_ring[i].v_idx;
740 } 769 }
741 770 if (netif_running(netdev))
742 for (i = 0; i < adapter->num_rx_queues; i++) 771 netdev->netdev_ops->ndo_stop(netdev);
743 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]); 772 ixgbe_reset_interrupt_capability(adapter);
744 773 ixgbe_napi_del_all(adapter);
745 memcpy(adapter->rx_ring, temp_ring, 774 INIT_LIST_HEAD(&netdev->napi_list);
746 adapter->num_rx_queues * sizeof(struct ixgbe_ring)); 775 kfree(adapter->rx_ring);
776 adapter->rx_ring = temp_ring;
777 temp_ring = NULL;
747 778
748 adapter->rx_ring_count = new_rx_count; 779 adapter->rx_ring_count = new_rx_count;
749 } 780 }
@@ -751,8 +782,9 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
751 /* success! */ 782 /* success! */
752 err = 0; 783 err = 0;
753err_setup: 784err_setup:
785 ixgbe_init_interrupt_scheme(adapter);
754 if (netif_running(netdev)) 786 if (netif_running(netdev))
755 ixgbe_up(adapter); 787 netdev->netdev_ops->ndo_open(netdev);
756 788
757 clear_bit(__IXGBE_RESETTING, &adapter->state); 789 clear_bit(__IXGBE_RESETTING, &adapter->state);
758 return err; 790 return err;
@@ -804,6 +836,16 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
804 data[i + k] = queue_stat[k]; 836 data[i + k] = queue_stat[k];
805 i += k; 837 i += k;
806 } 838 }
839 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
840 for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
841 data[i++] = adapter->stats.pxontxc[j];
842 data[i++] = adapter->stats.pxofftxc[j];
843 }
844 for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
845 data[i++] = adapter->stats.pxonrxc[j];
846 data[i++] = adapter->stats.pxoffrxc[j];
847 }
848 }
807} 849}
808 850
809static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 851static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
@@ -832,6 +874,20 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
832 sprintf(p, "rx_queue_%u_bytes", i); 874 sprintf(p, "rx_queue_%u_bytes", i);
833 p += ETH_GSTRING_LEN; 875 p += ETH_GSTRING_LEN;
834 } 876 }
877 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
878 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
879 sprintf(p, "tx_pb_%u_pxon", i);
880 p += ETH_GSTRING_LEN;
881 sprintf(p, "tx_pb_%u_pxoff", i);
882 p += ETH_GSTRING_LEN;
883 }
884 for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
885 sprintf(p, "rx_pb_%u_pxon", i);
886 p += ETH_GSTRING_LEN;
887 sprintf(p, "rx_pb_%u_pxoff", i);
888 p += ETH_GSTRING_LEN;
889 }
890 }
835 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 891 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
836 break; 892 break;
837 } 893 }
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 5236f633ee36..acef3c65cd2c 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -68,12 +68,20 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
68 board_82598 }, 68 board_82598 },
69 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), 69 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
70 board_82598 }, 70 board_82598 },
71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
72 board_82598 },
71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), 73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
72 board_82598 }, 74 board_82598 },
73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), 75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
74 board_82598 }, 76 board_82598 },
77 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
78 board_82598 },
79 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
80 board_82598 },
75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), 81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
76 board_82598 }, 82 board_82598 },
83 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
84 board_82598 },
77 85
78 /* required last entry */ 86 /* required last entry */
79 {0, } 87 {0, }
@@ -402,7 +410,7 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
402 410
403 if (adapter->netdev->features & NETIF_F_LRO && 411 if (adapter->netdev->features & NETIF_F_LRO &&
404 skb->ip_summed == CHECKSUM_UNNECESSARY) { 412 skb->ip_summed == CHECKSUM_UNNECESSARY) {
405 if (adapter->vlgrp && is_vlan) 413 if (adapter->vlgrp && is_vlan && (tag != 0))
406 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, 414 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
407 adapter->vlgrp, tag, 415 adapter->vlgrp, tag,
408 rx_desc); 416 rx_desc);
@@ -411,12 +419,12 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
411 ring->lro_used = true; 419 ring->lro_used = true;
412 } else { 420 } else {
413 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { 421 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
414 if (adapter->vlgrp && is_vlan) 422 if (adapter->vlgrp && is_vlan && (tag != 0))
415 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag); 423 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
416 else 424 else
417 netif_receive_skb(skb); 425 netif_receive_skb(skb);
418 } else { 426 } else {
419 if (adapter->vlgrp && is_vlan) 427 if (adapter->vlgrp && is_vlan && (tag != 0))
420 vlan_hwaccel_rx(skb, adapter->vlgrp, tag); 428 vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
421 else 429 else
422 netif_rx(skb); 430 netif_rx(skb);
@@ -471,7 +479,6 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
471 union ixgbe_adv_rx_desc *rx_desc; 479 union ixgbe_adv_rx_desc *rx_desc;
472 struct ixgbe_rx_buffer *bi; 480 struct ixgbe_rx_buffer *bi;
473 unsigned int i; 481 unsigned int i;
474 unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
475 482
476 i = rx_ring->next_to_use; 483 i = rx_ring->next_to_use;
477 bi = &rx_ring->rx_buffer_info[i]; 484 bi = &rx_ring->rx_buffer_info[i];
@@ -500,8 +507,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
500 } 507 }
501 508
502 if (!bi->skb) { 509 if (!bi->skb) {
503 struct sk_buff *skb = netdev_alloc_skb(adapter->netdev, 510 struct sk_buff *skb;
504 bufsz); 511 skb = netdev_alloc_skb(adapter->netdev,
512 (rx_ring->rx_buf_len +
513 NET_IP_ALIGN));
505 514
506 if (!skb) { 515 if (!skb) {
507 adapter->alloc_rx_buff_failed++; 516 adapter->alloc_rx_buff_failed++;
@@ -516,7 +525,8 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
516 skb_reserve(skb, NET_IP_ALIGN); 525 skb_reserve(skb, NET_IP_ALIGN);
517 526
518 bi->skb = skb; 527 bi->skb = skb;
519 bi->dma = pci_map_single(pdev, skb->data, bufsz, 528 bi->dma = pci_map_single(pdev, skb->data,
529 rx_ring->rx_buf_len,
520 PCI_DMA_FROMDEVICE); 530 PCI_DMA_FROMDEVICE);
521 } 531 }
522 /* Refresh the desc even if buffer_addrs didn't change because 532 /* Refresh the desc even if buffer_addrs didn't change because
@@ -607,7 +617,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
607 617
608 if (len && !skb_shinfo(skb)->nr_frags) { 618 if (len && !skb_shinfo(skb)->nr_frags) {
609 pci_unmap_single(pdev, rx_buffer_info->dma, 619 pci_unmap_single(pdev, rx_buffer_info->dma,
610 rx_ring->rx_buf_len + NET_IP_ALIGN, 620 rx_ring->rx_buf_len,
611 PCI_DMA_FROMDEVICE); 621 PCI_DMA_FROMDEVICE);
612 skb_put(skb, len); 622 skb_put(skb, len);
613 } 623 }
@@ -666,7 +676,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
666 676
667 skb->protocol = eth_type_trans(skb, adapter->netdev); 677 skb->protocol = eth_type_trans(skb, adapter->netdev);
668 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); 678 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
669 adapter->netdev->last_rx = jiffies;
670 679
671next_desc: 680next_desc:
672 rx_desc->wb.upper.status_error = 0; 681 rx_desc->wb.upper.status_error = 0;
@@ -904,6 +913,17 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
904 return; 913 return;
905} 914}
906 915
916static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
917{
918 struct ixgbe_hw *hw = &adapter->hw;
919
920 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
921 (eicr & IXGBE_EICR_GPI_SDP1)) {
922 DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
923 /* write to clear the interrupt */
924 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
925 }
926}
907 927
908static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) 928static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
909{ 929{
@@ -928,6 +948,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
928 if (eicr & IXGBE_EICR_LSC) 948 if (eicr & IXGBE_EICR_LSC)
929 ixgbe_check_lsc(adapter); 949 ixgbe_check_lsc(adapter);
930 950
951 ixgbe_check_fan_failure(adapter, eicr);
952
931 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 953 if (!test_bit(__IXGBE_DOWN, &adapter->state))
932 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 954 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
933 955
@@ -990,7 +1012,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
990 rx_ring = &(adapter->rx_ring[r_idx]); 1012 rx_ring = &(adapter->rx_ring[r_idx]);
991 /* disable interrupts on this vector only */ 1013 /* disable interrupts on this vector only */
992 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); 1014 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
993 netif_rx_schedule(adapter->netdev, &q_vector->napi); 1015 netif_rx_schedule(&q_vector->napi);
994 1016
995 return IRQ_HANDLED; 1017 return IRQ_HANDLED;
996} 1018}
@@ -1031,7 +1053,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1031 1053
1032 /* If all Rx work done, exit the polling mode */ 1054 /* If all Rx work done, exit the polling mode */
1033 if (work_done < budget) { 1055 if (work_done < budget) {
1034 netif_rx_complete(adapter->netdev, napi); 1056 netif_rx_complete(napi);
1035 if (adapter->itr_setting & 3) 1057 if (adapter->itr_setting & 3)
1036 ixgbe_set_itr_msix(q_vector); 1058 ixgbe_set_itr_msix(q_vector);
1037 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1059 if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -1080,7 +1102,7 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1080 rx_ring = &(adapter->rx_ring[r_idx]); 1102 rx_ring = &(adapter->rx_ring[r_idx]);
1081 /* If all Rx work done, exit the polling mode */ 1103 /* If all Rx work done, exit the polling mode */
1082 if (work_done < budget) { 1104 if (work_done < budget) {
1083 netif_rx_complete(adapter->netdev, napi); 1105 netif_rx_complete(napi);
1084 if (adapter->itr_setting & 3) 1106 if (adapter->itr_setting & 3)
1085 ixgbe_set_itr_msix(q_vector); 1107 ixgbe_set_itr_msix(q_vector);
1086 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1108 if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -1187,6 +1209,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1187 struct net_device *netdev = adapter->netdev; 1209 struct net_device *netdev = adapter->netdev;
1188 irqreturn_t (*handler)(int, void *); 1210 irqreturn_t (*handler)(int, void *);
1189 int i, vector, q_vectors, err; 1211 int i, vector, q_vectors, err;
1212 int ri=0, ti=0;
1190 1213
1191 /* Decrement for Other and TCP Timer vectors */ 1214 /* Decrement for Other and TCP Timer vectors */
1192 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1215 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1201,10 +1224,19 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1201 &ixgbe_msix_clean_many) 1224 &ixgbe_msix_clean_many)
1202 for (vector = 0; vector < q_vectors; vector++) { 1225 for (vector = 0; vector < q_vectors; vector++) {
1203 handler = SET_HANDLER(&adapter->q_vector[vector]); 1226 handler = SET_HANDLER(&adapter->q_vector[vector]);
1204 sprintf(adapter->name[vector], "%s:v%d-%s", 1227
1205 netdev->name, vector, 1228 if(handler == &ixgbe_msix_clean_rx) {
1206 (handler == &ixgbe_msix_clean_rx) ? "Rx" : 1229 sprintf(adapter->name[vector], "%s-%s-%d",
1207 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx")); 1230 netdev->name, "rx", ri++);
1231 }
1232 else if(handler == &ixgbe_msix_clean_tx) {
1233 sprintf(adapter->name[vector], "%s-%s-%d",
1234 netdev->name, "tx", ti++);
1235 }
1236 else
1237 sprintf(adapter->name[vector], "%s-%s-%d",
1238 netdev->name, "TxRx", vector);
1239
1208 err = request_irq(adapter->msix_entries[vector].vector, 1240 err = request_irq(adapter->msix_entries[vector].vector,
1209 handler, 0, adapter->name[vector], 1241 handler, 0, adapter->name[vector],
1210 &(adapter->q_vector[vector])); 1242 &(adapter->q_vector[vector]));
@@ -1312,6 +1344,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1312{ 1344{
1313 u32 mask; 1345 u32 mask;
1314 mask = IXGBE_EIMS_ENABLE_MASK; 1346 mask = IXGBE_EIMS_ENABLE_MASK;
1347 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
1348 mask |= IXGBE_EIMS_GPI_SDP1;
1315 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1349 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1316 IXGBE_WRITE_FLUSH(&adapter->hw); 1350 IXGBE_WRITE_FLUSH(&adapter->hw);
1317} 1351}
@@ -1342,13 +1376,15 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
1342 if (eicr & IXGBE_EICR_LSC) 1376 if (eicr & IXGBE_EICR_LSC)
1343 ixgbe_check_lsc(adapter); 1377 ixgbe_check_lsc(adapter);
1344 1378
1345 if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) { 1379 ixgbe_check_fan_failure(adapter, eicr);
1380
1381 if (netif_rx_schedule_prep(&adapter->q_vector[0].napi)) {
1346 adapter->tx_ring[0].total_packets = 0; 1382 adapter->tx_ring[0].total_packets = 0;
1347 adapter->tx_ring[0].total_bytes = 0; 1383 adapter->tx_ring[0].total_bytes = 0;
1348 adapter->rx_ring[0].total_packets = 0; 1384 adapter->rx_ring[0].total_packets = 0;
1349 adapter->rx_ring[0].total_bytes = 0; 1385 adapter->rx_ring[0].total_bytes = 0;
1350 /* would disable interrupts here but EIAM disabled it */ 1386 /* would disable interrupts here but EIAM disabled it */
1351 __netif_rx_schedule(netdev, &adapter->q_vector[0].napi); 1387 __netif_rx_schedule(&adapter->q_vector[0].napi);
1352 } 1388 }
1353 1389
1354 return IRQ_HANDLED; 1390 return IRQ_HANDLED;
@@ -1651,10 +1687,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1651 * effects of setting this bit are only that SRRCTL must be 1687 * effects of setting this bit are only that SRRCTL must be
1652 * fully programmed [0..15] 1688 * fully programmed [0..15]
1653 */ 1689 */
1654 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 1690 if (adapter->flags &
1655 rdrxctl |= IXGBE_RDRXCTL_MVMEN; 1691 (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) {
1656 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); 1692 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1657 1693 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
1694 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1695 }
1658 1696
1659 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 1697 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
1660 /* Fill out redirection table */ 1698 /* Fill out redirection table */
@@ -1713,6 +1751,16 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
1713 ixgbe_irq_disable(adapter); 1751 ixgbe_irq_disable(adapter);
1714 adapter->vlgrp = grp; 1752 adapter->vlgrp = grp;
1715 1753
1754 /*
1755 * For a DCB driver, always enable VLAN tag stripping so we can
1756 * still receive traffic from a DCB-enabled host even if we're
1757 * not in DCB mode.
1758 */
1759 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
1760 ctrl |= IXGBE_VLNCTRL_VME;
1761 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1762 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1763
1716 if (grp) { 1764 if (grp) {
1717 /* enable VLAN tag insert/strip */ 1765 /* enable VLAN tag insert/strip */
1718 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 1766 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
@@ -1877,6 +1925,44 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
1877 } 1925 }
1878} 1926}
1879 1927
1928#ifdef CONFIG_IXGBE_DCB
1929/*
1930 * ixgbe_configure_dcb - Configure DCB hardware
1931 * @adapter: ixgbe adapter struct
1932 *
1933 * This is called by the driver on open to configure the DCB hardware.
1934 * This is also called by the gennetlink interface when reconfiguring
1935 * the DCB state.
1936 */
1937static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
1938{
1939 struct ixgbe_hw *hw = &adapter->hw;
1940 u32 txdctl, vlnctrl;
1941 int i, j;
1942
1943 ixgbe_dcb_check_config(&adapter->dcb_cfg);
1944 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
1945 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
1946
1947 /* reconfigure the hardware */
1948 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
1949
1950 for (i = 0; i < adapter->num_tx_queues; i++) {
1951 j = adapter->tx_ring[i].reg_idx;
1952 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
1953 /* PThresh workaround for Tx hang with DFP enabled. */
1954 txdctl |= 32;
1955 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
1956 }
1957 /* Enable VLAN tag insert/strip */
1958 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1959 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
1960 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1961 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1962 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
1963}
1964
1965#endif
1880static void ixgbe_configure(struct ixgbe_adapter *adapter) 1966static void ixgbe_configure(struct ixgbe_adapter *adapter)
1881{ 1967{
1882 struct net_device *netdev = adapter->netdev; 1968 struct net_device *netdev = adapter->netdev;
@@ -1885,6 +1971,16 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
1885 ixgbe_set_rx_mode(netdev); 1971 ixgbe_set_rx_mode(netdev);
1886 1972
1887 ixgbe_restore_vlan(adapter); 1973 ixgbe_restore_vlan(adapter);
1974#ifdef CONFIG_IXGBE_DCB
1975 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1976 netif_set_gso_max_size(netdev, 32768);
1977 ixgbe_configure_dcb(adapter);
1978 } else {
1979 netif_set_gso_max_size(netdev, 65536);
1980 }
1981#else
1982 netif_set_gso_max_size(netdev, 65536);
1983#endif
1888 1984
1889 ixgbe_configure_tx(adapter); 1985 ixgbe_configure_tx(adapter);
1890 ixgbe_configure_rx(adapter); 1986 ixgbe_configure_rx(adapter);
@@ -1924,6 +2020,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1924 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 2020 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1925 } 2021 }
1926 2022
2023 /* Enable fan failure interrupt if media type is copper */
2024 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2025 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2026 gpie |= IXGBE_SDP1_GPIEN;
2027 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2028 }
2029
1927 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 2030 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1928 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { 2031 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
1929 mhadd &= ~IXGBE_MHADD_MFS_MASK; 2032 mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -1961,6 +2064,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1961 else 2064 else
1962 ixgbe_configure_msi_and_legacy(adapter); 2065 ixgbe_configure_msi_and_legacy(adapter);
1963 2066
2067 ixgbe_napi_add_all(adapter);
2068
1964 clear_bit(__IXGBE_DOWN, &adapter->state); 2069 clear_bit(__IXGBE_DOWN, &adapter->state);
1965 ixgbe_napi_enable_all(adapter); 2070 ixgbe_napi_enable_all(adapter);
1966 2071
@@ -2205,7 +2310,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
2205 2310
2206 /* If budget not fully consumed, exit the polling mode */ 2311 /* If budget not fully consumed, exit the polling mode */
2207 if (work_done < budget) { 2312 if (work_done < budget) {
2208 netif_rx_complete(adapter->netdev, napi); 2313 netif_rx_complete(napi);
2209 if (adapter->itr_setting & 3) 2314 if (adapter->itr_setting & 3)
2210 ixgbe_set_itr(adapter); 2315 ixgbe_set_itr(adapter);
2211 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2316 if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2231,6 +2336,11 @@ static void ixgbe_reset_task(struct work_struct *work)
2231 struct ixgbe_adapter *adapter; 2336 struct ixgbe_adapter *adapter;
2232 adapter = container_of(work, struct ixgbe_adapter, reset_task); 2337 adapter = container_of(work, struct ixgbe_adapter, reset_task);
2233 2338
2339 /* If we're already down or resetting, just bail */
2340 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
2341 test_bit(__IXGBE_RESETTING, &adapter->state))
2342 return;
2343
2234 adapter->tx_timeout_count++; 2344 adapter->tx_timeout_count++;
2235 2345
2236 ixgbe_reinit_locked(adapter); 2346 ixgbe_reinit_locked(adapter);
@@ -2240,15 +2350,31 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2240{ 2350{
2241 int nrq = 1, ntq = 1; 2351 int nrq = 1, ntq = 1;
2242 int feature_mask = 0, rss_i, rss_m; 2352 int feature_mask = 0, rss_i, rss_m;
2353 int dcb_i, dcb_m;
2243 2354
2244 /* Number of supported queues */ 2355 /* Number of supported queues */
2245 switch (adapter->hw.mac.type) { 2356 switch (adapter->hw.mac.type) {
2246 case ixgbe_mac_82598EB: 2357 case ixgbe_mac_82598EB:
2358 dcb_i = adapter->ring_feature[RING_F_DCB].indices;
2359 dcb_m = 0;
2247 rss_i = adapter->ring_feature[RING_F_RSS].indices; 2360 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2248 rss_m = 0; 2361 rss_m = 0;
2249 feature_mask |= IXGBE_FLAG_RSS_ENABLED; 2362 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2363 feature_mask |= IXGBE_FLAG_DCB_ENABLED;
2250 2364
2251 switch (adapter->flags & feature_mask) { 2365 switch (adapter->flags & feature_mask) {
2366 case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
2367 dcb_m = 0x7 << 3;
2368 rss_i = min(8, rss_i);
2369 rss_m = 0x7;
2370 nrq = dcb_i * rss_i;
2371 ntq = min(MAX_TX_QUEUES, dcb_i * rss_i);
2372 break;
2373 case (IXGBE_FLAG_DCB_ENABLED):
2374 dcb_m = 0x7 << 3;
2375 nrq = dcb_i;
2376 ntq = dcb_i;
2377 break;
2252 case (IXGBE_FLAG_RSS_ENABLED): 2378 case (IXGBE_FLAG_RSS_ENABLED):
2253 rss_m = 0xF; 2379 rss_m = 0xF;
2254 nrq = rss_i; 2380 nrq = rss_i;
@@ -2256,6 +2382,8 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2256 break; 2382 break;
2257 case 0: 2383 case 0:
2258 default: 2384 default:
2385 dcb_i = 0;
2386 dcb_m = 0;
2259 rss_i = 0; 2387 rss_i = 0;
2260 rss_m = 0; 2388 rss_m = 0;
2261 nrq = 1; 2389 nrq = 1;
@@ -2263,6 +2391,12 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2263 break; 2391 break;
2264 } 2392 }
2265 2393
2394 /* Sanity check, we should never have zero queues */
2395 nrq = (nrq ?:1);
2396 ntq = (ntq ?:1);
2397
2398 adapter->ring_feature[RING_F_DCB].indices = dcb_i;
2399 adapter->ring_feature[RING_F_DCB].mask = dcb_m;
2266 adapter->ring_feature[RING_F_RSS].indices = rss_i; 2400 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2267 adapter->ring_feature[RING_F_RSS].mask = rss_m; 2401 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2268 break; 2402 break;
@@ -2314,6 +2448,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2314 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2448 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2315 kfree(adapter->msix_entries); 2449 kfree(adapter->msix_entries);
2316 adapter->msix_entries = NULL; 2450 adapter->msix_entries = NULL;
2451 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2317 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2452 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2318 ixgbe_set_num_queues(adapter); 2453 ixgbe_set_num_queues(adapter);
2319 } else { 2454 } else {
@@ -2333,15 +2468,42 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2333{ 2468{
2334 int feature_mask = 0, rss_i; 2469 int feature_mask = 0, rss_i;
2335 int i, txr_idx, rxr_idx; 2470 int i, txr_idx, rxr_idx;
2471 int dcb_i;
2336 2472
2337 /* Number of supported queues */ 2473 /* Number of supported queues */
2338 switch (adapter->hw.mac.type) { 2474 switch (adapter->hw.mac.type) {
2339 case ixgbe_mac_82598EB: 2475 case ixgbe_mac_82598EB:
2476 dcb_i = adapter->ring_feature[RING_F_DCB].indices;
2340 rss_i = adapter->ring_feature[RING_F_RSS].indices; 2477 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2341 txr_idx = 0; 2478 txr_idx = 0;
2342 rxr_idx = 0; 2479 rxr_idx = 0;
2480 feature_mask |= IXGBE_FLAG_DCB_ENABLED;
2343 feature_mask |= IXGBE_FLAG_RSS_ENABLED; 2481 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2344 switch (adapter->flags & feature_mask) { 2482 switch (adapter->flags & feature_mask) {
2483 case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
2484 for (i = 0; i < dcb_i; i++) {
2485 int j;
2486 /* Rx first */
2487 for (j = 0; j < adapter->num_rx_queues; j++) {
2488 adapter->rx_ring[rxr_idx].reg_idx =
2489 i << 3 | j;
2490 rxr_idx++;
2491 }
2492 /* Tx now */
2493 for (j = 0; j < adapter->num_tx_queues; j++) {
2494 adapter->tx_ring[txr_idx].reg_idx =
2495 i << 2 | (j >> 1);
2496 if (j & 1)
2497 txr_idx++;
2498 }
2499 }
2500 case (IXGBE_FLAG_DCB_ENABLED):
2501 /* the number of queues is assumed to be symmetric */
2502 for (i = 0; i < dcb_i; i++) {
2503 adapter->rx_ring[i].reg_idx = i << 3;
2504 adapter->tx_ring[i].reg_idx = i << 2;
2505 }
2506 break;
2345 case (IXGBE_FLAG_RSS_ENABLED): 2507 case (IXGBE_FLAG_RSS_ENABLED):
2346 for (i = 0; i < adapter->num_rx_queues; i++) 2508 for (i = 0; i < adapter->num_rx_queues; i++)
2347 adapter->rx_ring[i].reg_idx = i; 2509 adapter->rx_ring[i].reg_idx = i;
@@ -2363,8 +2525,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2363 * @adapter: board private structure to initialize 2525 * @adapter: board private structure to initialize
2364 * 2526 *
2365 * We allocate one ring per queue at run-time since we don't know the 2527 * We allocate one ring per queue at run-time since we don't know the
2366 * number of queues at compile-time. The polling_netdev array is 2528 * number of queues at compile-time.
2367 * intended for Multiqueue, but should work fine with a single queue.
2368 **/ 2529 **/
2369static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) 2530static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2370{ 2531{
@@ -2435,6 +2596,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
2435 adapter->msix_entries = kcalloc(v_budget, 2596 adapter->msix_entries = kcalloc(v_budget,
2436 sizeof(struct msix_entry), GFP_KERNEL); 2597 sizeof(struct msix_entry), GFP_KERNEL);
2437 if (!adapter->msix_entries) { 2598 if (!adapter->msix_entries) {
2599 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
2438 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2600 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2439 ixgbe_set_num_queues(adapter); 2601 ixgbe_set_num_queues(adapter);
2440 kfree(adapter->tx_ring); 2602 kfree(adapter->tx_ring);
@@ -2475,7 +2637,7 @@ out:
2475 return err; 2637 return err;
2476} 2638}
2477 2639
2478static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) 2640void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2479{ 2641{
2480 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2642 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2481 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2643 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
@@ -2499,7 +2661,7 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2499 * - Hardware queue count (num_*_queues) 2661 * - Hardware queue count (num_*_queues)
2500 * - defined by miscellaneous hardware support/features (RSS, etc.) 2662 * - defined by miscellaneous hardware support/features (RSS, etc.)
2501 **/ 2663 **/
2502static int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) 2664int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2503{ 2665{
2504 int err; 2666 int err;
2505 2667
@@ -2535,6 +2697,57 @@ err_alloc_queues:
2535} 2697}
2536 2698
2537/** 2699/**
2700 * ixgbe_sfp_timer - worker thread to find a missing module
2701 * @data: pointer to our adapter struct
2702 **/
2703static void ixgbe_sfp_timer(unsigned long data)
2704{
2705 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
2706
2707 /* Do the sfp_timer outside of interrupt context due to the
2708 * delays that sfp+ detection requires
2709 */
2710 schedule_work(&adapter->sfp_task);
2711}
2712
2713/**
2714 * ixgbe_sfp_task - worker thread to find a missing module
2715 * @work: pointer to work_struct containing our data
2716 **/
2717static void ixgbe_sfp_task(struct work_struct *work)
2718{
2719 struct ixgbe_adapter *adapter = container_of(work,
2720 struct ixgbe_adapter,
2721 sfp_task);
2722 struct ixgbe_hw *hw = &adapter->hw;
2723
2724 if ((hw->phy.type == ixgbe_phy_nl) &&
2725 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
2726 s32 ret = hw->phy.ops.identify_sfp(hw);
2727 if (ret)
2728 goto reschedule;
2729 ret = hw->phy.ops.reset(hw);
2730 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2731 DPRINTK(PROBE, ERR, "failed to initialize because an "
2732 "unsupported SFP+ module type was detected.\n"
2733 "Reload the driver after installing a "
2734 "supported module.\n");
2735 unregister_netdev(adapter->netdev);
2736 } else {
2737 DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
2738 hw->phy.sfp_type);
2739 }
2740 /* don't need this routine any more */
2741 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
2742 }
2743 return;
2744reschedule:
2745 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
2746 mod_timer(&adapter->sfp_timer,
2747 round_jiffies(jiffies + (2 * HZ)));
2748}
2749
2750/**
2538 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) 2751 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
2539 * @adapter: board private structure to initialize 2752 * @adapter: board private structure to initialize
2540 * 2753 *
@@ -2547,6 +2760,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2547 struct ixgbe_hw *hw = &adapter->hw; 2760 struct ixgbe_hw *hw = &adapter->hw;
2548 struct pci_dev *pdev = adapter->pdev; 2761 struct pci_dev *pdev = adapter->pdev;
2549 unsigned int rss; 2762 unsigned int rss;
2763#ifdef CONFIG_IXGBE_DCB
2764 int j;
2765 struct tc_configuration *tc;
2766#endif
2550 2767
2551 /* PCI config space info */ 2768 /* PCI config space info */
2552 2769
@@ -2560,6 +2777,30 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2560 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); 2777 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2561 adapter->ring_feature[RING_F_RSS].indices = rss; 2778 adapter->ring_feature[RING_F_RSS].indices = rss;
2562 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 2779 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2780 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
2781
2782#ifdef CONFIG_IXGBE_DCB
2783 /* Configure DCB traffic classes */
2784 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
2785 tc = &adapter->dcb_cfg.tc_config[j];
2786 tc->path[DCB_TX_CONFIG].bwg_id = 0;
2787 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
2788 tc->path[DCB_RX_CONFIG].bwg_id = 0;
2789 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
2790 tc->dcb_pfc = pfc_disabled;
2791 }
2792 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
2793 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
2794 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
2795 adapter->dcb_cfg.round_robin_enable = false;
2796 adapter->dcb_set_bitmap = 0x00;
2797 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
2798 adapter->ring_feature[RING_F_DCB].indices);
2799
2800#endif
2801 if (hw->mac.ops.get_media_type &&
2802 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper))
2803 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
2563 2804
2564 /* default flow control settings */ 2805 /* default flow control settings */
2565 hw->fc.original_type = ixgbe_fc_none; 2806 hw->fc.original_type = ixgbe_fc_none;
@@ -2934,11 +3175,16 @@ static int ixgbe_close(struct net_device *netdev)
2934 * @adapter: private struct 3175 * @adapter: private struct
2935 * helper function to napi_add each possible q_vector->napi 3176 * helper function to napi_add each possible q_vector->napi
2936 */ 3177 */
2937static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) 3178void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
2938{ 3179{
2939 int q_idx, q_vectors; 3180 int q_idx, q_vectors;
3181 struct net_device *netdev = adapter->netdev;
2940 int (*poll)(struct napi_struct *, int); 3182 int (*poll)(struct napi_struct *, int);
2941 3183
3184 /* check if we already have our netdev->napi_list populated */
3185 if (&netdev->napi_list != netdev->napi_list.next)
3186 return;
3187
2942 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3188 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2943 poll = &ixgbe_clean_rxonly; 3189 poll = &ixgbe_clean_rxonly;
2944 /* Only enable as many vectors as we have rx queues. */ 3190 /* Only enable as many vectors as we have rx queues. */
@@ -2955,7 +3201,7 @@ static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
2955 } 3201 }
2956} 3202}
2957 3203
2958static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter) 3204void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
2959{ 3205{
2960 int q_idx; 3206 int q_idx;
2961 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3207 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -3032,6 +3278,7 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
3032 } 3278 }
3033 ixgbe_reset_interrupt_capability(adapter); 3279 ixgbe_reset_interrupt_capability(adapter);
3034 ixgbe_napi_del_all(adapter); 3280 ixgbe_napi_del_all(adapter);
3281 INIT_LIST_HEAD(&netdev->napi_list);
3035 kfree(adapter->tx_ring); 3282 kfree(adapter->tx_ring);
3036 kfree(adapter->rx_ring); 3283 kfree(adapter->rx_ring);
3037 3284
@@ -3076,6 +3323,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
3076 adapter->stats.mpc[i] += mpc; 3323 adapter->stats.mpc[i] += mpc;
3077 total_mpc += adapter->stats.mpc[i]; 3324 total_mpc += adapter->stats.mpc[i];
3078 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i)); 3325 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3326 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3327 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
3328 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3329 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
3330 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
3331 IXGBE_PXONRXC(i));
3332 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
3333 IXGBE_PXONTXC(i));
3334 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
3335 IXGBE_PXOFFRXC(i));
3336 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
3337 IXGBE_PXOFFTXC(i));
3079 } 3338 }
3080 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); 3339 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3081 /* work around hardware counting issue */ 3340 /* work around hardware counting issue */
@@ -3204,15 +3463,16 @@ static void ixgbe_watchdog_task(struct work_struct *work)
3204 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); 3463 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
3205#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE) 3464#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
3206#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X) 3465#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
3207 DPRINTK(LINK, INFO, "NIC Link is Up %s, " 3466 printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
3208 "Flow Control: %s\n", 3467 "Flow Control: %s\n",
3209 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 3468 netdev->name,
3210 "10 Gbps" : 3469 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
3211 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 3470 "10 Gbps" :
3212 "1 Gbps" : "unknown speed")), 3471 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
3213 ((FLOW_RX && FLOW_TX) ? "RX/TX" : 3472 "1 Gbps" : "unknown speed")),
3214 (FLOW_RX ? "RX" : 3473 ((FLOW_RX && FLOW_TX) ? "RX/TX" :
3215 (FLOW_TX ? "TX" : "None")))); 3474 (FLOW_RX ? "RX" :
3475 (FLOW_TX ? "TX" : "None"))));
3216 3476
3217 netif_carrier_on(netdev); 3477 netif_carrier_on(netdev);
3218 netif_tx_wake_all_queues(netdev); 3478 netif_tx_wake_all_queues(netdev);
@@ -3224,7 +3484,8 @@ static void ixgbe_watchdog_task(struct work_struct *work)
3224 adapter->link_up = false; 3484 adapter->link_up = false;
3225 adapter->link_speed = 0; 3485 adapter->link_speed = 0;
3226 if (netif_carrier_ok(netdev)) { 3486 if (netif_carrier_ok(netdev)) {
3227 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 3487 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
3488 netdev->name);
3228 netif_carrier_off(netdev); 3489 netif_carrier_off(netdev);
3229 netif_tx_stop_all_queues(netdev); 3490 netif_tx_stop_all_queues(netdev);
3230 } 3491 }
@@ -3573,6 +3834,14 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3573 3834
3574 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3835 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3575 tx_flags |= vlan_tx_tag_get(skb); 3836 tx_flags |= vlan_tx_tag_get(skb);
3837 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3838 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
3839 tx_flags |= (skb->queue_mapping << 13);
3840 }
3841 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3842 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3843 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3844 tx_flags |= (skb->queue_mapping << 13);
3576 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3845 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3577 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3846 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3578 } 3847 }
@@ -3687,9 +3956,31 @@ static int ixgbe_link_config(struct ixgbe_hw *hw)
3687 /* must always autoneg for both 1G and 10G link */ 3956 /* must always autoneg for both 1G and 10G link */
3688 hw->mac.autoneg = true; 3957 hw->mac.autoneg = true;
3689 3958
3959 if ((hw->mac.type == ixgbe_mac_82598EB) &&
3960 (hw->phy.media_type == ixgbe_media_type_copper))
3961 autoneg = IXGBE_LINK_SPEED_82598_AUTONEG;
3962
3690 return hw->mac.ops.setup_link_speed(hw, autoneg, true, true); 3963 return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
3691} 3964}
3692 3965
3966static const struct net_device_ops ixgbe_netdev_ops = {
3967 .ndo_open = ixgbe_open,
3968 .ndo_stop = ixgbe_close,
3969 .ndo_start_xmit = ixgbe_xmit_frame,
3970 .ndo_get_stats = ixgbe_get_stats,
3971 .ndo_set_multicast_list = ixgbe_set_rx_mode,
3972 .ndo_validate_addr = eth_validate_addr,
3973 .ndo_set_mac_address = ixgbe_set_mac,
3974 .ndo_change_mtu = ixgbe_change_mtu,
3975 .ndo_tx_timeout = ixgbe_tx_timeout,
3976 .ndo_vlan_rx_register = ixgbe_vlan_rx_register,
3977 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
3978 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
3979#ifdef CONFIG_NET_POLL_CONTROLLER
3980 .ndo_poll_controller = ixgbe_netpoll,
3981#endif
3982};
3983
3693/** 3984/**
3694 * ixgbe_probe - Device Initialization Routine 3985 * ixgbe_probe - Device Initialization Routine
3695 * @pdev: PCI device information struct 3986 * @pdev: PCI device information struct
@@ -3739,6 +4030,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3739 goto err_pci_reg; 4030 goto err_pci_reg;
3740 } 4031 }
3741 4032
4033 err = pci_enable_pcie_error_reporting(pdev);
4034 if (err) {
4035 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
4036 "0x%x\n", err);
4037 /* non-fatal, continue */
4038 }
4039
3742 pci_set_master(pdev); 4040 pci_set_master(pdev);
3743 pci_save_state(pdev); 4041 pci_save_state(pdev);
3744 4042
@@ -3771,23 +4069,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3771 continue; 4069 continue;
3772 } 4070 }
3773 4071
3774 netdev->open = &ixgbe_open; 4072 netdev->netdev_ops = &ixgbe_netdev_ops;
3775 netdev->stop = &ixgbe_close;
3776 netdev->hard_start_xmit = &ixgbe_xmit_frame;
3777 netdev->get_stats = &ixgbe_get_stats;
3778 netdev->set_rx_mode = &ixgbe_set_rx_mode;
3779 netdev->set_multicast_list = &ixgbe_set_rx_mode;
3780 netdev->set_mac_address = &ixgbe_set_mac;
3781 netdev->change_mtu = &ixgbe_change_mtu;
3782 ixgbe_set_ethtool_ops(netdev); 4073 ixgbe_set_ethtool_ops(netdev);
3783 netdev->tx_timeout = &ixgbe_tx_timeout;
3784 netdev->watchdog_timeo = 5 * HZ; 4074 netdev->watchdog_timeo = 5 * HZ;
3785 netdev->vlan_rx_register = ixgbe_vlan_rx_register;
3786 netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
3787 netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
3788#ifdef CONFIG_NET_POLL_CONTROLLER
3789 netdev->poll_controller = ixgbe_netpoll;
3790#endif
3791 strcpy(netdev->name, pci_name(pdev)); 4075 strcpy(netdev->name, pci_name(pdev));
3792 4076
3793 adapter->bd_number = cards_found; 4077 adapter->bd_number = cards_found;
@@ -3805,11 +4089,31 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3805 4089
3806 /* PHY */ 4090 /* PHY */
3807 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); 4091 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
3808 /* phy->sfp_type = ixgbe_sfp_type_unknown; */ 4092 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
4093
4094 /* set up this timer and work struct before calling get_invariants
4095 * which might start the timer
4096 */
4097 init_timer(&adapter->sfp_timer);
4098 adapter->sfp_timer.function = &ixgbe_sfp_timer;
4099 adapter->sfp_timer.data = (unsigned long) adapter;
4100
4101 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
3809 4102
3810 err = ii->get_invariants(hw); 4103 err = ii->get_invariants(hw);
3811 if (err) 4104 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
4105 /* start a kernel thread to watch for a module to arrive */
4106 set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4107 mod_timer(&adapter->sfp_timer,
4108 round_jiffies(jiffies + (2 * HZ)));
4109 err = 0;
4110 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4111 DPRINTK(PROBE, ERR, "failed to load because an "
4112 "unsupported SFP+ module type was detected.\n");
3812 goto err_hw_init; 4113 goto err_hw_init;
4114 } else if (err) {
4115 goto err_hw_init;
4116 }
3813 4117
3814 /* setup the private structure */ 4118 /* setup the private structure */
3815 err = ixgbe_sw_init(adapter); 4119 err = ixgbe_sw_init(adapter);
@@ -3839,6 +4143,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3839 netdev->vlan_features |= NETIF_F_IP_CSUM; 4143 netdev->vlan_features |= NETIF_F_IP_CSUM;
3840 netdev->vlan_features |= NETIF_F_SG; 4144 netdev->vlan_features |= NETIF_F_SG;
3841 4145
4146 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
4147 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
4148
4149#ifdef CONFIG_IXGBE_DCB
4150 netdev->dcbnl_ops = &dcbnl_ops;
4151#endif
4152
3842 if (pci_using_dac) 4153 if (pci_using_dac)
3843 netdev->features |= NETIF_F_HIGHDMA; 4154 netdev->features |= NETIF_F_HIGHDMA;
3844 4155
@@ -3873,8 +4184,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3873 pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status); 4184 pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
3874 link_speed = link_status & IXGBE_PCI_LINK_SPEED; 4185 link_speed = link_status & IXGBE_PCI_LINK_SPEED;
3875 link_width = link_status & IXGBE_PCI_LINK_WIDTH; 4186 link_width = link_status & IXGBE_PCI_LINK_WIDTH;
3876 dev_info(&pdev->dev, "(PCI Express:%s:%s) " 4187 dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
3877 "%02x:%02x:%02x:%02x:%02x:%02x\n",
3878 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : 4188 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
3879 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : 4189 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
3880 "Unknown"), 4190 "Unknown"),
@@ -3883,8 +4193,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3883 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : 4193 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
3884 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" : 4194 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
3885 "Unknown"), 4195 "Unknown"),
3886 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], 4196 netdev->dev_addr);
3887 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
3888 ixgbe_read_pba_num_generic(hw, &part_num); 4197 ixgbe_read_pba_num_generic(hw, &part_num);
3889 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 4198 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
3890 hw->mac.type, hw->phy.type, 4199 hw->mac.type, hw->phy.type,
@@ -3911,8 +4220,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
3911 netif_carrier_off(netdev); 4220 netif_carrier_off(netdev);
3912 netif_tx_stop_all_queues(netdev); 4221 netif_tx_stop_all_queues(netdev);
3913 4222
3914 ixgbe_napi_add_all(adapter);
3915
3916 strcpy(netdev->name, "eth%d"); 4223 strcpy(netdev->name, "eth%d");
3917 err = register_netdev(netdev); 4224 err = register_netdev(netdev);
3918 if (err) 4225 if (err)
@@ -3938,6 +4245,9 @@ err_hw_init:
3938err_sw_init: 4245err_sw_init:
3939 ixgbe_reset_interrupt_capability(adapter); 4246 ixgbe_reset_interrupt_capability(adapter);
3940err_eeprom: 4247err_eeprom:
4248 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4249 del_timer_sync(&adapter->sfp_timer);
4250 cancel_work_sync(&adapter->sfp_task);
3941 iounmap(hw->hw_addr); 4251 iounmap(hw->hw_addr);
3942err_ioremap: 4252err_ioremap:
3943 free_netdev(netdev); 4253 free_netdev(netdev);
@@ -3962,10 +4272,18 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
3962{ 4272{
3963 struct net_device *netdev = pci_get_drvdata(pdev); 4273 struct net_device *netdev = pci_get_drvdata(pdev);
3964 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4274 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4275 int err;
3965 4276
3966 set_bit(__IXGBE_DOWN, &adapter->state); 4277 set_bit(__IXGBE_DOWN, &adapter->state);
4278 /* clear the module not found bit to make sure the worker won't
4279 * reschedule
4280 */
4281 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3967 del_timer_sync(&adapter->watchdog_timer); 4282 del_timer_sync(&adapter->watchdog_timer);
3968 4283
4284 del_timer_sync(&adapter->sfp_timer);
4285 cancel_work_sync(&adapter->watchdog_task);
4286 cancel_work_sync(&adapter->sfp_task);
3969 flush_scheduled_work(); 4287 flush_scheduled_work();
3970 4288
3971#ifdef CONFIG_IXGBE_DCA 4289#ifdef CONFIG_IXGBE_DCA
@@ -3976,7 +4294,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
3976 } 4294 }
3977 4295
3978#endif 4296#endif
3979 unregister_netdev(netdev); 4297 if (netdev->reg_state == NETREG_REGISTERED)
4298 unregister_netdev(netdev);
3980 4299
3981 ixgbe_reset_interrupt_capability(adapter); 4300 ixgbe_reset_interrupt_capability(adapter);
3982 4301
@@ -3986,12 +4305,16 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
3986 pci_release_regions(pdev); 4305 pci_release_regions(pdev);
3987 4306
3988 DPRINTK(PROBE, INFO, "complete\n"); 4307 DPRINTK(PROBE, INFO, "complete\n");
3989 ixgbe_napi_del_all(adapter);
3990 kfree(adapter->tx_ring); 4308 kfree(adapter->tx_ring);
3991 kfree(adapter->rx_ring); 4309 kfree(adapter->rx_ring);
3992 4310
3993 free_netdev(netdev); 4311 free_netdev(netdev);
3994 4312
4313 err = pci_disable_pcie_error_reporting(pdev);
4314 if (err)
4315 dev_err(&pdev->dev,
4316 "pci_disable_pcie_error_reporting failed 0x%x\n", err);
4317
3995 pci_disable_device(pdev); 4318 pci_disable_device(pdev);
3996} 4319}
3997 4320
@@ -4007,7 +4330,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
4007 pci_channel_state_t state) 4330 pci_channel_state_t state)
4008{ 4331{
4009 struct net_device *netdev = pci_get_drvdata(pdev); 4332 struct net_device *netdev = pci_get_drvdata(pdev);
4010 struct ixgbe_adapter *adapter = netdev->priv; 4333 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4011 4334
4012 netif_device_detach(netdev); 4335 netif_device_detach(netdev);
4013 4336
@@ -4028,22 +4351,34 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
4028static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) 4351static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
4029{ 4352{
4030 struct net_device *netdev = pci_get_drvdata(pdev); 4353 struct net_device *netdev = pci_get_drvdata(pdev);
4031 struct ixgbe_adapter *adapter = netdev->priv; 4354 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4355 pci_ers_result_t result;
4356 int err;
4032 4357
4033 if (pci_enable_device(pdev)) { 4358 if (pci_enable_device(pdev)) {
4034 DPRINTK(PROBE, ERR, 4359 DPRINTK(PROBE, ERR,
4035 "Cannot re-enable PCI device after reset.\n"); 4360 "Cannot re-enable PCI device after reset.\n");
4036 return PCI_ERS_RESULT_DISCONNECT; 4361 result = PCI_ERS_RESULT_DISCONNECT;
4037 } 4362 } else {
4038 pci_set_master(pdev); 4363 pci_set_master(pdev);
4039 pci_restore_state(pdev); 4364 pci_restore_state(pdev);
4040 4365
4041 pci_enable_wake(pdev, PCI_D3hot, 0); 4366 pci_enable_wake(pdev, PCI_D3hot, 0);
4042 pci_enable_wake(pdev, PCI_D3cold, 0); 4367 pci_enable_wake(pdev, PCI_D3cold, 0);
4043 4368
4044 ixgbe_reset(adapter); 4369 ixgbe_reset(adapter);
4370
4371 result = PCI_ERS_RESULT_RECOVERED;
4372 }
4045 4373
4046 return PCI_ERS_RESULT_RECOVERED; 4374 err = pci_cleanup_aer_uncorrect_error_status(pdev);
4375 if (err) {
4376 dev_err(&pdev->dev,
4377 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
4378 /* non-fatal, continue */
4379 }
4380
4381 return result;
4047} 4382}
4048 4383
4049/** 4384/**
@@ -4056,7 +4391,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
4056static void ixgbe_io_resume(struct pci_dev *pdev) 4391static void ixgbe_io_resume(struct pci_dev *pdev)
4057{ 4392{
4058 struct net_device *netdev = pci_get_drvdata(pdev); 4393 struct net_device *netdev = pci_get_drvdata(pdev);
4059 struct ixgbe_adapter *adapter = netdev->priv; 4394 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4060 4395
4061 if (netif_running(netdev)) { 4396 if (netif_running(netdev)) {
4062 if (ixgbe_up(adapter)) { 4397 if (ixgbe_up(adapter)) {
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 764035a8c9a1..5a8669aedf64 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -121,9 +121,15 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
121 enum ixgbe_phy_type phy_type; 121 enum ixgbe_phy_type phy_type;
122 122
123 switch (phy_id) { 123 switch (phy_id) {
124 case TN1010_PHY_ID:
125 phy_type = ixgbe_phy_tn;
126 break;
124 case QT2022_PHY_ID: 127 case QT2022_PHY_ID:
125 phy_type = ixgbe_phy_qt; 128 phy_type = ixgbe_phy_qt;
126 break; 129 break;
130 case ATH_PHY_ID:
131 phy_type = ixgbe_phy_nl;
132 break;
127 default: 133 default:
128 phy_type = ixgbe_phy_unknown; 134 phy_type = ixgbe_phy_unknown;
129 break; 135 break;
@@ -426,3 +432,323 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
426 return 0; 432 return 0;
427} 433}
428 434
435/**
436 * ixgbe_reset_phy_nl - Performs a PHY reset
437 * @hw: pointer to hardware structure
438 **/
439s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
440{
441 u16 phy_offset, control, eword, edata, block_crc;
442 bool end_data = false;
443 u16 list_offset, data_offset;
444 u16 phy_data = 0;
445 s32 ret_val = 0;
446 u32 i;
447
448 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
449 IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
450
451 /* reset the PHY and poll for completion */
452 hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
453 IXGBE_MDIO_PHY_XS_DEV_TYPE,
454 (phy_data | IXGBE_MDIO_PHY_XS_RESET));
455
456 for (i = 0; i < 100; i++) {
457 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
458 IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
459 if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
460 break;
461 msleep(10);
462 }
463
464 if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
465 hw_dbg(hw, "PHY reset did not complete.\n");
466 ret_val = IXGBE_ERR_PHY;
467 goto out;
468 }
469
470 /* Get init offsets */
471 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
472 &data_offset);
473 if (ret_val != 0)
474 goto out;
475
476 ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
477 data_offset++;
478 while (!end_data) {
479 /*
480 * Read control word from PHY init contents offset
481 */
482 ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
483 control = (eword & IXGBE_CONTROL_MASK_NL) >>
484 IXGBE_CONTROL_SHIFT_NL;
485 edata = eword & IXGBE_DATA_MASK_NL;
486 switch (control) {
487 case IXGBE_DELAY_NL:
488 data_offset++;
489 hw_dbg(hw, "DELAY: %d MS\n", edata);
490 msleep(edata);
491 break;
492 case IXGBE_DATA_NL:
493 hw_dbg(hw, "DATA: \n");
494 data_offset++;
495 hw->eeprom.ops.read(hw, data_offset++,
496 &phy_offset);
497 for (i = 0; i < edata; i++) {
498 hw->eeprom.ops.read(hw, data_offset, &eword);
499 hw->phy.ops.write_reg(hw, phy_offset,
500 IXGBE_TWINAX_DEV, eword);
501 hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
502 phy_offset);
503 data_offset++;
504 phy_offset++;
505 }
506 break;
507 case IXGBE_CONTROL_NL:
508 data_offset++;
509 hw_dbg(hw, "CONTROL: \n");
510 if (edata == IXGBE_CONTROL_EOL_NL) {
511 hw_dbg(hw, "EOL\n");
512 end_data = true;
513 } else if (edata == IXGBE_CONTROL_SOL_NL) {
514 hw_dbg(hw, "SOL\n");
515 } else {
516 hw_dbg(hw, "Bad control value\n");
517 ret_val = IXGBE_ERR_PHY;
518 goto out;
519 }
520 break;
521 default:
522 hw_dbg(hw, "Bad control type\n");
523 ret_val = IXGBE_ERR_PHY;
524 goto out;
525 }
526 }
527
528out:
529 return ret_val;
530}
531
532/**
533 * ixgbe_identify_sfp_module_generic - Identifies SFP module and assigns
534 * the PHY type.
535 * @hw: pointer to hardware structure
536 *
537 * Searches for and indentifies the SFP module. Assings appropriate PHY type.
538 **/
539s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
540{
541 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
542 u32 vendor_oui = 0;
543 u8 identifier = 0;
544 u8 comp_codes_1g = 0;
545 u8 comp_codes_10g = 0;
546 u8 oui_bytes[4] = {0, 0, 0, 0};
547 u8 transmission_media = 0;
548
549 status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
550 &identifier);
551
552 if (status == IXGBE_ERR_SFP_NOT_PRESENT) {
553 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
554 goto out;
555 }
556
557 if (identifier == IXGBE_SFF_IDENTIFIER_SFP) {
558 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
559 &comp_codes_1g);
560 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES,
561 &comp_codes_10g);
562 hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_TRANSMISSION_MEDIA,
563 &transmission_media);
564
565 /* ID Module
566 * =========
567 * 0 SFP_DA_CU
568 * 1 SFP_SR
569 * 2 SFP_LR
570 */
571 if (transmission_media & IXGBE_SFF_TWIN_AX_CAPABLE)
572 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
573 else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
574 hw->phy.sfp_type = ixgbe_sfp_type_sr;
575 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
576 hw->phy.sfp_type = ixgbe_sfp_type_lr;
577 else
578 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
579
580 /* Determine PHY vendor */
581 if (hw->phy.type == ixgbe_phy_unknown) {
582 hw->phy.id = identifier;
583 hw->phy.ops.read_i2c_eeprom(hw,
584 IXGBE_SFF_VENDOR_OUI_BYTE0,
585 &oui_bytes[0]);
586 hw->phy.ops.read_i2c_eeprom(hw,
587 IXGBE_SFF_VENDOR_OUI_BYTE1,
588 &oui_bytes[1]);
589 hw->phy.ops.read_i2c_eeprom(hw,
590 IXGBE_SFF_VENDOR_OUI_BYTE2,
591 &oui_bytes[2]);
592
593 vendor_oui =
594 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
595 (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
596 (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
597
598 switch (vendor_oui) {
599 case IXGBE_SFF_VENDOR_OUI_TYCO:
600 if (transmission_media &
601 IXGBE_SFF_TWIN_AX_CAPABLE)
602 hw->phy.type = ixgbe_phy_tw_tyco;
603 break;
604 case IXGBE_SFF_VENDOR_OUI_FTL:
605 hw->phy.type = ixgbe_phy_sfp_ftl;
606 break;
607 case IXGBE_SFF_VENDOR_OUI_AVAGO:
608 hw->phy.type = ixgbe_phy_sfp_avago;
609 break;
610 default:
611 if (transmission_media &
612 IXGBE_SFF_TWIN_AX_CAPABLE)
613 hw->phy.type = ixgbe_phy_tw_unknown;
614 else
615 hw->phy.type = ixgbe_phy_sfp_unknown;
616 break;
617 }
618 }
619 status = 0;
620 }
621
622out:
623 return status;
624}
625
626/**
627 * ixgbe_get_sfp_init_sequence_offsets - Checks the MAC's EEPROM to see
628 * if it supports a given SFP+ module type, if so it returns the offsets to the
629 * phy init sequence block.
630 * @hw: pointer to hardware structure
631 * @list_offset: offset to the SFP ID list
632 * @data_offset: offset to the SFP data block
633 **/
634s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
635 u16 *list_offset,
636 u16 *data_offset)
637{
638 u16 sfp_id;
639
640 if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
641 return IXGBE_ERR_SFP_NOT_SUPPORTED;
642
643 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
644 return IXGBE_ERR_SFP_NOT_PRESENT;
645
646 if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
647 (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
648 return IXGBE_ERR_SFP_NOT_SUPPORTED;
649
650 /* Read offset to PHY init contents */
651 hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
652
653 if ((!*list_offset) || (*list_offset == 0xFFFF))
654 return IXGBE_ERR_PHY;
655
656 /* Shift offset to first ID word */
657 (*list_offset)++;
658
659 /*
660 * Find the matching SFP ID in the EEPROM
661 * and program the init sequence
662 */
663 hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
664
665 while (sfp_id != IXGBE_PHY_INIT_END_NL) {
666 if (sfp_id == hw->phy.sfp_type) {
667 (*list_offset)++;
668 hw->eeprom.ops.read(hw, *list_offset, data_offset);
669 if ((!*data_offset) || (*data_offset == 0xFFFF)) {
670 hw_dbg(hw, "SFP+ module not supported\n");
671 return IXGBE_ERR_SFP_NOT_SUPPORTED;
672 } else {
673 break;
674 }
675 } else {
676 (*list_offset) += 2;
677 if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
678 return IXGBE_ERR_PHY;
679 }
680 }
681
682 if (sfp_id == IXGBE_PHY_INIT_END_NL) {
683 hw_dbg(hw, "No matching SFP+ module found\n");
684 return IXGBE_ERR_SFP_NOT_SUPPORTED;
685 }
686
687 return 0;
688}
689
690/**
691 * ixgbe_check_phy_link_tnx - Determine link and speed status
692 * @hw: pointer to hardware structure
693 *
694 * Reads the VS1 register to determine if link is up and the current speed for
695 * the PHY.
696 **/
697s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
698 bool *link_up)
699{
700 s32 status = 0;
701 u32 time_out;
702 u32 max_time_out = 10;
703 u16 phy_link = 0;
704 u16 phy_speed = 0;
705 u16 phy_data = 0;
706
707 /* Initialize speed and link to default case */
708 *link_up = false;
709 *speed = IXGBE_LINK_SPEED_10GB_FULL;
710
711 /*
712 * Check current speed and link status of the PHY register.
713 * This is a vendor specific register and may have to
714 * be changed for other copper PHYs.
715 */
716 for (time_out = 0; time_out < max_time_out; time_out++) {
717 udelay(10);
718 status = hw->phy.ops.read_reg(hw,
719 IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
720 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
721 &phy_data);
722 phy_link = phy_data &
723 IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
724 phy_speed = phy_data &
725 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
726 if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
727 *link_up = true;
728 if (phy_speed ==
729 IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
730 *speed = IXGBE_LINK_SPEED_1GB_FULL;
731 break;
732 }
733 }
734
735 return status;
736}
737
738/**
739 * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
740 * @hw: pointer to hardware structure
741 * @firmware_version: pointer to the PHY Firmware Version
742 **/
743s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
744 u16 *firmware_version)
745{
746 s32 status = 0;
747
748 status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
749 IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
750 firmware_version);
751
752 return status;
753}
754
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 9bfe3f2b1d8f..43a97bc420f5 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -63,6 +63,18 @@
63#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 63#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500
64#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 64#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00
65 65
66/* I2C SDA and SCL timing parameters for standard mode */
67#define IXGBE_I2C_T_HD_STA 4
68#define IXGBE_I2C_T_LOW 5
69#define IXGBE_I2C_T_HIGH 4
70#define IXGBE_I2C_T_SU_STA 5
71#define IXGBE_I2C_T_HD_DATA 5
72#define IXGBE_I2C_T_SU_DATA 1
73#define IXGBE_I2C_T_RISE 1
74#define IXGBE_I2C_T_FALL 1
75#define IXGBE_I2C_T_SU_STO 4
76#define IXGBE_I2C_T_BUF 5
77
66 78
67s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); 79s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
68s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); 80s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
@@ -77,4 +89,17 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
77 bool autoneg, 89 bool autoneg,
78 bool autoneg_wait_to_complete); 90 bool autoneg_wait_to_complete);
79 91
92/* PHY specific */
93s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
94 ixgbe_link_speed *speed,
95 bool *link_up);
96s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
97 u16 *firmware_version);
98
99s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
100s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
101s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
102 u16 *list_offset,
103 u16 *data_offset);
104
80#endif /* _IXGBE_PHY_H_ */ 105#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index c6f8fa1c4e59..83a11ff9ffd1 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -36,8 +36,12 @@
36/* Device IDs */ 36/* Device IDs */
37#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 37#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6
38#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 38#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
39#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB
40#define IXGBE_DEV_ID_82598AT 0x10C8
39#define IXGBE_DEV_ID_82598EB_CX4 0x10DD 41#define IXGBE_DEV_ID_82598EB_CX4 0x10DD
40#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC 42#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
43#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1
44#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1
41#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 45#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4
42 46
43/* General Registers */ 47/* General Registers */
@@ -452,6 +456,7 @@
452#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 456#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
453#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 457#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
454#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ 458#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */
459#define IXGBE_TWINAX_DEV 1
455 460
456#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ 461#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */
457 462
@@ -487,12 +492,27 @@
487#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 492#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0
488#define IXGBE_MAX_PHY_ADDR 32 493#define IXGBE_MAX_PHY_ADDR 32
489 494
490/* PHY IDs*/ 495/* PHY IDs */
496#define TN1010_PHY_ID 0x00A19410
497#define TNX_FW_REV 0xB
491#define QT2022_PHY_ID 0x0043A400 498#define QT2022_PHY_ID 0x0043A400
499#define ATH_PHY_ID 0x03429050
492 500
493/* PHY Types */ 501/* PHY Types */
494#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 502#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
495 503
504/* Special PHY Init Routine */
505#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
506#define IXGBE_PHY_INIT_END_NL 0xFFFF
507#define IXGBE_CONTROL_MASK_NL 0xF000
508#define IXGBE_DATA_MASK_NL 0x0FFF
509#define IXGBE_CONTROL_SHIFT_NL 12
510#define IXGBE_DELAY_NL 0
511#define IXGBE_DATA_NL 1
512#define IXGBE_CONTROL_NL 0x000F
513#define IXGBE_CONTROL_EOL_NL 0x0FFF
514#define IXGBE_CONTROL_SOL_NL 0x0000
515
496/* General purpose Interrupt Enable */ 516/* General purpose Interrupt Enable */
497#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ 517#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */
498#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ 518#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */
@@ -1202,8 +1222,10 @@ enum ixgbe_mac_type {
1202 1222
1203enum ixgbe_phy_type { 1223enum ixgbe_phy_type {
1204 ixgbe_phy_unknown = 0, 1224 ixgbe_phy_unknown = 0,
1225 ixgbe_phy_tn,
1205 ixgbe_phy_qt, 1226 ixgbe_phy_qt,
1206 ixgbe_phy_xaui, 1227 ixgbe_phy_xaui,
1228 ixgbe_phy_nl,
1207 ixgbe_phy_tw_tyco, 1229 ixgbe_phy_tw_tyco,
1208 ixgbe_phy_tw_unknown, 1230 ixgbe_phy_tw_unknown,
1209 ixgbe_phy_sfp_avago, 1231 ixgbe_phy_sfp_avago,
@@ -1225,6 +1247,7 @@ enum ixgbe_sfp_type {
1225 ixgbe_sfp_type_da_cu = 0, 1247 ixgbe_sfp_type_da_cu = 0,
1226 ixgbe_sfp_type_sr = 1, 1248 ixgbe_sfp_type_sr = 1,
1227 ixgbe_sfp_type_lr = 2, 1249 ixgbe_sfp_type_lr = 2,
1250 ixgbe_sfp_type_not_present = 0xFFFE,
1228 ixgbe_sfp_type_unknown = 0xFFFF 1251 ixgbe_sfp_type_unknown = 0xFFFF
1229}; 1252};
1230 1253
@@ -1396,6 +1419,8 @@ struct ixgbe_phy_operations {
1396 s32 (*setup_link)(struct ixgbe_hw *); 1419 s32 (*setup_link)(struct ixgbe_hw *);
1397 s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, 1420 s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
1398 bool); 1421 bool);
1422 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
1423 s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
1399 s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); 1424 s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
1400 s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); 1425 s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
1401 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); 1426 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
@@ -1486,6 +1511,7 @@ struct ixgbe_info {
1486#define IXGBE_ERR_PHY_ADDR_INVALID -17 1511#define IXGBE_ERR_PHY_ADDR_INVALID -17
1487#define IXGBE_ERR_I2C -18 1512#define IXGBE_ERR_I2C -18
1488#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 1513#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
1514#define IXGBE_ERR_SFP_NOT_PRESENT -20
1489#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 1515#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
1490 1516
1491#endif /* _IXGBE_TYPE_H_ */ 1517#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 7b70c66504a0..014745720560 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -114,8 +114,6 @@ static int ixpdev_rx(struct net_device *dev, int processed, int budget)
114 skb_put(skb, desc->pkt_length); 114 skb_put(skb, desc->pkt_length);
115 skb->protocol = eth_type_trans(skb, nds[desc->channel]); 115 skb->protocol = eth_type_trans(skb, nds[desc->channel]);
116 116
117 dev->last_rx = jiffies;
118
119 netif_receive_skb(skb); 117 netif_receive_skb(skb);
120 } 118 }
121 119
@@ -143,7 +141,7 @@ static int ixpdev_poll(struct napi_struct *napi, int budget)
143 break; 141 break;
144 } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff); 142 } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
145 143
146 netif_rx_complete(dev, napi); 144 netif_rx_complete(napi);
147 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff); 145 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
148 146
149 return rx; 147 return rx;
@@ -206,7 +204,7 @@ static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
206 204
207 ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff); 205 ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
208 if (likely(napi_schedule_prep(&ip->napi))) { 206 if (likely(napi_schedule_prep(&ip->napi))) {
209 __netif_rx_schedule(dev, &ip->napi); 207 __netif_rx_schedule(&ip->napi);
210 } else { 208 } else {
211 printk(KERN_CRIT "ixp2000: irq while polling!!\n"); 209 printk(KERN_CRIT "ixp2000: irq while polling!!\n");
212 } 210 }
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index 07944820f745..334ff9e12cdd 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -208,7 +208,6 @@ static int __init jazz_sonic_probe(struct platform_device *pdev)
208 struct sonic_local *lp; 208 struct sonic_local *lp;
209 struct resource *res; 209 struct resource *res;
210 int err = 0; 210 int err = 0;
211 DECLARE_MAC_BUF(mac);
212 211
213 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 212 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
214 if (!res) 213 if (!res)
@@ -233,8 +232,7 @@ static int __init jazz_sonic_probe(struct platform_device *pdev)
233 if (err) 232 if (err)
234 goto out1; 233 goto out1;
235 234
236 printk("%s: MAC %s IRQ %d\n", 235 printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq);
237 dev->name, print_mac(mac, dev->dev_addr), dev->irq);
238 236
239 return 0; 237 return 0;
240 238
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 665e70d620fc..08b34051c646 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -435,15 +435,18 @@ jme_check_link(struct net_device *netdev, int testonly)
435 GHC_DPX); 435 GHC_DPX);
436 switch (phylink & PHY_LINK_SPEED_MASK) { 436 switch (phylink & PHY_LINK_SPEED_MASK) {
437 case PHY_LINK_SPEED_10M: 437 case PHY_LINK_SPEED_10M:
438 ghc |= GHC_SPEED_10M; 438 ghc |= GHC_SPEED_10M |
439 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
439 strcat(linkmsg, "10 Mbps, "); 440 strcat(linkmsg, "10 Mbps, ");
440 break; 441 break;
441 case PHY_LINK_SPEED_100M: 442 case PHY_LINK_SPEED_100M:
442 ghc |= GHC_SPEED_100M; 443 ghc |= GHC_SPEED_100M |
444 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
443 strcat(linkmsg, "100 Mbps, "); 445 strcat(linkmsg, "100 Mbps, ");
444 break; 446 break;
445 case PHY_LINK_SPEED_1000M: 447 case PHY_LINK_SPEED_1000M:
446 ghc |= GHC_SPEED_1000M; 448 ghc |= GHC_SPEED_1000M |
449 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
447 strcat(linkmsg, "1000 Mbps, "); 450 strcat(linkmsg, "1000 Mbps, ");
448 break; 451 break;
449 default: 452 default:
@@ -463,14 +466,6 @@ jme_check_link(struct net_device *netdev, int testonly)
463 TXTRHD_TXREN | 466 TXTRHD_TXREN |
464 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL)); 467 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
465 } 468 }
466 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
467 "Full-Duplex, " :
468 "Half-Duplex, ");
469
470 if (phylink & PHY_LINK_MDI_STAT)
471 strcat(linkmsg, "MDI-X");
472 else
473 strcat(linkmsg, "MDI");
474 469
475 gpreg1 = GPREG1_DEFAULT; 470 gpreg1 = GPREG1_DEFAULT;
476 if (is_buggy250(jme->pdev->device, jme->chiprev)) { 471 if (is_buggy250(jme->pdev->device, jme->chiprev)) {
@@ -492,11 +487,17 @@ jme_check_link(struct net_device *netdev, int testonly)
492 break; 487 break;
493 } 488 }
494 } 489 }
495 jwrite32(jme, JME_GPREG1, gpreg1);
496 490
497 jme->reg_ghc = ghc; 491 jwrite32(jme, JME_GPREG1, gpreg1);
498 jwrite32(jme, JME_GHC, ghc); 492 jwrite32(jme, JME_GHC, ghc);
493 jme->reg_ghc = ghc;
499 494
495 strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
496 "Full-Duplex, " :
497 "Half-Duplex, ");
498 strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
499 "MDI-X" :
500 "MDI");
500 msg_link(jme, "Link is up at %s.\n", linkmsg); 501 msg_link(jme, "Link is up at %s.\n", linkmsg);
501 netif_carrier_on(netdev); 502 netif_carrier_on(netdev);
502 } else { 503 } else {
@@ -931,7 +932,6 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
931 cpu_to_le16(RXWBFLAG_DEST_MUL)) 932 cpu_to_le16(RXWBFLAG_DEST_MUL))
932 ++(NET_STAT(jme).multicast); 933 ++(NET_STAT(jme).multicast);
933 934
934 jme->dev->last_rx = jiffies;
935 NET_STAT(jme).rx_bytes += framesize; 935 NET_STAT(jme).rx_bytes += framesize;
936 ++(NET_STAT(jme).rx_packets); 936 ++(NET_STAT(jme).rx_packets);
937 } 937 }
@@ -1250,7 +1250,6 @@ static int
1250jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget)) 1250jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
1251{ 1251{
1252 struct jme_adapter *jme = jme_napi_priv(holder); 1252 struct jme_adapter *jme = jme_napi_priv(holder);
1253 struct net_device *netdev = jme->dev;
1254 int rest; 1253 int rest;
1255 1254
1256 rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget)); 1255 rest = jme_process_receive(jme, JME_NAPI_WEIGHT_VAL(budget));
@@ -2591,14 +2590,6 @@ static const struct ethtool_ops jme_ethtool_ops = {
2591static int 2590static int
2592jme_pci_dma64(struct pci_dev *pdev) 2591jme_pci_dma64(struct pci_dev *pdev)
2593{ 2592{
2594 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
2595 if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
2596 return 1;
2597
2598 if (!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
2599 if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
2600 return 1;
2601
2602 if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) 2593 if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
2603 if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) 2594 if (!pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
2604 return 0; 2595 return 0;
@@ -2626,6 +2617,18 @@ jme_check_hw_ver(struct jme_adapter *jme)
2626 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; 2617 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
2627} 2618}
2628 2619
2620static const struct net_device_ops jme_netdev_ops = {
2621 .ndo_open = jme_open,
2622 .ndo_stop = jme_close,
2623 .ndo_validate_addr = eth_validate_addr,
2624 .ndo_start_xmit = jme_start_xmit,
2625 .ndo_set_mac_address = jme_set_macaddr,
2626 .ndo_set_multicast_list = jme_set_multi,
2627 .ndo_change_mtu = jme_change_mtu,
2628 .ndo_tx_timeout = jme_tx_timeout,
2629 .ndo_vlan_rx_register = jme_vlan_rx_register,
2630};
2631
2629static int __devinit 2632static int __devinit
2630jme_init_one(struct pci_dev *pdev, 2633jme_init_one(struct pci_dev *pdev,
2631 const struct pci_device_id *ent) 2634 const struct pci_device_id *ent)
@@ -2675,17 +2678,9 @@ jme_init_one(struct pci_dev *pdev,
2675 rc = -ENOMEM; 2678 rc = -ENOMEM;
2676 goto err_out_release_regions; 2679 goto err_out_release_regions;
2677 } 2680 }
2678 netdev->open = jme_open; 2681 netdev->netdev_ops = &jme_netdev_ops;
2679 netdev->stop = jme_close;
2680 netdev->hard_start_xmit = jme_start_xmit;
2681 netdev->set_mac_address = jme_set_macaddr;
2682 netdev->set_multicast_list = jme_set_multi;
2683 netdev->change_mtu = jme_change_mtu;
2684 netdev->ethtool_ops = &jme_ethtool_ops; 2682 netdev->ethtool_ops = &jme_ethtool_ops;
2685 netdev->tx_timeout = jme_tx_timeout;
2686 netdev->watchdog_timeo = TX_TIMEOUT; 2683 netdev->watchdog_timeo = TX_TIMEOUT;
2687 netdev->vlan_rx_register = jme_vlan_rx_register;
2688 NETDEV_GET_STATS(netdev, &jme_get_stats);
2689 netdev->features = NETIF_F_HW_CSUM | 2684 netdev->features = NETIF_F_HW_CSUM |
2690 NETIF_F_SG | 2685 NETIF_F_SG |
2691 NETIF_F_TSO | 2686 NETIF_F_TSO |
@@ -2861,18 +2856,10 @@ jme_init_one(struct pci_dev *pdev,
2861 goto err_out_free_shadow; 2856 goto err_out_free_shadow;
2862 } 2857 }
2863 2858
2864 msg_probe(jme, 2859 msg_probe(jme, "JMC250 gigabit%s ver:%x rev:%x macaddr:%pM\n",
2865 "JMC250 gigabit%s ver:%x rev:%x "
2866 "macaddr:%02x:%02x:%02x:%02x:%02x:%02x\n",
2867 (jme->fpgaver != 0) ? " (FPGA)" : "", 2860 (jme->fpgaver != 0) ? " (FPGA)" : "",
2868 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev, 2861 (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
2869 jme->rev, 2862 jme->rev, netdev->dev_addr);
2870 netdev->dev_addr[0],
2871 netdev->dev_addr[1],
2872 netdev->dev_addr[2],
2873 netdev->dev_addr[3],
2874 netdev->dev_addr[4],
2875 netdev->dev_addr[5]);
2876 2863
2877 return 0; 2864 return 0;
2878 2865
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 3f5d91543246..5154411b5e6b 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -398,15 +398,15 @@ struct jme_ring {
398#define JME_NAPI_WEIGHT(w) int w 398#define JME_NAPI_WEIGHT(w) int w
399#define JME_NAPI_WEIGHT_VAL(w) w 399#define JME_NAPI_WEIGHT_VAL(w) w
400#define JME_NAPI_WEIGHT_SET(w, r) 400#define JME_NAPI_WEIGHT_SET(w, r)
401#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(dev, napis) 401#define JME_RX_COMPLETE(dev, napis) netif_rx_complete(napis)
402#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi); 402#define JME_NAPI_ENABLE(priv) napi_enable(&priv->napi);
403#define JME_NAPI_DISABLE(priv) \ 403#define JME_NAPI_DISABLE(priv) \
404 if (!napi_disable_pending(&priv->napi)) \ 404 if (!napi_disable_pending(&priv->napi)) \
405 napi_disable(&priv->napi); 405 napi_disable(&priv->napi);
406#define JME_RX_SCHEDULE_PREP(priv) \ 406#define JME_RX_SCHEDULE_PREP(priv) \
407 netif_rx_schedule_prep(priv->dev, &priv->napi) 407 netif_rx_schedule_prep(&priv->napi)
408#define JME_RX_SCHEDULE(priv) \ 408#define JME_RX_SCHEDULE(priv) \
409 __netif_rx_schedule(priv->dev, &priv->napi); 409 __netif_rx_schedule(&priv->napi);
410 410
411/* 411/*
412 * Jmac Adapter Private data 412 * Jmac Adapter Private data
@@ -815,16 +815,30 @@ static inline u32 smi_phy_addr(int x)
815 * Global Host Control 815 * Global Host Control
816 */ 816 */
817enum jme_ghc_bit_mask { 817enum jme_ghc_bit_mask {
818 GHC_SWRST = 0x40000000, 818 GHC_SWRST = 0x40000000,
819 GHC_DPX = 0x00000040, 819 GHC_DPX = 0x00000040,
820 GHC_SPEED = 0x00000030, 820 GHC_SPEED = 0x00000030,
821 GHC_LINK_POLL = 0x00000001, 821 GHC_LINK_POLL = 0x00000001,
822}; 822};
823 823
824enum jme_ghc_speed_val { 824enum jme_ghc_speed_val {
825 GHC_SPEED_10M = 0x00000010, 825 GHC_SPEED_10M = 0x00000010,
826 GHC_SPEED_100M = 0x00000020, 826 GHC_SPEED_100M = 0x00000020,
827 GHC_SPEED_1000M = 0x00000030, 827 GHC_SPEED_1000M = 0x00000030,
828};
829
830enum jme_ghc_to_clk {
831 GHC_TO_CLK_OFF = 0x00000000,
832 GHC_TO_CLK_GPHY = 0x00400000,
833 GHC_TO_CLK_PCIE = 0x00800000,
834 GHC_TO_CLK_INVALID = 0x00C00000,
835};
836
837enum jme_ghc_txmac_clk {
838 GHC_TXMAC_CLK_OFF = 0x00000000,
839 GHC_TXMAC_CLK_GPHY = 0x00100000,
840 GHC_TXMAC_CLK_PCIE = 0x00200000,
841 GHC_TXMAC_CLK_INVALID = 0x00300000,
828}; 842};
829 843
830/* 844/*
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index e18576316bda..4a5580c1126a 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -327,7 +327,7 @@ static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
327 327
328 dmas = readl(&lp->rx_dma_regs->dmas); 328 dmas = readl(&lp->rx_dma_regs->dmas);
329 if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) { 329 if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
330 netif_rx_schedule_prep(dev, &lp->napi); 330 netif_rx_schedule_prep(&lp->napi);
331 331
332 dmasm = readl(&lp->rx_dma_regs->dmasm); 332 dmasm = readl(&lp->rx_dma_regs->dmasm);
333 writel(dmasm | (DMA_STAT_DONE | 333 writel(dmasm | (DMA_STAT_DONE |
@@ -409,7 +409,6 @@ static int korina_rx(struct net_device *dev, int limit)
409 409
410 /* Pass the packet to upper layers */ 410 /* Pass the packet to upper layers */
411 netif_receive_skb(skb); 411 netif_receive_skb(skb);
412 dev->last_rx = jiffies;
413 dev->stats.rx_packets++; 412 dev->stats.rx_packets++;
414 dev->stats.rx_bytes += pkt_len; 413 dev->stats.rx_bytes += pkt_len;
415 414
@@ -467,7 +466,7 @@ static int korina_poll(struct napi_struct *napi, int budget)
467 466
468 work_done = korina_rx(dev, budget); 467 work_done = korina_rx(dev, budget);
469 if (work_done < budget) { 468 if (work_done < budget) {
470 netif_rx_complete(dev, napi); 469 netif_rx_complete(napi);
471 470
472 writel(readl(&lp->rx_dma_regs->dmasm) & 471 writel(readl(&lp->rx_dma_regs->dmasm) &
473 ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR), 472 ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 977ed3401bb3..d7afb938ea62 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -359,7 +359,7 @@ int __init init_module(void)
359 359
360static void cleanup_card(struct net_device *dev) 360static void cleanup_card(struct net_device *dev)
361{ 361{
362 struct lance_private *lp = dev->priv; 362 struct lance_private *lp = dev->ml_priv;
363 if (dev->dma != 4) 363 if (dev->dma != 4)
364 free_dma(dev->dma); 364 free_dma(dev->dma);
365 release_region(dev->base_addr, LANCE_TOTAL_SIZE); 365 release_region(dev->base_addr, LANCE_TOTAL_SIZE);
@@ -418,7 +418,7 @@ static int __init do_lance_probe(struct net_device *dev)
418 if (card < NUM_CARDS) { /*Signature OK*/ 418 if (card < NUM_CARDS) { /*Signature OK*/
419 result = lance_probe1(dev, ioaddr, 0, 0); 419 result = lance_probe1(dev, ioaddr, 0, 0);
420 if (!result) { 420 if (!result) {
421 struct lance_private *lp = dev->priv; 421 struct lance_private *lp = dev->ml_priv;
422 int ver = lp->chip_version; 422 int ver = lp->chip_version;
423 423
424 r->name = chip_table[ver].name; 424 r->name = chip_table[ver].name;
@@ -466,7 +466,6 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
466 unsigned long flags; 466 unsigned long flags;
467 int err = -ENOMEM; 467 int err = -ENOMEM;
468 void __iomem *bios; 468 void __iomem *bios;
469 DECLARE_MAC_BUF(mac);
470 469
471 /* First we look for special cases. 470 /* First we look for special cases.
472 Check for HP's on-board ethernet by looking for 'HP' in the BIOS. 471 Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
@@ -520,7 +519,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
520 } 519 }
521 } 520 }
522 521
523 /* We can't allocate dev->priv from alloc_etherdev() because it must 522 /* We can't allocate private data from alloc_etherdev() because it must
524 a ISA DMA-able region. */ 523 a ISA DMA-able region. */
525 chipname = chip_table[lance_version].name; 524 chipname = chip_table[lance_version].name;
526 printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr); 525 printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);
@@ -529,7 +528,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
529 The first six bytes are the station address. */ 528 The first six bytes are the station address. */
530 for (i = 0; i < 6; i++) 529 for (i = 0; i < 6; i++)
531 dev->dev_addr[i] = inb(ioaddr + i); 530 dev->dev_addr[i] = inb(ioaddr + i);
532 printk("%s", print_mac(mac, dev->dev_addr)); 531 printk("%pM", dev->dev_addr);
533 532
534 dev->base_addr = ioaddr; 533 dev->base_addr = ioaddr;
535 /* Make certain the data structures used by the LANCE are aligned and DMAble. */ 534 /* Make certain the data structures used by the LANCE are aligned and DMAble. */
@@ -538,7 +537,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
538 if(lp==NULL) 537 if(lp==NULL)
539 return -ENODEV; 538 return -ENODEV;
540 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); 539 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
541 dev->priv = lp; 540 dev->ml_priv = lp;
542 lp->name = chipname; 541 lp->name = chipname;
543 lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE, 542 lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
544 GFP_DMA | GFP_KERNEL); 543 GFP_DMA | GFP_KERNEL);
@@ -742,7 +741,7 @@ out_lp:
742static int 741static int
743lance_open(struct net_device *dev) 742lance_open(struct net_device *dev)
744{ 743{
745 struct lance_private *lp = dev->priv; 744 struct lance_private *lp = dev->ml_priv;
746 int ioaddr = dev->base_addr; 745 int ioaddr = dev->base_addr;
747 int i; 746 int i;
748 747
@@ -830,7 +829,7 @@ lance_open(struct net_device *dev)
830static void 829static void
831lance_purge_ring(struct net_device *dev) 830lance_purge_ring(struct net_device *dev)
832{ 831{
833 struct lance_private *lp = dev->priv; 832 struct lance_private *lp = dev->ml_priv;
834 int i; 833 int i;
835 834
836 /* Free all the skbuffs in the Rx and Tx queues. */ 835 /* Free all the skbuffs in the Rx and Tx queues. */
@@ -854,7 +853,7 @@ lance_purge_ring(struct net_device *dev)
854static void 853static void
855lance_init_ring(struct net_device *dev, gfp_t gfp) 854lance_init_ring(struct net_device *dev, gfp_t gfp)
856{ 855{
857 struct lance_private *lp = dev->priv; 856 struct lance_private *lp = dev->ml_priv;
858 int i; 857 int i;
859 858
860 lp->cur_rx = lp->cur_tx = 0; 859 lp->cur_rx = lp->cur_tx = 0;
@@ -896,7 +895,7 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
896static void 895static void
897lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit) 896lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
898{ 897{
899 struct lance_private *lp = dev->priv; 898 struct lance_private *lp = dev->ml_priv;
900 899
901 if (must_reinit || 900 if (must_reinit ||
902 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) { 901 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
@@ -910,7 +909,7 @@ lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
910 909
911static void lance_tx_timeout (struct net_device *dev) 910static void lance_tx_timeout (struct net_device *dev)
912{ 911{
913 struct lance_private *lp = (struct lance_private *) dev->priv; 912 struct lance_private *lp = (struct lance_private *) dev->ml_priv;
914 int ioaddr = dev->base_addr; 913 int ioaddr = dev->base_addr;
915 914
916 outw (0, ioaddr + LANCE_ADDR); 915 outw (0, ioaddr + LANCE_ADDR);
@@ -944,7 +943,7 @@ static void lance_tx_timeout (struct net_device *dev)
944 943
945static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) 944static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
946{ 945{
947 struct lance_private *lp = dev->priv; 946 struct lance_private *lp = dev->ml_priv;
948 int ioaddr = dev->base_addr; 947 int ioaddr = dev->base_addr;
949 int entry; 948 int entry;
950 unsigned long flags; 949 unsigned long flags;
@@ -1021,7 +1020,7 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id)
1021 int must_restart; 1020 int must_restart;
1022 1021
1023 ioaddr = dev->base_addr; 1022 ioaddr = dev->base_addr;
1024 lp = dev->priv; 1023 lp = dev->ml_priv;
1025 1024
1026 spin_lock (&lp->devlock); 1025 spin_lock (&lp->devlock);
1027 1026
@@ -1134,7 +1133,7 @@ static irqreturn_t lance_interrupt(int irq, void *dev_id)
1134static int 1133static int
1135lance_rx(struct net_device *dev) 1134lance_rx(struct net_device *dev)
1136{ 1135{
1137 struct lance_private *lp = dev->priv; 1136 struct lance_private *lp = dev->ml_priv;
1138 int entry = lp->cur_rx & RX_RING_MOD_MASK; 1137 int entry = lp->cur_rx & RX_RING_MOD_MASK;
1139 int i; 1138 int i;
1140 1139
@@ -1191,7 +1190,6 @@ lance_rx(struct net_device *dev)
1191 pkt_len); 1190 pkt_len);
1192 skb->protocol=eth_type_trans(skb,dev); 1191 skb->protocol=eth_type_trans(skb,dev);
1193 netif_rx(skb); 1192 netif_rx(skb);
1194 dev->last_rx = jiffies;
1195 lp->stats.rx_packets++; 1193 lp->stats.rx_packets++;
1196 lp->stats.rx_bytes+=pkt_len; 1194 lp->stats.rx_bytes+=pkt_len;
1197 } 1195 }
@@ -1213,7 +1211,7 @@ static int
1213lance_close(struct net_device *dev) 1211lance_close(struct net_device *dev)
1214{ 1212{
1215 int ioaddr = dev->base_addr; 1213 int ioaddr = dev->base_addr;
1216 struct lance_private *lp = dev->priv; 1214 struct lance_private *lp = dev->ml_priv;
1217 1215
1218 netif_stop_queue (dev); 1216 netif_stop_queue (dev);
1219 1217
@@ -1246,7 +1244,7 @@ lance_close(struct net_device *dev)
1246 1244
1247static struct net_device_stats *lance_get_stats(struct net_device *dev) 1245static struct net_device_stats *lance_get_stats(struct net_device *dev)
1248{ 1246{
1249 struct lance_private *lp = dev->priv; 1247 struct lance_private *lp = dev->ml_priv;
1250 1248
1251 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) { 1249 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1252 short ioaddr = dev->base_addr; 1250 short ioaddr = dev->base_addr;
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index b59f442bbf36..7415f517491d 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -739,7 +739,6 @@ memory_squeeze:
739 skb->len = pkt_len; 739 skb->len = pkt_len;
740 skb->protocol = eth_type_trans(skb, dev); 740 skb->protocol = eth_type_trans(skb, dev);
741 netif_rx(skb); 741 netif_rx(skb);
742 dev->last_rx = jiffies;
743 dev->stats.rx_packets++; 742 dev->stats.rx_packets++;
744 dev->stats.rx_bytes += pkt_len; 743 dev->stats.rx_bytes += pkt_len;
745 } 744 }
@@ -1034,12 +1033,8 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1034 1033
1035static void print_eth(unsigned char *add, char *str) 1034static void print_eth(unsigned char *add, char *str)
1036{ 1035{
1037 DECLARE_MAC_BUF(mac); 1036 printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
1038 DECLARE_MAC_BUF(mac2); 1037 add, add + 6, add, add[12], add[13], str);
1039
1040 printk(KERN_DEBUG "i596 0x%p, %s --> %s %02X%02X, %s\n",
1041 add, print_mac(mac, add + 6), print_mac(mac2, add),
1042 add[12], add[13], str);
1043} 1038}
1044 1039
1045static int __devinit i82596_probe(struct net_device *dev) 1040static int __devinit i82596_probe(struct net_device *dev)
@@ -1343,7 +1338,6 @@ static void set_multicast_list(struct net_device *dev)
1343 struct i596_private *lp = netdev_priv(dev); 1338 struct i596_private *lp = netdev_priv(dev);
1344 struct i596_dma *dma = lp->dma; 1339 struct i596_dma *dma = lp->dma;
1345 int config = 0, cnt; 1340 int config = 0, cnt;
1346 DECLARE_MAC_BUF(mac);
1347 1341
1348 DEB(DEB_MULTI, 1342 DEB(DEB_MULTI,
1349 printk(KERN_DEBUG 1343 printk(KERN_DEBUG
@@ -1407,8 +1401,8 @@ static void set_multicast_list(struct net_device *dev)
1407 if (i596_debug > 1) 1401 if (i596_debug > 1)
1408 DEB(DEB_MULTI, 1402 DEB(DEB_MULTI,
1409 printk(KERN_DEBUG 1403 printk(KERN_DEBUG
1410 "%s: Adding address %s\n", 1404 "%s: Adding address %pM\n",
1411 dev->name, print_mac(mac, cp))); 1405 dev->name, cp));
1412 } 1406 }
1413 DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd)); 1407 DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
1414 i596_add_cmd(dev, &cmd->cmd); 1408 i596_add_cmd(dev, &cmd->cmd);
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index f80dcc11fe26..789b6cb744b2 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -108,14 +108,13 @@ int ei_debug = 1;
108/* Index to functions. */ 108/* Index to functions. */
109static void ei_tx_intr(struct net_device *dev); 109static void ei_tx_intr(struct net_device *dev);
110static void ei_tx_err(struct net_device *dev); 110static void ei_tx_err(struct net_device *dev);
111static void ei_tx_timeout(struct net_device *dev); 111void ei_tx_timeout(struct net_device *dev);
112static void ei_receive(struct net_device *dev); 112static void ei_receive(struct net_device *dev);
113static void ei_rx_overrun(struct net_device *dev); 113static void ei_rx_overrun(struct net_device *dev);
114 114
115/* Routines generic to NS8390-based boards. */ 115/* Routines generic to NS8390-based boards. */
116static void NS8390_trigger_send(struct net_device *dev, unsigned int length, 116static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
117 int start_page); 117 int start_page);
118static void set_multicast_list(struct net_device *dev);
119static void do_set_multicast_list(struct net_device *dev); 118static void do_set_multicast_list(struct net_device *dev);
120static void __NS8390_init(struct net_device *dev, int startp); 119static void __NS8390_init(struct net_device *dev, int startp);
121 120
@@ -206,10 +205,6 @@ static int __ei_open(struct net_device *dev)
206 unsigned long flags; 205 unsigned long flags;
207 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 206 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
208 207
209 /* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
210 wrapper that does e.g. media check & then calls ei_tx_timeout. */
211 if (dev->tx_timeout == NULL)
212 dev->tx_timeout = ei_tx_timeout;
213 if (dev->watchdog_timeo <= 0) 208 if (dev->watchdog_timeo <= 0)
214 dev->watchdog_timeo = TX_TIMEOUT; 209 dev->watchdog_timeo = TX_TIMEOUT;
215 210
@@ -258,7 +253,7 @@ static int __ei_close(struct net_device *dev)
258 * completed (or failed) - i.e. never posted a Tx related interrupt. 253 * completed (or failed) - i.e. never posted a Tx related interrupt.
259 */ 254 */
260 255
261static void ei_tx_timeout(struct net_device *dev) 256static void __ei_tx_timeout(struct net_device *dev)
262{ 257{
263 unsigned long e8390_base = dev->base_addr; 258 unsigned long e8390_base = dev->base_addr;
264 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 259 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
@@ -304,7 +299,7 @@ static void ei_tx_timeout(struct net_device *dev)
304 * Sends a packet to an 8390 network device. 299 * Sends a packet to an 8390 network device.
305 */ 300 */
306 301
307static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) 302static int __ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
308{ 303{
309 unsigned long e8390_base = dev->base_addr; 304 unsigned long e8390_base = dev->base_addr;
310 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 305 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
@@ -764,7 +759,6 @@ static void ei_receive(struct net_device *dev)
764 ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); 759 ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
765 skb->protocol=eth_type_trans(skb,dev); 760 skb->protocol=eth_type_trans(skb,dev);
766 netif_rx(skb); 761 netif_rx(skb);
767 dev->last_rx = jiffies;
768 dev->stats.rx_packets++; 762 dev->stats.rx_packets++;
769 dev->stats.rx_bytes += pkt_len; 763 dev->stats.rx_bytes += pkt_len;
770 if (pkt_stat & ENRSR_PHY) 764 if (pkt_stat & ENRSR_PHY)
@@ -883,7 +877,7 @@ static void ei_rx_overrun(struct net_device *dev)
883 * Collect the stats. This is called unlocked and from several contexts. 877 * Collect the stats. This is called unlocked and from several contexts.
884 */ 878 */
885 879
886static struct net_device_stats *get_stats(struct net_device *dev) 880static struct net_device_stats *__ei_get_stats(struct net_device *dev)
887{ 881{
888 unsigned long ioaddr = dev->base_addr; 882 unsigned long ioaddr = dev->base_addr;
889 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 883 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
@@ -992,7 +986,7 @@ static void do_set_multicast_list(struct net_device *dev)
992 * not called too often. Must protect against both bh and irq users 986 * not called too often. Must protect against both bh and irq users
993 */ 987 */
994 988
995static void set_multicast_list(struct net_device *dev) 989static void __ei_set_multicast_list(struct net_device *dev)
996{ 990{
997 unsigned long flags; 991 unsigned long flags;
998 struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); 992 struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
@@ -1016,10 +1010,6 @@ static void ethdev_setup(struct net_device *dev)
1016 if (ei_debug > 1) 1010 if (ei_debug > 1)
1017 printk(version); 1011 printk(version);
1018 1012
1019 dev->hard_start_xmit = &ei_start_xmit;
1020 dev->get_stats = get_stats;
1021 dev->set_multicast_list = &set_multicast_list;
1022
1023 ether_setup(dev); 1013 ether_setup(dev);
1024 1014
1025 spin_lock_init(&ei_local->page_lock); 1015 spin_lock_init(&ei_local->page_lock);
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index b36989097883..41cbaaef0654 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -53,9 +53,6 @@ static const char *version =
53 53
54static int lne390_probe1(struct net_device *dev, int ioaddr); 54static int lne390_probe1(struct net_device *dev, int ioaddr);
55 55
56static int lne390_open(struct net_device *dev);
57static int lne390_close(struct net_device *dev);
58
59static void lne390_reset_8390(struct net_device *dev); 56static void lne390_reset_8390(struct net_device *dev);
60 57
61static void lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); 58static void lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
@@ -169,7 +166,6 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr)
169{ 166{
170 int i, revision, ret; 167 int i, revision, ret;
171 unsigned long eisa_id; 168 unsigned long eisa_id;
172 DECLARE_MAC_BUF(mac);
173 169
174 if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV; 170 if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV;
175 171
@@ -203,8 +199,8 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr)
203 199
204 for(i = 0; i < ETHER_ADDR_LEN; i++) 200 for(i = 0; i < ETHER_ADDR_LEN; i++)
205 dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i); 201 dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i);
206 printk("lne390.c: LNE390%X in EISA slot %d, address %s.\n", 202 printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n",
207 0xa+revision, ioaddr/0x1000, print_mac(mac, dev->dev_addr)); 203 0xa+revision, ioaddr/0x1000, dev->dev_addr);
208 204
209 printk("lne390.c: "); 205 printk("lne390.c: ");
210 206
@@ -279,11 +275,7 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr)
279 ei_status.block_output = &lne390_block_output; 275 ei_status.block_output = &lne390_block_output;
280 ei_status.get_8390_hdr = &lne390_get_8390_hdr; 276 ei_status.get_8390_hdr = &lne390_get_8390_hdr;
281 277
282 dev->open = &lne390_open; 278 dev->netdev_ops = &ei_netdev_ops;
283 dev->stop = &lne390_close;
284#ifdef CONFIG_NET_POLL_CONTROLLER
285 dev->poll_controller = ei_poll;
286#endif
287 NS8390_init(dev, 0); 279 NS8390_init(dev, 0);
288 280
289 ret = register_netdev(dev); 281 ret = register_netdev(dev);
@@ -375,21 +367,6 @@ static void lne390_block_output(struct net_device *dev, int count,
375 memcpy_toio(shmem, buf, count); 367 memcpy_toio(shmem, buf, count);
376} 368}
377 369
378static int lne390_open(struct net_device *dev)
379{
380 ei_open(dev);
381 return 0;
382}
383
384static int lne390_close(struct net_device *dev)
385{
386
387 if (ei_debug > 1)
388 printk("%s: Shutting down ethercard.\n", dev->name);
389
390 ei_close(dev);
391 return 0;
392}
393 370
394#ifdef MODULE 371#ifdef MODULE
395#define MAX_LNE_CARDS 4 /* Max number of LNE390 cards per module */ 372#define MAX_LNE_CARDS 4 /* Max number of LNE390 cards per module */
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index b1ac63ab8c16..b7d438a367f3 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -76,8 +76,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
76 76
77 skb->protocol = eth_type_trans(skb,dev); 77 skb->protocol = eth_type_trans(skb,dev);
78 78
79 dev->last_rx = jiffies;
80
81 /* it's OK to use per_cpu_ptr() because BHs are off */ 79 /* it's OK to use per_cpu_ptr() because BHs are off */
82 pcpu_lstats = dev->ml_priv; 80 pcpu_lstats = dev->ml_priv;
83 lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id()); 81 lb_stats = per_cpu_ptr(pcpu_lstats, smp_processor_id());
@@ -89,7 +87,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
89 return 0; 87 return 0;
90} 88}
91 89
92static struct net_device_stats *get_stats(struct net_device *dev) 90static struct net_device_stats *loopback_get_stats(struct net_device *dev)
93{ 91{
94 const struct pcpu_lstats *pcpu_lstats; 92 const struct pcpu_lstats *pcpu_lstats;
95 struct net_device_stats *stats = &dev->stats; 93 struct net_device_stats *stats = &dev->stats;
@@ -145,15 +143,19 @@ static void loopback_dev_free(struct net_device *dev)
145 free_netdev(dev); 143 free_netdev(dev);
146} 144}
147 145
146static const struct net_device_ops loopback_ops = {
147 .ndo_init = loopback_dev_init,
148 .ndo_start_xmit= loopback_xmit,
149 .ndo_get_stats = loopback_get_stats,
150};
151
148/* 152/*
149 * The loopback device is special. There is only one instance 153 * The loopback device is special. There is only one instance
150 * per network namespace. 154 * per network namespace.
151 */ 155 */
152static void loopback_setup(struct net_device *dev) 156static void loopback_setup(struct net_device *dev)
153{ 157{
154 dev->get_stats = &get_stats;
155 dev->mtu = (16 * 1024) + 20 + 20 + 12; 158 dev->mtu = (16 * 1024) + 20 + 20 + 12;
156 dev->hard_start_xmit = loopback_xmit;
157 dev->hard_header_len = ETH_HLEN; /* 14 */ 159 dev->hard_header_len = ETH_HLEN; /* 14 */
158 dev->addr_len = ETH_ALEN; /* 6 */ 160 dev->addr_len = ETH_ALEN; /* 6 */
159 dev->tx_queue_len = 0; 161 dev->tx_queue_len = 0;
@@ -167,8 +169,8 @@ static void loopback_setup(struct net_device *dev)
167 | NETIF_F_NETNS_LOCAL; 169 | NETIF_F_NETNS_LOCAL;
168 dev->ethtool_ops = &loopback_ethtool_ops; 170 dev->ethtool_ops = &loopback_ethtool_ops;
169 dev->header_ops = &eth_header_ops; 171 dev->header_ops = &eth_header_ops;
170 dev->init = loopback_dev_init; 172 dev->netdev_ops = &loopback_ops;
171 dev->destructor = loopback_dev_free; 173 dev->destructor = loopback_dev_free;
172} 174}
173 175
174/* Setup and register the loopback device. */ 176/* Setup and register the loopback device. */
@@ -206,17 +208,8 @@ static __net_exit void loopback_net_exit(struct net *net)
206 unregister_netdev(dev); 208 unregister_netdev(dev);
207} 209}
208 210
209static struct pernet_operations __net_initdata loopback_net_ops = { 211/* Registered in net/core/dev.c */
212struct pernet_operations __net_initdata loopback_net_ops = {
210 .init = loopback_net_init, 213 .init = loopback_net_init,
211 .exit = loopback_net_exit, 214 .exit = loopback_net_exit,
212}; 215};
213
214static int __init loopback_init(void)
215{
216 return register_pernet_device(&loopback_net_ops);
217}
218
219/* Loopback is special. It should be initialized before any other network
220 * device and network subsystem.
221 */
222fs_initcall(loopback_init);
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 83fa9d82a004..4d1a059921c6 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -390,7 +390,7 @@ i596_timeout(struct net_device *dev, char *msg, int ct) {
390 struct i596_private *lp; 390 struct i596_private *lp;
391 int boguscnt = ct; 391 int boguscnt = ct;
392 392
393 lp = (struct i596_private *) dev->priv; 393 lp = netdev_priv(dev);
394 while (lp->scb.command) { 394 while (lp->scb.command) {
395 if (--boguscnt == 0) { 395 if (--boguscnt == 0) {
396 printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n", 396 printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n",
@@ -411,7 +411,7 @@ init_rx_bufs(struct net_device *dev, int num) {
411 int i; 411 int i;
412 // struct i596_rbd *rbd; 412 // struct i596_rbd *rbd;
413 413
414 lp = (struct i596_private *) dev->priv; 414 lp = netdev_priv(dev);
415 lp->scb.pa_rfd = I596_NULL; 415 lp->scb.pa_rfd = I596_NULL;
416 416
417 for (i = 0; i < num; i++) { 417 for (i = 0; i < num; i++) {
@@ -468,7 +468,7 @@ remove_rx_bufs(struct net_device *dev) {
468 struct i596_private *lp; 468 struct i596_private *lp;
469 struct i596_rfd *rfd; 469 struct i596_rfd *rfd;
470 470
471 lp = (struct i596_private *) dev->priv; 471 lp = netdev_priv(dev);
472 lp->rx_tail->pa_next = I596_NULL; 472 lp->rx_tail->pa_next = I596_NULL;
473 473
474 do { 474 do {
@@ -517,7 +517,7 @@ CLEAR_INT(void) {
517/* selftest or dump */ 517/* selftest or dump */
518static void 518static void
519i596_port_do(struct net_device *dev, int portcmd, char *cmdname) { 519i596_port_do(struct net_device *dev, int portcmd, char *cmdname) {
520 struct i596_private *lp = dev->priv; 520 struct i596_private *lp = netdev_priv(dev);
521 u16 *outp; 521 u16 *outp;
522 int i, m; 522 int i, m;
523 523
@@ -541,7 +541,7 @@ i596_port_do(struct net_device *dev, int portcmd, char *cmdname) {
541 541
542static int 542static int
543i596_scp_setup(struct net_device *dev) { 543i596_scp_setup(struct net_device *dev) {
544 struct i596_private *lp = dev->priv; 544 struct i596_private *lp = netdev_priv(dev);
545 int boguscnt; 545 int boguscnt;
546 546
547 /* Setup SCP, ISCP, SCB */ 547 /* Setup SCP, ISCP, SCB */
@@ -622,7 +622,7 @@ init_i596(struct net_device *dev) {
622 if (i596_scp_setup(dev)) 622 if (i596_scp_setup(dev))
623 return 1; 623 return 1;
624 624
625 lp = (struct i596_private *) dev->priv; 625 lp = netdev_priv(dev);
626 lp->scb.command = 0; 626 lp->scb.command = 0;
627 627
628 memcpy ((void *)lp->i596_config, init_setup, 14); 628 memcpy ((void *)lp->i596_config, init_setup, 14);
@@ -676,7 +676,6 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp,
676 676
677 skb->protocol = eth_type_trans(skb,dev); 677 skb->protocol = eth_type_trans(skb,dev);
678 netif_rx(skb); 678 netif_rx(skb);
679 dev->last_rx = jiffies;
680 dev->stats.rx_packets++; 679 dev->stats.rx_packets++;
681 } else { 680 } else {
682#if 0 681#if 0
@@ -705,7 +704,7 @@ i596_rx_one(struct net_device *dev, struct i596_private *lp,
705 704
706static int 705static int
707i596_rx(struct net_device *dev) { 706i596_rx(struct net_device *dev) {
708 struct i596_private *lp = (struct i596_private *) dev->priv; 707 struct i596_private *lp = netdev_priv(dev);
709 struct i596_rfd *rfd; 708 struct i596_rfd *rfd;
710 int frames = 0; 709 int frames = 0;
711 710
@@ -738,7 +737,7 @@ i596_cleanup_cmd(struct net_device *dev) {
738 struct i596_private *lp; 737 struct i596_private *lp;
739 struct i596_cmd *cmd; 738 struct i596_cmd *cmd;
740 739
741 lp = (struct i596_private *) dev->priv; 740 lp = netdev_priv(dev);
742 while (lp->cmd_head) { 741 while (lp->cmd_head) {
743 cmd = (struct i596_cmd *)lp->cmd_head; 742 cmd = (struct i596_cmd *)lp->cmd_head;
744 743
@@ -806,7 +805,7 @@ static void i596_reset(struct net_device *dev, struct i596_private *lp, int ioad
806} 805}
807 806
808static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) { 807static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) {
809 struct i596_private *lp = dev->priv; 808 struct i596_private *lp = netdev_priv(dev);
810 int ioaddr = dev->base_addr; 809 int ioaddr = dev->base_addr;
811 unsigned long flags; 810 unsigned long flags;
812 811
@@ -912,7 +911,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
912 911
913static void 912static void
914i596_tx_timeout (struct net_device *dev) { 913i596_tx_timeout (struct net_device *dev) {
915 struct i596_private *lp = dev->priv; 914 struct i596_private *lp = netdev_priv(dev);
916 int ioaddr = dev->base_addr; 915 int ioaddr = dev->base_addr;
917 916
918 /* Transmitter timeout, serious problems. */ 917 /* Transmitter timeout, serious problems. */
@@ -970,7 +969,7 @@ static int __init lp486e_probe(struct net_device *dev) {
970 return -EBUSY; 969 return -EBUSY;
971 } 970 }
972 971
973 lp = (struct i596_private *) dev->priv; 972 lp = netdev_priv(dev);
974 spin_lock_init(&lp->cmd_lock); 973 spin_lock_init(&lp->cmd_lock);
975 974
976 /* 975 /*
@@ -1147,7 +1146,7 @@ static irqreturn_t
1147i596_interrupt(int irq, void *dev_instance) 1146i596_interrupt(int irq, void *dev_instance)
1148{ 1147{
1149 struct net_device *dev = dev_instance; 1148 struct net_device *dev = dev_instance;
1150 struct i596_private *lp = dev->priv; 1149 struct i596_private *lp = netdev_priv(dev);
1151 unsigned short status, ack_cmd = 0; 1150 unsigned short status, ack_cmd = 0;
1152 int frames_in = 0; 1151 int frames_in = 0;
1153 1152
@@ -1215,7 +1214,7 @@ i596_interrupt(int irq, void *dev_instance)
1215} 1214}
1216 1215
1217static int i596_close(struct net_device *dev) { 1216static int i596_close(struct net_device *dev) {
1218 struct i596_private *lp = dev->priv; 1217 struct i596_private *lp = netdev_priv(dev);
1219 1218
1220 netif_stop_queue(dev); 1219 netif_stop_queue(dev);
1221 1220
@@ -1242,7 +1241,7 @@ static int i596_close(struct net_device *dev) {
1242*/ 1241*/
1243 1242
1244static void set_multicast_list(struct net_device *dev) { 1243static void set_multicast_list(struct net_device *dev) {
1245 struct i596_private *lp = dev->priv; 1244 struct i596_private *lp = netdev_priv(dev);
1246 struct i596_cmd *cmd; 1245 struct i596_cmd *cmd;
1247 1246
1248 if (i596_debug > 1) 1247 if (i596_debug > 1)
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 98e3eb2697c9..57716e22660c 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -304,7 +304,7 @@ struct net_device * __init mac8390_probe(int unit)
304 if (!MACH_IS_MAC) 304 if (!MACH_IS_MAC)
305 return ERR_PTR(-ENODEV); 305 return ERR_PTR(-ENODEV);
306 306
307 dev = ____alloc_ei_netdev(0); 307 dev = alloc_ei_netdev();
308 if (!dev) 308 if (!dev)
309 return ERR_PTR(-ENOMEM); 309 return ERR_PTR(-ENOMEM);
310 310
@@ -478,6 +478,20 @@ void cleanup_module(void)
478 478
479#endif /* MODULE */ 479#endif /* MODULE */
480 480
481static const struct net_device_ops mac8390_netdev_ops = {
482 .ndo_open = mac8390_open,
483 .ndo_stop = mac8390_close,
484 .ndo_start_xmit = ei_start_xmit,
485 .ndo_tx_timeout = ei_tx_timeout,
486 .ndo_get_stats = ei_get_stats,
487 .ndo_set_multicast_list = ei_set_multicast_list,
488 .ndo_validate_addr = eth_validate_addr,
489 .ndo_change_mtu = eth_change_mtu,
490#ifdef CONFIG_NET_POLL_CONTROLLER
491 .ndo_poll_controller = ei_poll,
492#endif
493};
494
481static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev, 495static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
482 enum mac8390_type type) 496 enum mac8390_type type)
483{ 497{
@@ -503,11 +517,7 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
503 int access_bitmode = 0; 517 int access_bitmode = 0;
504 518
505 /* Now fill in our stuff */ 519 /* Now fill in our stuff */
506 dev->open = &mac8390_open; 520 dev->netdev_ops = &mac8390_netdev_ops;
507 dev->stop = &mac8390_close;
508#ifdef CONFIG_NET_POLL_CONTROLLER
509 dev->poll_controller = __ei_poll;
510#endif
511 521
512 /* GAR, ei_status is actually a macro even though it looks global */ 522 /* GAR, ei_status is actually a macro even though it looks global */
513 ei_status.name = cardname[type]; 523 ei_status.name = cardname[type];
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 4ce8afd481c3..380a1a54d530 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -181,7 +181,6 @@ struct net_device * __init mac89x0_probe(int unit)
181 unsigned long ioaddr; 181 unsigned long ioaddr;
182 unsigned short sig; 182 unsigned short sig;
183 int err = -ENODEV; 183 int err = -ENODEV;
184 DECLARE_MAC_BUF(mac);
185 184
186 if (!MACH_IS_MAC) 185 if (!MACH_IS_MAC)
187 return ERR_PTR(-ENODEV); 186 return ERR_PTR(-ENODEV);
@@ -279,8 +278,7 @@ struct net_device * __init mac89x0_probe(int unit)
279 278
280 /* print the IRQ and ethernet address. */ 279 /* print the IRQ and ethernet address. */
281 280
282 printk(" IRQ %d ADDR %s\n", 281 printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr);
283 dev->irq, print_mac(mac, dev->dev_addr));
284 282
285 dev->open = net_open; 283 dev->open = net_open;
286 dev->stop = net_close; 284 dev->stop = net_close;
@@ -518,7 +516,6 @@ net_rx(struct net_device *dev)
518 516
519 skb->protocol=eth_type_trans(skb,dev); 517 skb->protocol=eth_type_trans(skb,dev);
520 netif_rx(skb); 518 netif_rx(skb);
521 dev->last_rx = jiffies;
522 lp->stats.rx_packets++; 519 lp->stats.rx_packets++;
523 lp->stats.rx_bytes += length; 520 lp->stats.rx_bytes += length;
524} 521}
@@ -628,14 +625,3 @@ cleanup_module(void)
628 free_netdev(dev_cs89x0); 625 free_netdev(dev_cs89x0);
629} 626}
630#endif /* MODULE */ 627#endif /* MODULE */
631
632/*
633 * Local variables:
634 * compile-command: "m68k-linux-gcc -D__KERNEL__ -I../../include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -pipe -fno-strength-reduce -ffixed-a2 -DMODULE -DMODVERSIONS -include ../../include/linux/modversions.h -c -o mac89x0.o mac89x0.c"
635 * version-control: t
636 * kept-new-versions: 5
637 * c-indent-level: 8
638 * tab-width: 8
639 * End:
640 *
641 */
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 01f7a31bac76..a04da4ecaa88 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -435,7 +435,6 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
435 435
436 bp->stats.rx_packets++; 436 bp->stats.rx_packets++;
437 bp->stats.rx_bytes += len; 437 bp->stats.rx_bytes += len;
438 bp->dev->last_rx = jiffies;
439 dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n", 438 dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
440 skb->len, skb->csum); 439 skb->len, skb->csum);
441 netif_receive_skb(skb); 440 netif_receive_skb(skb);
@@ -520,7 +519,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
520 * this function was called last time, and no packets 519 * this function was called last time, and no packets
521 * have been received since. 520 * have been received since.
522 */ 521 */
523 netif_rx_complete(dev, napi); 522 netif_rx_complete(napi);
524 goto out; 523 goto out;
525 } 524 }
526 525
@@ -531,13 +530,13 @@ static int macb_poll(struct napi_struct *napi, int budget)
531 dev_warn(&bp->pdev->dev, 530 dev_warn(&bp->pdev->dev,
532 "No RX buffers complete, status = %02lx\n", 531 "No RX buffers complete, status = %02lx\n",
533 (unsigned long)status); 532 (unsigned long)status);
534 netif_rx_complete(dev, napi); 533 netif_rx_complete(napi);
535 goto out; 534 goto out;
536 } 535 }
537 536
538 work_done = macb_rx(bp, budget); 537 work_done = macb_rx(bp, budget);
539 if (work_done < budget) 538 if (work_done < budget)
540 netif_rx_complete(dev, napi); 539 netif_rx_complete(napi);
541 540
542 /* 541 /*
543 * We've done what we can to clean the buffers. Make sure we 542 * We've done what we can to clean the buffers. Make sure we
@@ -572,7 +571,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
572 } 571 }
573 572
574 if (status & MACB_RX_INT_FLAGS) { 573 if (status & MACB_RX_INT_FLAGS) {
575 if (netif_rx_schedule_prep(dev, &bp->napi)) { 574 if (netif_rx_schedule_prep(&bp->napi)) {
576 /* 575 /*
577 * There's no point taking any more interrupts 576 * There's no point taking any more interrupts
578 * until we have processed the buffers 577 * until we have processed the buffers
@@ -580,7 +579,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
580 macb_writel(bp, IDR, MACB_RX_INT_FLAGS); 579 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
581 dev_dbg(&bp->pdev->dev, 580 dev_dbg(&bp->pdev->dev,
582 "scheduling RX softirq\n"); 581 "scheduling RX softirq\n");
583 __netif_rx_schedule(dev, &bp->napi); 582 __netif_rx_schedule(&bp->napi);
584 } 583 }
585 } 584 }
586 585
@@ -1104,7 +1103,6 @@ static int __init macb_probe(struct platform_device *pdev)
1104 unsigned long pclk_hz; 1103 unsigned long pclk_hz;
1105 u32 config; 1104 u32 config;
1106 int err = -ENXIO; 1105 int err = -ENXIO;
1107 DECLARE_MAC_BUF(mac);
1108 1106
1109 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1107 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1110 if (!regs) { 1108 if (!regs) {
@@ -1223,10 +1221,8 @@ static int __init macb_probe(struct platform_device *pdev)
1223 1221
1224 platform_set_drvdata(pdev, dev); 1222 platform_set_drvdata(pdev, dev);
1225 1223
1226 printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d " 1224 printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d (%pM)\n",
1227 "(%s)\n", 1225 dev->name, dev->base_addr, dev->irq, dev->dev_addr);
1228 dev->name, dev->base_addr, dev->irq,
1229 print_mac(mac, dev->dev_addr));
1230 1226
1231 phydev = bp->phy_dev; 1227 phydev = bp->phy_dev;
1232 printk(KERN_INFO "%s: attached PHY driver [%s] " 1228 printk(KERN_INFO "%s: attached PHY driver [%s] "
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 451acdca2a21..feebbd92aff2 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -101,7 +101,6 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
101 struct mace_data *mp; 101 struct mace_data *mp;
102 const unsigned char *addr; 102 const unsigned char *addr;
103 int j, rev, rc = -EBUSY; 103 int j, rev, rc = -EBUSY;
104 DECLARE_MAC_BUF(mac);
105 104
106 if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { 105 if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
107 printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n", 106 printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
@@ -144,7 +143,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
144 } 143 }
145 SET_NETDEV_DEV(dev, &mdev->ofdev.dev); 144 SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
146 145
147 mp = dev->priv; 146 mp = netdev_priv(dev);
148 mp->mdev = mdev; 147 mp->mdev = mdev;
149 macio_set_drvdata(mdev, dev); 148 macio_set_drvdata(mdev, dev);
150 149
@@ -165,7 +164,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
165 in_8(&mp->mace->chipid_lo); 164 in_8(&mp->mace->chipid_lo);
166 165
167 166
168 mp = (struct mace_data *) dev->priv; 167 mp = netdev_priv(dev);
169 mp->maccc = ENXMT | ENRCV; 168 mp->maccc = ENXMT | ENRCV;
170 169
171 mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000); 170 mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
@@ -241,8 +240,8 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
241 goto err_free_rx_irq; 240 goto err_free_rx_irq;
242 } 241 }
243 242
244 printk(KERN_INFO "%s: MACE at %s, chip revision %d.%d\n", 243 printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n",
245 dev->name, print_mac(mac, dev->dev_addr), 244 dev->name, dev->dev_addr,
246 mp->chipid >> 8, mp->chipid & 0xff); 245 mp->chipid >> 8, mp->chipid & 0xff);
247 246
248 return 0; 247 return 0;
@@ -276,7 +275,7 @@ static int __devexit mace_remove(struct macio_dev *mdev)
276 275
277 macio_set_drvdata(mdev, NULL); 276 macio_set_drvdata(mdev, NULL);
278 277
279 mp = dev->priv; 278 mp = netdev_priv(dev);
280 279
281 unregister_netdev(dev); 280 unregister_netdev(dev);
282 281
@@ -312,7 +311,7 @@ static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
312 311
313static void mace_reset(struct net_device *dev) 312static void mace_reset(struct net_device *dev)
314{ 313{
315 struct mace_data *mp = (struct mace_data *) dev->priv; 314 struct mace_data *mp = netdev_priv(dev);
316 volatile struct mace __iomem *mb = mp->mace; 315 volatile struct mace __iomem *mb = mp->mace;
317 int i; 316 int i;
318 317
@@ -367,7 +366,7 @@ static void mace_reset(struct net_device *dev)
367 366
368static void __mace_set_address(struct net_device *dev, void *addr) 367static void __mace_set_address(struct net_device *dev, void *addr)
369{ 368{
370 struct mace_data *mp = (struct mace_data *) dev->priv; 369 struct mace_data *mp = netdev_priv(dev);
371 volatile struct mace __iomem *mb = mp->mace; 370 volatile struct mace __iomem *mb = mp->mace;
372 unsigned char *p = addr; 371 unsigned char *p = addr;
373 int i; 372 int i;
@@ -388,7 +387,7 @@ static void __mace_set_address(struct net_device *dev, void *addr)
388 387
389static int mace_set_address(struct net_device *dev, void *addr) 388static int mace_set_address(struct net_device *dev, void *addr)
390{ 389{
391 struct mace_data *mp = (struct mace_data *) dev->priv; 390 struct mace_data *mp = netdev_priv(dev);
392 volatile struct mace __iomem *mb = mp->mace; 391 volatile struct mace __iomem *mb = mp->mace;
393 unsigned long flags; 392 unsigned long flags;
394 393
@@ -423,7 +422,7 @@ static inline void mace_clean_rings(struct mace_data *mp)
423 422
424static int mace_open(struct net_device *dev) 423static int mace_open(struct net_device *dev)
425{ 424{
426 struct mace_data *mp = (struct mace_data *) dev->priv; 425 struct mace_data *mp = netdev_priv(dev);
427 volatile struct mace __iomem *mb = mp->mace; 426 volatile struct mace __iomem *mb = mp->mace;
428 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; 427 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
429 volatile struct dbdma_regs __iomem *td = mp->tx_dma; 428 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
@@ -493,7 +492,7 @@ static int mace_open(struct net_device *dev)
493 492
494static int mace_close(struct net_device *dev) 493static int mace_close(struct net_device *dev)
495{ 494{
496 struct mace_data *mp = (struct mace_data *) dev->priv; 495 struct mace_data *mp = netdev_priv(dev);
497 volatile struct mace __iomem *mb = mp->mace; 496 volatile struct mace __iomem *mb = mp->mace;
498 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; 497 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
499 volatile struct dbdma_regs __iomem *td = mp->tx_dma; 498 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
@@ -513,7 +512,7 @@ static int mace_close(struct net_device *dev)
513 512
514static inline void mace_set_timeout(struct net_device *dev) 513static inline void mace_set_timeout(struct net_device *dev)
515{ 514{
516 struct mace_data *mp = (struct mace_data *) dev->priv; 515 struct mace_data *mp = netdev_priv(dev);
517 516
518 if (mp->timeout_active) 517 if (mp->timeout_active)
519 del_timer(&mp->tx_timeout); 518 del_timer(&mp->tx_timeout);
@@ -526,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev)
526 525
527static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) 526static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
528{ 527{
529 struct mace_data *mp = (struct mace_data *) dev->priv; 528 struct mace_data *mp = netdev_priv(dev);
530 volatile struct dbdma_regs __iomem *td = mp->tx_dma; 529 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
531 volatile struct dbdma_cmd *cp, *np; 530 volatile struct dbdma_cmd *cp, *np;
532 unsigned long flags; 531 unsigned long flags;
@@ -581,7 +580,7 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
581 580
582static void mace_set_multicast(struct net_device *dev) 581static void mace_set_multicast(struct net_device *dev)
583{ 582{
584 struct mace_data *mp = (struct mace_data *) dev->priv; 583 struct mace_data *mp = netdev_priv(dev);
585 volatile struct mace __iomem *mb = mp->mace; 584 volatile struct mace __iomem *mb = mp->mace;
586 int i, j; 585 int i, j;
587 u32 crc; 586 u32 crc;
@@ -656,7 +655,7 @@ static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_de
656static irqreturn_t mace_interrupt(int irq, void *dev_id) 655static irqreturn_t mace_interrupt(int irq, void *dev_id)
657{ 656{
658 struct net_device *dev = (struct net_device *) dev_id; 657 struct net_device *dev = (struct net_device *) dev_id;
659 struct mace_data *mp = (struct mace_data *) dev->priv; 658 struct mace_data *mp = netdev_priv(dev);
660 volatile struct mace __iomem *mb = mp->mace; 659 volatile struct mace __iomem *mb = mp->mace;
661 volatile struct dbdma_regs __iomem *td = mp->tx_dma; 660 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
662 volatile struct dbdma_cmd *cp; 661 volatile struct dbdma_cmd *cp;
@@ -802,7 +801,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
802static void mace_tx_timeout(unsigned long data) 801static void mace_tx_timeout(unsigned long data)
803{ 802{
804 struct net_device *dev = (struct net_device *) data; 803 struct net_device *dev = (struct net_device *) data;
805 struct mace_data *mp = (struct mace_data *) dev->priv; 804 struct mace_data *mp = netdev_priv(dev);
806 volatile struct mace __iomem *mb = mp->mace; 805 volatile struct mace __iomem *mb = mp->mace;
807 volatile struct dbdma_regs __iomem *td = mp->tx_dma; 806 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
808 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; 807 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
@@ -873,7 +872,7 @@ static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
873static irqreturn_t mace_rxdma_intr(int irq, void *dev_id) 872static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
874{ 873{
875 struct net_device *dev = (struct net_device *) dev_id; 874 struct net_device *dev = (struct net_device *) dev_id;
876 struct mace_data *mp = (struct mace_data *) dev->priv; 875 struct mace_data *mp = netdev_priv(dev);
877 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; 876 volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
878 volatile struct dbdma_cmd *cp, *np; 877 volatile struct dbdma_cmd *cp, *np;
879 int i, nb, stat, next; 878 int i, nb, stat, next;
@@ -929,7 +928,6 @@ static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
929 skb->protocol = eth_type_trans(skb, dev); 928 skb->protocol = eth_type_trans(skb, dev);
930 dev->stats.rx_bytes += skb->len; 929 dev->stats.rx_bytes += skb->len;
931 netif_rx(skb); 930 netif_rx(skb);
932 dev->last_rx = jiffies;
933 mp->rx_bufs[i] = NULL; 931 mp->rx_bufs[i] = NULL;
934 ++dev->stats.rx_packets; 932 ++dev->stats.rx_packets;
935 } 933 }
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 85587a6667b9..274e99bb63ac 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -194,7 +194,6 @@ static int __devinit mace_probe(struct platform_device *pdev)
194 unsigned char checksum = 0; 194 unsigned char checksum = 0;
195 static int found = 0; 195 static int found = 0;
196 int err; 196 int err;
197 DECLARE_MAC_BUF(mac);
198 197
199 if (found || macintosh_config->ether_type != MAC_ETHER_MACE) 198 if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
200 return -ENODEV; 199 return -ENODEV;
@@ -249,8 +248,8 @@ static int __devinit mace_probe(struct platform_device *pdev)
249 dev->set_multicast_list = mace_set_multicast; 248 dev->set_multicast_list = mace_set_multicast;
250 dev->set_mac_address = mace_set_address; 249 dev->set_mac_address = mace_set_address;
251 250
252 printk(KERN_INFO "%s: 68K MACE, hardware address %s\n", 251 printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
253 dev->name, print_mac(mac, dev->dev_addr)); 252 dev->name, dev->dev_addr);
254 253
255 err = register_netdev(dev); 254 err = register_netdev(dev);
256 if (!err) 255 if (!err)
@@ -674,7 +673,6 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
674 673
675 skb->protocol = eth_type_trans(skb, dev); 674 skb->protocol = eth_type_trans(skb, dev);
676 netif_rx(skb); 675 netif_rx(skb);
677 dev->last_rx = jiffies;
678 dev->stats.rx_packets++; 676 dev->stats.rx_packets++;
679 dev->stats.rx_bytes += frame_length; 677 dev->stats.rx_bytes += frame_length;
680 } 678 }
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index e64c2086d33c..205bb05c25d6 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -220,7 +220,6 @@ static int __init mac_onboard_sonic_ethernet_addr(struct net_device *dev)
220 struct sonic_local *lp = netdev_priv(dev); 220 struct sonic_local *lp = netdev_priv(dev);
221 const int prom_addr = ONBOARD_SONIC_PROM_BASE; 221 const int prom_addr = ONBOARD_SONIC_PROM_BASE;
222 int i; 222 int i;
223 DECLARE_MAC_BUF(mac);
224 223
225 /* On NuBus boards we can sometimes look in the ROM resources. 224 /* On NuBus boards we can sometimes look in the ROM resources.
226 No such luck for comm-slot/onboard. */ 225 No such luck for comm-slot/onboard. */
@@ -264,8 +263,8 @@ static int __init mac_onboard_sonic_ethernet_addr(struct net_device *dev)
264 dev->dev_addr[1] = val >> 8; 263 dev->dev_addr[1] = val >> 8;
265 dev->dev_addr[0] = val & 0xff; 264 dev->dev_addr[0] = val & 0xff;
266 265
267 printk(KERN_INFO "HW Address from CAM 15: %s\n", 266 printk(KERN_INFO "HW Address from CAM 15: %pM\n",
268 print_mac(mac, dev->dev_addr)); 267 dev->dev_addr);
269 } else return 0; 268 } else return 0;
270 269
271 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) && 270 if (memcmp(dev->dev_addr, "\x08\x00\x07", 3) &&
@@ -560,7 +559,6 @@ static int __init mac_sonic_probe(struct platform_device *pdev)
560 struct net_device *dev; 559 struct net_device *dev;
561 struct sonic_local *lp; 560 struct sonic_local *lp;
562 int err; 561 int err;
563 DECLARE_MAC_BUF(mac);
564 562
565 dev = alloc_etherdev(sizeof(struct sonic_local)); 563 dev = alloc_etherdev(sizeof(struct sonic_local));
566 if (!dev) 564 if (!dev)
@@ -584,8 +582,7 @@ found:
584 if (err) 582 if (err)
585 goto out; 583 goto out;
586 584
587 printk("%s: MAC %s IRQ %d\n", 585 printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq);
588 dev->name, print_mac(mac, dev->dev_addr), dev->irq);
589 586
590 return 0; 587 return 0;
591 588
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 590039cbb146..7e24b5048686 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -87,7 +87,6 @@ static void macvlan_broadcast(struct sk_buff *skb,
87 dev->stats.rx_bytes += skb->len + ETH_HLEN; 87 dev->stats.rx_bytes += skb->len + ETH_HLEN;
88 dev->stats.rx_packets++; 88 dev->stats.rx_packets++;
89 dev->stats.multicast++; 89 dev->stats.multicast++;
90 dev->last_rx = jiffies;
91 90
92 nskb->dev = dev; 91 nskb->dev = dev;
93 if (!compare_ether_addr(eth->h_dest, dev->broadcast)) 92 if (!compare_ether_addr(eth->h_dest, dev->broadcast))
@@ -136,7 +135,6 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
136 135
137 dev->stats.rx_bytes += skb->len + ETH_HLEN; 136 dev->stats.rx_bytes += skb->len + ETH_HLEN;
138 dev->stats.rx_packets++; 137 dev->stats.rx_packets++;
139 dev->last_rx = jiffies;
140 138
141 skb->dev = dev; 139 skb->dev = dev;
142 skb->pkt_type = PACKET_HOST; 140 skb->pkt_type = PACKET_HOST;
@@ -145,7 +143,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
145 return NULL; 143 return NULL;
146} 144}
147 145
148static int macvlan_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 146static int macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
149{ 147{
150 const struct macvlan_dev *vlan = netdev_priv(dev); 148 const struct macvlan_dev *vlan = netdev_priv(dev);
151 unsigned int len = skb->len; 149 unsigned int len = skb->len;
@@ -336,24 +334,53 @@ static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev)
336 return lowerdev->ethtool_ops->get_rx_csum(lowerdev); 334 return lowerdev->ethtool_ops->get_rx_csum(lowerdev);
337} 335}
338 336
337static int macvlan_ethtool_get_settings(struct net_device *dev,
338 struct ethtool_cmd *cmd)
339{
340 const struct macvlan_dev *vlan = netdev_priv(dev);
341 struct net_device *lowerdev = vlan->lowerdev;
342
343 if (!lowerdev->ethtool_ops->get_settings)
344 return -EOPNOTSUPP;
345
346 return lowerdev->ethtool_ops->get_settings(lowerdev, cmd);
347}
348
349static u32 macvlan_ethtool_get_flags(struct net_device *dev)
350{
351 const struct macvlan_dev *vlan = netdev_priv(dev);
352 struct net_device *lowerdev = vlan->lowerdev;
353
354 if (!lowerdev->ethtool_ops->get_flags)
355 return 0;
356 return lowerdev->ethtool_ops->get_flags(lowerdev);
357}
358
339static const struct ethtool_ops macvlan_ethtool_ops = { 359static const struct ethtool_ops macvlan_ethtool_ops = {
340 .get_link = ethtool_op_get_link, 360 .get_link = ethtool_op_get_link,
361 .get_settings = macvlan_ethtool_get_settings,
341 .get_rx_csum = macvlan_ethtool_get_rx_csum, 362 .get_rx_csum = macvlan_ethtool_get_rx_csum,
342 .get_drvinfo = macvlan_ethtool_get_drvinfo, 363 .get_drvinfo = macvlan_ethtool_get_drvinfo,
364 .get_flags = macvlan_ethtool_get_flags,
365};
366
367static const struct net_device_ops macvlan_netdev_ops = {
368 .ndo_init = macvlan_init,
369 .ndo_open = macvlan_open,
370 .ndo_stop = macvlan_stop,
371 .ndo_start_xmit = macvlan_start_xmit,
372 .ndo_change_mtu = macvlan_change_mtu,
373 .ndo_change_rx_flags = macvlan_change_rx_flags,
374 .ndo_set_mac_address = macvlan_set_mac_address,
375 .ndo_set_multicast_list = macvlan_set_multicast_list,
376 .ndo_validate_addr = eth_validate_addr,
343}; 377};
344 378
345static void macvlan_setup(struct net_device *dev) 379static void macvlan_setup(struct net_device *dev)
346{ 380{
347 ether_setup(dev); 381 ether_setup(dev);
348 382
349 dev->init = macvlan_init; 383 dev->netdev_ops = &macvlan_netdev_ops;
350 dev->open = macvlan_open;
351 dev->stop = macvlan_stop;
352 dev->change_mtu = macvlan_change_mtu;
353 dev->change_rx_flags = macvlan_change_rx_flags;
354 dev->set_mac_address = macvlan_set_mac_address;
355 dev->set_multicast_list = macvlan_set_multicast_list;
356 dev->hard_start_xmit = macvlan_hard_start_xmit;
357 dev->destructor = free_netdev; 384 dev->destructor = free_netdev;
358 dev->header_ops = &macvlan_hard_header_ops, 385 dev->header_ops = &macvlan_hard_header_ops,
359 dev->ethtool_ops = &macvlan_ethtool_ops; 386 dev->ethtool_ops = &macvlan_ethtool_ops;
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index a1e22ed1f6ee..c336a1f42510 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -94,10 +94,9 @@ char o2meth_eaddr[8]={0,0,0,0,0,0,0,0};
94static inline void load_eaddr(struct net_device *dev) 94static inline void load_eaddr(struct net_device *dev)
95{ 95{
96 int i; 96 int i;
97 DECLARE_MAC_BUF(mac);
98 u64 macaddr; 97 u64 macaddr;
99 98
100 DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr)); 99 DPRINTK("Loading MAC Address: %pM\n", dev->dev_addr);
101 macaddr = 0; 100 macaddr = 0;
102 for (i = 0; i < 6; i++) 101 for (i = 0; i < 6; i++)
103 macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); 102 macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
@@ -421,7 +420,6 @@ static void meth_rx(struct net_device* dev, unsigned long int_status)
421 skb_put(skb_c, len); 420 skb_put(skb_c, len);
422 priv->rx_skbs[priv->rx_write] = skb; 421 priv->rx_skbs[priv->rx_write] = skb;
423 skb_c->protocol = eth_type_trans(skb_c, dev); 422 skb_c->protocol = eth_type_trans(skb_c, dev);
424 dev->last_rx = jiffies;
425 dev->stats.rx_packets++; 423 dev->stats.rx_packets++;
426 dev->stats.rx_bytes += len; 424 dev->stats.rx_bytes += len;
427 netif_rx(skb_c); 425 netif_rx(skb_c);
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 674f836e225b..91f50de84be9 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -71,6 +71,8 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
71 err = mlx4_en_map_buffer(&cq->wqres.buf); 71 err = mlx4_en_map_buffer(&cq->wqres.buf);
72 if (err) 72 if (err)
73 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 73 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
74 else
75 cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
74 76
75 return err; 77 return err;
76} 78}
@@ -85,7 +87,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
85 cq->mcq.arm_db = cq->wqres.db.db + 1; 87 cq->mcq.arm_db = cq->wqres.db.db + 1;
86 *cq->mcq.set_ci_db = 0; 88 *cq->mcq.set_ci_db = 0;
87 *cq->mcq.arm_db = 0; 89 *cq->mcq.arm_db = 0;
88 cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
89 memset(cq->buf, 0, cq->buf_size); 90 memset(cq->buf, 0, cq->buf_size);
90 91
91 err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar, 92 err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
@@ -139,7 +140,6 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
139 140
140int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 141int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
141{ 142{
142 cq->armed = 1;
143 mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map, 143 mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
144 &priv->mdev->uar_lock); 144 &priv->mdev->uar_lock);
145 145
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 96e709d6440a..ebada3c7aff2 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -369,7 +369,6 @@ static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
369 369
370static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) 370static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
371{ 371{
372 struct mlx4_en_dev *mdev = priv->mdev;
373 struct mlx4_en_cq *cq; 372 struct mlx4_en_cq *cq;
374 int i; 373 int i;
375 374
@@ -379,15 +378,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
379 * satisfy our coelsing target. 378 * satisfy our coelsing target.
380 * - moder_time is set to a fixed value. 379 * - moder_time is set to a fixed value.
381 */ 380 */
382 priv->rx_frames = (mdev->profile.rx_moder_cnt == 381 priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->mtu + 1;
383 MLX4_EN_AUTO_CONF) ? 382 priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
384 MLX4_EN_RX_COAL_TARGET /
385 priv->dev->mtu + 1 :
386 mdev->profile.rx_moder_cnt;
387 priv->rx_usecs = (mdev->profile.rx_moder_time ==
388 MLX4_EN_AUTO_CONF) ?
389 MLX4_EN_RX_COAL_TIME :
390 mdev->profile.rx_moder_time;
391 mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - " 383 mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
392 "rx_frames:%d rx_usecs:%d\n", 384 "rx_frames:%d rx_usecs:%d\n",
393 priv->dev->mtu, priv->rx_frames, priv->rx_usecs); 385 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
@@ -411,7 +403,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
411 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; 403 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
412 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; 404 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
413 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; 405 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
414 priv->adaptive_rx_coal = mdev->profile.auto_moder; 406 priv->adaptive_rx_coal = 1;
415 priv->last_moder_time = MLX4_EN_AUTO_CONF; 407 priv->last_moder_time = MLX4_EN_AUTO_CONF;
416 priv->last_moder_jiffies = 0; 408 priv->last_moder_jiffies = 0;
417 priv->last_moder_packets = 0; 409 priv->last_moder_packets = 0;
@@ -953,6 +945,23 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
953 return 0; 945 return 0;
954} 946}
955 947
948static const struct net_device_ops mlx4_netdev_ops = {
949 .ndo_open = mlx4_en_open,
950 .ndo_stop = mlx4_en_close,
951 .ndo_start_xmit = mlx4_en_xmit,
952 .ndo_get_stats = mlx4_en_get_stats,
953 .ndo_set_multicast_list = mlx4_en_set_multicast,
954 .ndo_set_mac_address = mlx4_en_set_mac,
955 .ndo_change_mtu = mlx4_en_change_mtu,
956 .ndo_tx_timeout = mlx4_en_tx_timeout,
957 .ndo_vlan_rx_register = mlx4_en_vlan_rx_register,
958 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
959 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
960#ifdef CONFIG_NET_POLL_CONTROLLER
961 .ndo_poll_controller = mlx4_en_netpoll,
962#endif
963};
964
956int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 965int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
957 struct mlx4_en_port_profile *prof) 966 struct mlx4_en_port_profile *prof)
958{ 967{
@@ -1029,22 +1038,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1029 /* 1038 /*
1030 * Initialize netdev entry points 1039 * Initialize netdev entry points
1031 */ 1040 */
1032 1041 dev->netdev_ops = &mlx4_netdev_ops;
1033 dev->open = &mlx4_en_open;
1034 dev->stop = &mlx4_en_close;
1035 dev->hard_start_xmit = &mlx4_en_xmit;
1036 dev->get_stats = &mlx4_en_get_stats;
1037 dev->set_multicast_list = &mlx4_en_set_multicast;
1038 dev->set_mac_address = &mlx4_en_set_mac;
1039 dev->change_mtu = &mlx4_en_change_mtu;
1040 dev->tx_timeout = &mlx4_en_tx_timeout;
1041 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; 1042 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
1042 dev->vlan_rx_register = mlx4_en_vlan_rx_register; 1043
1043 dev->vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid;
1044 dev->vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid;
1045#ifdef CONFIG_NET_POLL_CONTROLLER
1046 dev->poll_controller = mlx4_en_netpoll;
1047#endif
1048 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); 1044 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
1049 1045
1050 /* Set defualt MAC */ 1046 /* Set defualt MAC */
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
index 95706ee1c019..047b37f5a747 100644
--- a/drivers/net/mlx4/en_params.c
+++ b/drivers/net/mlx4/en_params.c
@@ -60,24 +60,11 @@ MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
60 "Number of LRO sessions per ring or disabled (0)"); 60 "Number of LRO sessions per ring or disabled (0)");
61 61
62/* Priority pausing */ 62/* Priority pausing */
63MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE,
64 "Pause policy on TX: 0 never generate pause frames "
65 "1 generate pause frames according to RX buffer threshold");
66MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE,
67 "Pause policy on RX: 0 ignore received pause frames "
68 "1 respect received pause frames");
69MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]." 63MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
70 " Per priority bit mask"); 64 " Per priority bit mask");
71MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." 65MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
72 " Per priority bit mask"); 66 " Per priority bit mask");
73 67
74/* Interrupt moderation tunning */
75MLX4_EN_PARM_INT(rx_moder_cnt, MLX4_EN_AUTO_CONF,
76 "Max coalesced descriptors for Rx interrupt moderation");
77MLX4_EN_PARM_INT(rx_moder_time, MLX4_EN_AUTO_CONF,
78 "Timeout following last packet for Rx interrupt moderation");
79MLX4_EN_PARM_INT(auto_moder, 1, "Enable dynamic interrupt moderation");
80
81MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)"); 68MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
82MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)"); 69MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");
83 70
@@ -92,16 +79,13 @@ int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
92 struct mlx4_en_profile *params = &mdev->profile; 79 struct mlx4_en_profile *params = &mdev->profile;
93 int i; 80 int i;
94 81
95 params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF);
96 params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF);
97 params->auto_moder = auto_moder;
98 params->rss_xor = (rss_xor != 0); 82 params->rss_xor = (rss_xor != 0);
99 params->rss_mask = rss_mask & 0x1f; 83 params->rss_mask = rss_mask & 0x1f;
100 params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS); 84 params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
101 for (i = 1; i <= MLX4_MAX_PORTS; i++) { 85 for (i = 1; i <= MLX4_MAX_PORTS; i++) {
102 params->prof[i].rx_pause = pprx; 86 params->prof[i].rx_pause = 1;
103 params->prof[i].rx_ppp = pfcrx; 87 params->prof[i].rx_ppp = pfcrx;
104 params->prof[i].tx_pause = pptx; 88 params->prof[i].tx_pause = 1;
105 params->prof[i].tx_ppp = pfctx; 89 params->prof[i].tx_ppp = pfctx;
106 } 90 }
107 if (pfcrx || pfctx) { 91 if (pfcrx || pfctx) {
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 6232227f56c3..c61b0bdca1a4 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -443,7 +443,8 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
443 /* Fill Rx buffers */ 443 /* Fill Rx buffers */
444 ring->full = 0; 444 ring->full = 0;
445 } 445 }
446 if (mlx4_en_fill_rx_buffers(priv)) 446 err = mlx4_en_fill_rx_buffers(priv);
447 if (err)
447 goto err_buffers; 448 goto err_buffers;
448 449
449 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { 450 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
@@ -776,8 +777,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
776 } else 777 } else
777 netif_receive_skb(skb); 778 netif_receive_skb(skb);
778 779
779 dev->last_rx = jiffies;
780
781next: 780next:
782 ++cq->mcq.cons_index; 781 ++cq->mcq.cons_index;
783 index = (cq->mcq.cons_index) & ring->size_mask; 782 index = (cq->mcq.cons_index) & ring->size_mask;
@@ -815,7 +814,7 @@ void mlx4_en_rx_irq(struct mlx4_cq *mcq)
815 struct mlx4_en_priv *priv = netdev_priv(cq->dev); 814 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
816 815
817 if (priv->port_up) 816 if (priv->port_up)
818 netif_rx_schedule(cq->dev, &cq->napi); 817 netif_rx_schedule(&cq->napi);
819 else 818 else
820 mlx4_en_arm_cq(priv, cq); 819 mlx4_en_arm_cq(priv, cq);
821} 820}
@@ -835,7 +834,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
835 INC_PERF_COUNTER(priv->pstats.napi_quota); 834 INC_PERF_COUNTER(priv->pstats.napi_quota);
836 else { 835 else {
837 /* Done for now */ 836 /* Done for now */
838 netif_rx_complete(dev, napi); 837 netif_rx_complete(napi);
839 mlx4_en_arm_cq(priv, cq); 838 mlx4_en_arm_cq(priv, cq);
840 } 839 }
841 return done; 840 return done;
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 8592f8fb8475..ff4d75205c25 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -379,8 +379,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
379 379
380 /* Wakeup Tx queue if this ring stopped it */ 380 /* Wakeup Tx queue if this ring stopped it */
381 if (unlikely(ring->blocked)) { 381 if (unlikely(ring->blocked)) {
382 if (((u32) (ring->prod - ring->cons) <= 382 if ((u32) (ring->prod - ring->cons) <=
383 ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) { 383 ring->size - HEADROOM - MAX_DESC_TXBBS) {
384 384
385 /* TODO: support multiqueue netdevs. Currently, we block 385 /* TODO: support multiqueue netdevs. Currently, we block
386 * when *any* ring is full. Note that: 386 * when *any* ring is full. Note that:
@@ -404,14 +404,11 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
404 struct mlx4_en_priv *priv = netdev_priv(cq->dev); 404 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
405 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; 405 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
406 406
407 spin_lock_irq(&ring->comp_lock); 407 if (!spin_trylock(&ring->comp_lock))
408 cq->armed = 0; 408 return;
409 mlx4_en_process_tx_cq(cq->dev, cq); 409 mlx4_en_process_tx_cq(cq->dev, cq);
410 if (ring->blocked) 410 mod_timer(&cq->timer, jiffies + 1);
411 mlx4_en_arm_cq(priv, cq); 411 spin_unlock(&ring->comp_lock);
412 else
413 mod_timer(&cq->timer, jiffies + 1);
414 spin_unlock_irq(&ring->comp_lock);
415} 412}
416 413
417 414
@@ -424,8 +421,10 @@ void mlx4_en_poll_tx_cq(unsigned long data)
424 421
425 INC_PERF_COUNTER(priv->pstats.tx_poll); 422 INC_PERF_COUNTER(priv->pstats.tx_poll);
426 423
427 netif_tx_lock(priv->dev); 424 if (!spin_trylock(&ring->comp_lock)) {
428 spin_lock_irq(&ring->comp_lock); 425 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
426 return;
427 }
429 mlx4_en_process_tx_cq(cq->dev, cq); 428 mlx4_en_process_tx_cq(cq->dev, cq);
430 inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb); 429 inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
431 430
@@ -435,8 +434,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
435 if (inflight && priv->port_up) 434 if (inflight && priv->port_up)
436 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT); 435 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
437 436
438 spin_unlock_irq(&ring->comp_lock); 437 spin_unlock(&ring->comp_lock);
439 netif_tx_unlock(priv->dev);
440} 438}
441 439
442static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, 440static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
@@ -479,7 +477,10 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
479 477
480 /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */ 478 /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
481 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0) 479 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
482 mlx4_en_process_tx_cq(priv->dev, cq); 480 if (spin_trylock(&ring->comp_lock)) {
481 mlx4_en_process_tx_cq(priv->dev, cq);
482 spin_unlock(&ring->comp_lock);
483 }
483} 484}
484 485
485static void *get_frag_ptr(struct sk_buff *skb) 486static void *get_frag_ptr(struct sk_buff *skb)
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index 592c01ae2c5d..6053c357a470 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -118,17 +118,7 @@ static int find_mgm(struct mlx4_dev *dev,
118 return err; 118 return err;
119 119
120 if (0) 120 if (0)
121 mlx4_dbg(dev, "Hash for %04x:%04x:%04x:%04x:" 121 mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash);
122 "%04x:%04x:%04x:%04x is %04x\n",
123 be16_to_cpu(((__be16 *) gid)[0]),
124 be16_to_cpu(((__be16 *) gid)[1]),
125 be16_to_cpu(((__be16 *) gid)[2]),
126 be16_to_cpu(((__be16 *) gid)[3]),
127 be16_to_cpu(((__be16 *) gid)[4]),
128 be16_to_cpu(((__be16 *) gid)[5]),
129 be16_to_cpu(((__be16 *) gid)[6]),
130 be16_to_cpu(((__be16 *) gid)[7]),
131 *hash);
132 122
133 *index = *hash; 123 *index = *hash;
134 *prev = -1; 124 *prev = -1;
@@ -215,7 +205,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
215 205
216 if (block_mcast_loopback) 206 if (block_mcast_loopback)
217 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) | 207 mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
218 (1 << MGM_BLCK_LB_BIT)); 208 (1U << MGM_BLCK_LB_BIT));
219 else 209 else
220 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 210 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
221 211
@@ -277,16 +267,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16])
277 goto out; 267 goto out;
278 268
279 if (index == -1) { 269 if (index == -1) {
280 mlx4_err(dev, "MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x " 270 mlx4_err(dev, "MGID %pI6 not found\n", gid);
281 "not found\n",
282 be16_to_cpu(((__be16 *) gid)[0]),
283 be16_to_cpu(((__be16 *) gid)[1]),
284 be16_to_cpu(((__be16 *) gid)[2]),
285 be16_to_cpu(((__be16 *) gid)[3]),
286 be16_to_cpu(((__be16 *) gid)[4]),
287 be16_to_cpu(((__be16 *) gid)[5]),
288 be16_to_cpu(((__be16 *) gid)[6]),
289 be16_to_cpu(((__be16 *) gid)[7]));
290 err = -EINVAL; 271 err = -EINVAL;
291 goto out; 272 goto out;
292 } 273 }
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 98ddc0811f93..e78209768def 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -58,17 +58,17 @@
58#define mlx4_dbg(mlevel, priv, format, arg...) \ 58#define mlx4_dbg(mlevel, priv, format, arg...) \
59 if (NETIF_MSG_##mlevel & priv->msg_enable) \ 59 if (NETIF_MSG_##mlevel & priv->msg_enable) \
60 printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\ 60 printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
61 (&priv->mdev->pdev->dev)->bus_id , ## arg) 61 (dev_name(&priv->mdev->pdev->dev)) , ## arg)
62 62
63#define mlx4_err(mdev, format, arg...) \ 63#define mlx4_err(mdev, format, arg...) \
64 printk(KERN_ERR "%s %s: " format , DRV_NAME ,\ 64 printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
65 (&mdev->pdev->dev)->bus_id , ## arg) 65 (dev_name(&mdev->pdev->dev)) , ## arg)
66#define mlx4_info(mdev, format, arg...) \ 66#define mlx4_info(mdev, format, arg...) \
67 printk(KERN_INFO "%s %s: " format , DRV_NAME ,\ 67 printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
68 (&mdev->pdev->dev)->bus_id , ## arg) 68 (dev_name(&mdev->pdev->dev)) , ## arg)
69#define mlx4_warn(mdev, format, arg...) \ 69#define mlx4_warn(mdev, format, arg...) \
70 printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\ 70 printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
71 (&mdev->pdev->dev)->bus_id , ## arg) 71 (dev_name(&mdev->pdev->dev)) , ## arg)
72 72
73/* 73/*
74 * Device constants 74 * Device constants
@@ -311,7 +311,6 @@ struct mlx4_en_cq {
311 enum cq_type is_tx; 311 enum cq_type is_tx;
312 u16 moder_time; 312 u16 moder_time;
313 u16 moder_cnt; 313 u16 moder_cnt;
314 int armed;
315 struct mlx4_cqe *buf; 314 struct mlx4_cqe *buf;
316#define MLX4_EN_OPCODE_ERROR 0x1e 315#define MLX4_EN_OPCODE_ERROR 0x1e
317}; 316};
@@ -334,9 +333,6 @@ struct mlx4_en_profile {
334 u8 rss_mask; 333 u8 rss_mask;
335 u32 active_ports; 334 u32 active_ports;
336 u32 small_pkt_int; 335 u32 small_pkt_int;
337 int rx_moder_cnt;
338 int rx_moder_time;
339 int auto_moder;
340 u8 no_reset; 336 u8 no_reset;
341 struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1]; 337 struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
342}; 338};
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index e513f76f2a9f..7253a499d9c8 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -51,8 +51,8 @@
51#include <linux/workqueue.h> 51#include <linux/workqueue.h>
52#include <linux/phy.h> 52#include <linux/phy.h>
53#include <linux/mv643xx_eth.h> 53#include <linux/mv643xx_eth.h>
54#include <asm/io.h> 54#include <linux/io.h>
55#include <asm/types.h> 55#include <linux/types.h>
56#include <asm/system.h> 56#include <asm/system.h>
57 57
58static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 58static char mv643xx_eth_driver_name[] = "mv643xx_eth";
@@ -78,16 +78,17 @@ static char mv643xx_eth_driver_version[] = "1.4";
78#define WINDOW_PROTECT(w) (0x0294 + ((w) << 4)) 78#define WINDOW_PROTECT(w) (0x0294 + ((w) << 4))
79 79
80/* 80/*
81 * Per-port registers. 81 * Main per-port registers. These live at offset 0x0400 for
82 * port #0, 0x0800 for port #1, and 0x0c00 for port #2.
82 */ 83 */
83#define PORT_CONFIG(p) (0x0400 + ((p) << 10)) 84#define PORT_CONFIG 0x0000
84#define UNICAST_PROMISCUOUS_MODE 0x00000001 85#define UNICAST_PROMISCUOUS_MODE 0x00000001
85#define PORT_CONFIG_EXT(p) (0x0404 + ((p) << 10)) 86#define PORT_CONFIG_EXT 0x0004
86#define MAC_ADDR_LOW(p) (0x0414 + ((p) << 10)) 87#define MAC_ADDR_LOW 0x0014
87#define MAC_ADDR_HIGH(p) (0x0418 + ((p) << 10)) 88#define MAC_ADDR_HIGH 0x0018
88#define SDMA_CONFIG(p) (0x041c + ((p) << 10)) 89#define SDMA_CONFIG 0x001c
89#define PORT_SERIAL_CONTROL(p) (0x043c + ((p) << 10)) 90#define PORT_SERIAL_CONTROL 0x003c
90#define PORT_STATUS(p) (0x0444 + ((p) << 10)) 91#define PORT_STATUS 0x0044
91#define TX_FIFO_EMPTY 0x00000400 92#define TX_FIFO_EMPTY 0x00000400
92#define TX_IN_PROGRESS 0x00000080 93#define TX_IN_PROGRESS 0x00000080
93#define PORT_SPEED_MASK 0x00000030 94#define PORT_SPEED_MASK 0x00000030
@@ -97,31 +98,35 @@ static char mv643xx_eth_driver_version[] = "1.4";
97#define FLOW_CONTROL_ENABLED 0x00000008 98#define FLOW_CONTROL_ENABLED 0x00000008
98#define FULL_DUPLEX 0x00000004 99#define FULL_DUPLEX 0x00000004
99#define LINK_UP 0x00000002 100#define LINK_UP 0x00000002
100#define TXQ_COMMAND(p) (0x0448 + ((p) << 10)) 101#define TXQ_COMMAND 0x0048
101#define TXQ_FIX_PRIO_CONF(p) (0x044c + ((p) << 10)) 102#define TXQ_FIX_PRIO_CONF 0x004c
102#define TX_BW_RATE(p) (0x0450 + ((p) << 10)) 103#define TX_BW_RATE 0x0050
103#define TX_BW_MTU(p) (0x0458 + ((p) << 10)) 104#define TX_BW_MTU 0x0058
104#define TX_BW_BURST(p) (0x045c + ((p) << 10)) 105#define TX_BW_BURST 0x005c
105#define INT_CAUSE(p) (0x0460 + ((p) << 10)) 106#define INT_CAUSE 0x0060
106#define INT_TX_END 0x07f80000 107#define INT_TX_END 0x07f80000
107#define INT_RX 0x000003fc 108#define INT_RX 0x000003fc
108#define INT_EXT 0x00000002 109#define INT_EXT 0x00000002
109#define INT_CAUSE_EXT(p) (0x0464 + ((p) << 10)) 110#define INT_CAUSE_EXT 0x0064
110#define INT_EXT_LINK_PHY 0x00110000 111#define INT_EXT_LINK_PHY 0x00110000
111#define INT_EXT_TX 0x000000ff 112#define INT_EXT_TX 0x000000ff
112#define INT_MASK(p) (0x0468 + ((p) << 10)) 113#define INT_MASK 0x0068
113#define INT_MASK_EXT(p) (0x046c + ((p) << 10)) 114#define INT_MASK_EXT 0x006c
114#define TX_FIFO_URGENT_THRESHOLD(p) (0x0474 + ((p) << 10)) 115#define TX_FIFO_URGENT_THRESHOLD 0x0074
115#define TXQ_FIX_PRIO_CONF_MOVED(p) (0x04dc + ((p) << 10)) 116#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc
116#define TX_BW_RATE_MOVED(p) (0x04e0 + ((p) << 10)) 117#define TX_BW_RATE_MOVED 0x00e0
117#define TX_BW_MTU_MOVED(p) (0x04e8 + ((p) << 10)) 118#define TX_BW_MTU_MOVED 0x00e8
118#define TX_BW_BURST_MOVED(p) (0x04ec + ((p) << 10)) 119#define TX_BW_BURST_MOVED 0x00ec
119#define RXQ_CURRENT_DESC_PTR(p, q) (0x060c + ((p) << 10) + ((q) << 4)) 120#define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4))
120#define RXQ_COMMAND(p) (0x0680 + ((p) << 10)) 121#define RXQ_COMMAND 0x0280
121#define TXQ_CURRENT_DESC_PTR(p, q) (0x06c0 + ((p) << 10) + ((q) << 2)) 122#define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2))
122#define TXQ_BW_TOKENS(p, q) (0x0700 + ((p) << 10) + ((q) << 4)) 123#define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4))
123#define TXQ_BW_CONF(p, q) (0x0704 + ((p) << 10) + ((q) << 4)) 124#define TXQ_BW_CONF(q) (0x0304 + ((q) << 4))
124#define TXQ_BW_WRR_CONF(p, q) (0x0708 + ((p) << 10) + ((q) << 4)) 125#define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4))
126
127/*
128 * Misc per-port registers.
129 */
125#define MIB_COUNTERS(p) (0x1000 + ((p) << 7)) 130#define MIB_COUNTERS(p) (0x1000 + ((p) << 7))
126#define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10)) 131#define SPECIAL_MCAST_TABLE(p) (0x1400 + ((p) << 10))
127#define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10)) 132#define OTHER_MCAST_TABLE(p) (0x1500 + ((p) << 10))
@@ -138,14 +143,14 @@ static char mv643xx_eth_driver_version[] = "1.4";
138 143
139#if defined(__BIG_ENDIAN) 144#if defined(__BIG_ENDIAN)
140#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 145#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
141 RX_BURST_SIZE_16_64BIT | \ 146 (RX_BURST_SIZE_16_64BIT | \
142 TX_BURST_SIZE_16_64BIT 147 TX_BURST_SIZE_16_64BIT)
143#elif defined(__LITTLE_ENDIAN) 148#elif defined(__LITTLE_ENDIAN)
144#define PORT_SDMA_CONFIG_DEFAULT_VALUE \ 149#define PORT_SDMA_CONFIG_DEFAULT_VALUE \
145 RX_BURST_SIZE_16_64BIT | \ 150 (RX_BURST_SIZE_16_64BIT | \
146 BLM_RX_NO_SWAP | \ 151 BLM_RX_NO_SWAP | \
147 BLM_TX_NO_SWAP | \ 152 BLM_TX_NO_SWAP | \
148 TX_BURST_SIZE_16_64BIT 153 TX_BURST_SIZE_16_64BIT)
149#else 154#else
150#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined 155#error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined
151#endif 156#endif
@@ -351,6 +356,7 @@ struct tx_queue {
351 356
352struct mv643xx_eth_private { 357struct mv643xx_eth_private {
353 struct mv643xx_eth_shared_private *shared; 358 struct mv643xx_eth_shared_private *shared;
359 void __iomem *base;
354 int port_num; 360 int port_num;
355 361
356 struct net_device *dev; 362 struct net_device *dev;
@@ -401,11 +407,21 @@ static inline u32 rdl(struct mv643xx_eth_private *mp, int offset)
401 return readl(mp->shared->base + offset); 407 return readl(mp->shared->base + offset);
402} 408}
403 409
410static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset)
411{
412 return readl(mp->base + offset);
413}
414
404static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) 415static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
405{ 416{
406 writel(data, mp->shared->base + offset); 417 writel(data, mp->shared->base + offset);
407} 418}
408 419
420static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data)
421{
422 writel(data, mp->base + offset);
423}
424
409 425
410/* rxq/txq helper functions *************************************************/ 426/* rxq/txq helper functions *************************************************/
411static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq) 427static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
@@ -421,7 +437,7 @@ static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
421static void rxq_enable(struct rx_queue *rxq) 437static void rxq_enable(struct rx_queue *rxq)
422{ 438{
423 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 439 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
424 wrl(mp, RXQ_COMMAND(mp->port_num), 1 << rxq->index); 440 wrlp(mp, RXQ_COMMAND, 1 << rxq->index);
425} 441}
426 442
427static void rxq_disable(struct rx_queue *rxq) 443static void rxq_disable(struct rx_queue *rxq)
@@ -429,26 +445,25 @@ static void rxq_disable(struct rx_queue *rxq)
429 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); 445 struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
430 u8 mask = 1 << rxq->index; 446 u8 mask = 1 << rxq->index;
431 447
432 wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8); 448 wrlp(mp, RXQ_COMMAND, mask << 8);
433 while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask) 449 while (rdlp(mp, RXQ_COMMAND) & mask)
434 udelay(10); 450 udelay(10);
435} 451}
436 452
437static void txq_reset_hw_ptr(struct tx_queue *txq) 453static void txq_reset_hw_ptr(struct tx_queue *txq)
438{ 454{
439 struct mv643xx_eth_private *mp = txq_to_mp(txq); 455 struct mv643xx_eth_private *mp = txq_to_mp(txq);
440 int off = TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index);
441 u32 addr; 456 u32 addr;
442 457
443 addr = (u32)txq->tx_desc_dma; 458 addr = (u32)txq->tx_desc_dma;
444 addr += txq->tx_curr_desc * sizeof(struct tx_desc); 459 addr += txq->tx_curr_desc * sizeof(struct tx_desc);
445 wrl(mp, off, addr); 460 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
446} 461}
447 462
448static void txq_enable(struct tx_queue *txq) 463static void txq_enable(struct tx_queue *txq)
449{ 464{
450 struct mv643xx_eth_private *mp = txq_to_mp(txq); 465 struct mv643xx_eth_private *mp = txq_to_mp(txq);
451 wrl(mp, TXQ_COMMAND(mp->port_num), 1 << txq->index); 466 wrlp(mp, TXQ_COMMAND, 1 << txq->index);
452} 467}
453 468
454static void txq_disable(struct tx_queue *txq) 469static void txq_disable(struct tx_queue *txq)
@@ -456,8 +471,8 @@ static void txq_disable(struct tx_queue *txq)
456 struct mv643xx_eth_private *mp = txq_to_mp(txq); 471 struct mv643xx_eth_private *mp = txq_to_mp(txq);
457 u8 mask = 1 << txq->index; 472 u8 mask = 1 << txq->index;
458 473
459 wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8); 474 wrlp(mp, TXQ_COMMAND, mask << 8);
460 while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask) 475 while (rdlp(mp, TXQ_COMMAND) & mask)
461 udelay(10); 476 udelay(10);
462} 477}
463 478
@@ -528,37 +543,38 @@ static int rxq_process(struct rx_queue *rxq, int budget)
528 * on, or the error summary bit is set, the packet needs 543 * on, or the error summary bit is set, the packet needs
529 * to be dropped. 544 * to be dropped.
530 */ 545 */
531 if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != 546 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC | ERROR_SUMMARY))
532 (RX_FIRST_DESC | RX_LAST_DESC)) 547 != (RX_FIRST_DESC | RX_LAST_DESC))
533 || (cmd_sts & ERROR_SUMMARY)) { 548 goto err;
534 stats->rx_dropped++;
535
536 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
537 (RX_FIRST_DESC | RX_LAST_DESC)) {
538 if (net_ratelimit())
539 dev_printk(KERN_ERR, &mp->dev->dev,
540 "received packet spanning "
541 "multiple descriptors\n");
542 }
543 549
544 if (cmd_sts & ERROR_SUMMARY) 550 /*
545 stats->rx_errors++; 551 * The -4 is for the CRC in the trailer of the
552 * received packet
553 */
554 skb_put(skb, byte_cnt - 2 - 4);
546 555
547 dev_kfree_skb(skb); 556 if (cmd_sts & LAYER_4_CHECKSUM_OK)
548 } else { 557 skb->ip_summed = CHECKSUM_UNNECESSARY;
549 /* 558 skb->protocol = eth_type_trans(skb, mp->dev);
550 * The -4 is for the CRC in the trailer of the 559 netif_receive_skb(skb);
551 * received packet 560
552 */ 561 continue;
553 skb_put(skb, byte_cnt - 2 - 4); 562
554 563err:
555 if (cmd_sts & LAYER_4_CHECKSUM_OK) 564 stats->rx_dropped++;
556 skb->ip_summed = CHECKSUM_UNNECESSARY; 565
557 skb->protocol = eth_type_trans(skb, mp->dev); 566 if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
558 netif_receive_skb(skb); 567 (RX_FIRST_DESC | RX_LAST_DESC)) {
568 if (net_ratelimit())
569 dev_printk(KERN_ERR, &mp->dev->dev,
570 "received packet spanning "
571 "multiple descriptors\n");
559 } 572 }
560 573
561 mp->dev->last_rx = jiffies; 574 if (cmd_sts & ERROR_SUMMARY)
575 stats->rx_errors++;
576
577 dev_kfree_skb(skb);
562 } 578 }
563 579
564 if (rx < budget) 580 if (rx < budget)
@@ -577,6 +593,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
577 struct sk_buff *skb; 593 struct sk_buff *skb;
578 int unaligned; 594 int unaligned;
579 int rx; 595 int rx;
596 struct rx_desc *rx_desc;
580 597
581 skb = __skb_dequeue(&mp->rx_recycle); 598 skb = __skb_dequeue(&mp->rx_recycle);
582 if (skb == NULL) 599 if (skb == NULL)
@@ -599,13 +616,14 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
599 if (rxq->rx_used_desc == rxq->rx_ring_size) 616 if (rxq->rx_used_desc == rxq->rx_ring_size)
600 rxq->rx_used_desc = 0; 617 rxq->rx_used_desc = 0;
601 618
602 rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data, 619 rx_desc = rxq->rx_desc_area + rx;
603 mp->skb_size, DMA_FROM_DEVICE); 620
604 rxq->rx_desc_area[rx].buf_size = mp->skb_size; 621 rx_desc->buf_ptr = dma_map_single(NULL, skb->data,
622 mp->skb_size, DMA_FROM_DEVICE);
623 rx_desc->buf_size = mp->skb_size;
605 rxq->rx_skb[rx] = skb; 624 rxq->rx_skb[rx] = skb;
606 wmb(); 625 wmb();
607 rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA | 626 rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
608 RX_ENABLE_INTERRUPT;
609 wmb(); 627 wmb();
610 628
611 /* 629 /*
@@ -638,21 +656,6 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
638 return 0; 656 return 0;
639} 657}
640 658
641static int txq_alloc_desc_index(struct tx_queue *txq)
642{
643 int tx_desc_curr;
644
645 BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);
646
647 tx_desc_curr = txq->tx_curr_desc++;
648 if (txq->tx_curr_desc == txq->tx_ring_size)
649 txq->tx_curr_desc = 0;
650
651 BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);
652
653 return tx_desc_curr;
654}
655
656static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb) 659static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
657{ 660{
658 int nr_frags = skb_shinfo(skb)->nr_frags; 661 int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -664,7 +667,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
664 struct tx_desc *desc; 667 struct tx_desc *desc;
665 668
666 this_frag = &skb_shinfo(skb)->frags[frag]; 669 this_frag = &skb_shinfo(skb)->frags[frag];
667 tx_index = txq_alloc_desc_index(txq); 670 tx_index = txq->tx_curr_desc++;
671 if (txq->tx_curr_desc == txq->tx_ring_size)
672 txq->tx_curr_desc = 0;
668 desc = &txq->tx_desc_area[tx_index]; 673 desc = &txq->tx_desc_area[tx_index];
669 674
670 /* 675 /*
@@ -746,7 +751,9 @@ no_csum:
746 cmd_sts |= 5 << TX_IHL_SHIFT; 751 cmd_sts |= 5 << TX_IHL_SHIFT;
747 } 752 }
748 753
749 tx_index = txq_alloc_desc_index(txq); 754 tx_index = txq->tx_curr_desc++;
755 if (txq->tx_curr_desc == txq->tx_ring_size)
756 txq->tx_curr_desc = 0;
750 desc = &txq->tx_desc_area[tx_index]; 757 desc = &txq->tx_desc_area[tx_index];
751 758
752 if (nr_frags) { 759 if (nr_frags) {
@@ -831,10 +838,10 @@ static void txq_kick(struct tx_queue *txq)
831 838
832 __netif_tx_lock(nq, smp_processor_id()); 839 __netif_tx_lock(nq, smp_processor_id());
833 840
834 if (rdl(mp, TXQ_COMMAND(mp->port_num)) & (1 << txq->index)) 841 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
835 goto out; 842 goto out;
836 843
837 hw_desc_ptr = rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index)); 844 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
838 expected_ptr = (u32)txq->tx_desc_dma + 845 expected_ptr = (u32)txq->tx_desc_dma +
839 txq->tx_curr_desc * sizeof(struct tx_desc); 846 txq->tx_curr_desc * sizeof(struct tx_desc);
840 847
@@ -941,14 +948,14 @@ static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
941 948
942 switch (mp->shared->tx_bw_control) { 949 switch (mp->shared->tx_bw_control) {
943 case TX_BW_CONTROL_OLD_LAYOUT: 950 case TX_BW_CONTROL_OLD_LAYOUT:
944 wrl(mp, TX_BW_RATE(mp->port_num), token_rate); 951 wrlp(mp, TX_BW_RATE, token_rate);
945 wrl(mp, TX_BW_MTU(mp->port_num), mtu); 952 wrlp(mp, TX_BW_MTU, mtu);
946 wrl(mp, TX_BW_BURST(mp->port_num), bucket_size); 953 wrlp(mp, TX_BW_BURST, bucket_size);
947 break; 954 break;
948 case TX_BW_CONTROL_NEW_LAYOUT: 955 case TX_BW_CONTROL_NEW_LAYOUT:
949 wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate); 956 wrlp(mp, TX_BW_RATE_MOVED, token_rate);
950 wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu); 957 wrlp(mp, TX_BW_MTU_MOVED, mtu);
951 wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size); 958 wrlp(mp, TX_BW_BURST_MOVED, bucket_size);
952 break; 959 break;
953 } 960 }
954} 961}
@@ -967,9 +974,8 @@ static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
967 if (bucket_size > 65535) 974 if (bucket_size > 65535)
968 bucket_size = 65535; 975 bucket_size = 65535;
969 976
970 wrl(mp, TXQ_BW_TOKENS(mp->port_num, txq->index), token_rate << 14); 977 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
971 wrl(mp, TXQ_BW_CONF(mp->port_num, txq->index), 978 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
972 (bucket_size << 10) | token_rate);
973} 979}
974 980
975static void txq_set_fixed_prio_mode(struct tx_queue *txq) 981static void txq_set_fixed_prio_mode(struct tx_queue *txq)
@@ -984,17 +990,17 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
984 off = 0; 990 off = 0;
985 switch (mp->shared->tx_bw_control) { 991 switch (mp->shared->tx_bw_control) {
986 case TX_BW_CONTROL_OLD_LAYOUT: 992 case TX_BW_CONTROL_OLD_LAYOUT:
987 off = TXQ_FIX_PRIO_CONF(mp->port_num); 993 off = TXQ_FIX_PRIO_CONF;
988 break; 994 break;
989 case TX_BW_CONTROL_NEW_LAYOUT: 995 case TX_BW_CONTROL_NEW_LAYOUT:
990 off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num); 996 off = TXQ_FIX_PRIO_CONF_MOVED;
991 break; 997 break;
992 } 998 }
993 999
994 if (off) { 1000 if (off) {
995 val = rdl(mp, off); 1001 val = rdlp(mp, off);
996 val |= 1 << txq->index; 1002 val |= 1 << txq->index;
997 wrl(mp, off, val); 1003 wrlp(mp, off, val);
998 } 1004 }
999} 1005}
1000 1006
@@ -1010,26 +1016,25 @@ static void txq_set_wrr(struct tx_queue *txq, int weight)
1010 off = 0; 1016 off = 0;
1011 switch (mp->shared->tx_bw_control) { 1017 switch (mp->shared->tx_bw_control) {
1012 case TX_BW_CONTROL_OLD_LAYOUT: 1018 case TX_BW_CONTROL_OLD_LAYOUT:
1013 off = TXQ_FIX_PRIO_CONF(mp->port_num); 1019 off = TXQ_FIX_PRIO_CONF;
1014 break; 1020 break;
1015 case TX_BW_CONTROL_NEW_LAYOUT: 1021 case TX_BW_CONTROL_NEW_LAYOUT:
1016 off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num); 1022 off = TXQ_FIX_PRIO_CONF_MOVED;
1017 break; 1023 break;
1018 } 1024 }
1019 1025
1020 if (off) { 1026 if (off) {
1021 val = rdl(mp, off); 1027 val = rdlp(mp, off);
1022 val &= ~(1 << txq->index); 1028 val &= ~(1 << txq->index);
1023 wrl(mp, off, val); 1029 wrlp(mp, off, val);
1024 1030
1025 /* 1031 /*
1026 * Configure WRR weight for this queue. 1032 * Configure WRR weight for this queue.
1027 */ 1033 */
1028 off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
1029 1034
1030 val = rdl(mp, off); 1035 val = rdlp(mp, off);
1031 val = (val & ~0xff) | (weight & 0xff); 1036 val = (val & ~0xff) | (weight & 0xff);
1032 wrl(mp, off, val); 1037 wrlp(mp, TXQ_BW_WRR_CONF(txq->index), val);
1033 } 1038 }
1034} 1039}
1035 1040
@@ -1084,20 +1089,20 @@ static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
1084 int ret; 1089 int ret;
1085 1090
1086 if (smi_wait_ready(msp)) { 1091 if (smi_wait_ready(msp)) {
1087 printk("mv643xx_eth: SMI bus busy timeout\n"); 1092 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
1088 return -ETIMEDOUT; 1093 return -ETIMEDOUT;
1089 } 1094 }
1090 1095
1091 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); 1096 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
1092 1097
1093 if (smi_wait_ready(msp)) { 1098 if (smi_wait_ready(msp)) {
1094 printk("mv643xx_eth: SMI bus busy timeout\n"); 1099 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
1095 return -ETIMEDOUT; 1100 return -ETIMEDOUT;
1096 } 1101 }
1097 1102
1098 ret = readl(smi_reg); 1103 ret = readl(smi_reg);
1099 if (!(ret & SMI_READ_VALID)) { 1104 if (!(ret & SMI_READ_VALID)) {
1100 printk("mv643xx_eth: SMI bus read not valid\n"); 1105 printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n");
1101 return -ENODEV; 1106 return -ENODEV;
1102 } 1107 }
1103 1108
@@ -1110,7 +1115,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1110 void __iomem *smi_reg = msp->base + SMI_REG; 1115 void __iomem *smi_reg = msp->base + SMI_REG;
1111 1116
1112 if (smi_wait_ready(msp)) { 1117 if (smi_wait_ready(msp)) {
1113 printk("mv643xx_eth: SMI bus busy timeout\n"); 1118 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
1114 return -ETIMEDOUT; 1119 return -ETIMEDOUT;
1115 } 1120 }
1116 1121
@@ -1118,7 +1123,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1118 (addr << 16) | (val & 0xffff), smi_reg); 1123 (addr << 16) | (val & 0xffff), smi_reg);
1119 1124
1120 if (smi_wait_ready(msp)) { 1125 if (smi_wait_ready(msp)) {
1121 printk("mv643xx_eth: SMI bus busy timeout\n"); 1126 printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n");
1122 return -ETIMEDOUT; 1127 return -ETIMEDOUT;
1123 } 1128 }
1124 1129
@@ -1271,7 +1276,8 @@ static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
1271 MIBSTAT(late_collision), 1276 MIBSTAT(late_collision),
1272}; 1277};
1273 1278
1274static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1279static int
1280mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1275{ 1281{
1276 struct mv643xx_eth_private *mp = netdev_priv(dev); 1282 struct mv643xx_eth_private *mp = netdev_priv(dev);
1277 int err; 1283 int err;
@@ -1289,12 +1295,14 @@ static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *
1289 return err; 1295 return err;
1290} 1296}
1291 1297
1292static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd) 1298static int
1299mv643xx_eth_get_settings_phyless(struct net_device *dev,
1300 struct ethtool_cmd *cmd)
1293{ 1301{
1294 struct mv643xx_eth_private *mp = netdev_priv(dev); 1302 struct mv643xx_eth_private *mp = netdev_priv(dev);
1295 u32 port_status; 1303 u32 port_status;
1296 1304
1297 port_status = rdl(mp, PORT_STATUS(mp->port_num)); 1305 port_status = rdlp(mp, PORT_STATUS);
1298 1306
1299 cmd->supported = SUPPORTED_MII; 1307 cmd->supported = SUPPORTED_MII;
1300 cmd->advertising = ADVERTISED_MII; 1308 cmd->advertising = ADVERTISED_MII;
@@ -1323,7 +1331,8 @@ static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethto
1323 return 0; 1331 return 0;
1324} 1332}
1325 1333
1326static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1334static int
1335mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1327{ 1336{
1328 struct mv643xx_eth_private *mp = netdev_priv(dev); 1337 struct mv643xx_eth_private *mp = netdev_priv(dev);
1329 1338
@@ -1335,7 +1344,9 @@ static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *
1335 return phy_ethtool_sset(mp->phy, cmd); 1344 return phy_ethtool_sset(mp->phy, cmd);
1336} 1345}
1337 1346
1338static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd) 1347static int
1348mv643xx_eth_set_settings_phyless(struct net_device *dev,
1349 struct ethtool_cmd *cmd)
1339{ 1350{
1340 return -EINVAL; 1351 return -EINVAL;
1341} 1352}
@@ -1443,11 +1454,8 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
1443/* address handling *********************************************************/ 1454/* address handling *********************************************************/
1444static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) 1455static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1445{ 1456{
1446 unsigned int mac_h; 1457 unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH);
1447 unsigned int mac_l; 1458 unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW);
1448
1449 mac_h = rdl(mp, MAC_ADDR_HIGH(mp->port_num));
1450 mac_l = rdl(mp, MAC_ADDR_LOW(mp->port_num));
1451 1459
1452 addr[0] = (mac_h >> 24) & 0xff; 1460 addr[0] = (mac_h >> 24) & 0xff;
1453 addr[1] = (mac_h >> 16) & 0xff; 1461 addr[1] = (mac_h >> 16) & 0xff;
@@ -1457,57 +1465,71 @@ static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
1457 addr[5] = mac_l & 0xff; 1465 addr[5] = mac_l & 0xff;
1458} 1466}
1459 1467
1460static void init_mac_tables(struct mv643xx_eth_private *mp) 1468static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
1461{ 1469{
1462 int i; 1470 wrlp(mp, MAC_ADDR_HIGH,
1463 1471 (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
1464 for (i = 0; i < 0x100; i += 4) { 1472 wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]);
1465 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
1466 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
1467 }
1468
1469 for (i = 0; i < 0x10; i += 4)
1470 wrl(mp, UNICAST_TABLE(mp->port_num) + i, 0);
1471} 1473}
1472 1474
1473static void set_filter_table_entry(struct mv643xx_eth_private *mp, 1475static u32 uc_addr_filter_mask(struct net_device *dev)
1474 int table, unsigned char entry)
1475{ 1476{
1476 unsigned int table_reg; 1477 struct dev_addr_list *uc_ptr;
1477 1478 u32 nibbles;
1478 /* Set "accepts frame bit" at specified table entry */
1479 table_reg = rdl(mp, table + (entry & 0xfc));
1480 table_reg |= 0x01 << (8 * (entry & 3));
1481 wrl(mp, table + (entry & 0xfc), table_reg);
1482}
1483 1479
1484static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr) 1480 if (dev->flags & IFF_PROMISC)
1485{ 1481 return 0;
1486 unsigned int mac_h;
1487 unsigned int mac_l;
1488 int table;
1489 1482
1490 mac_l = (addr[4] << 8) | addr[5]; 1483 nibbles = 1 << (dev->dev_addr[5] & 0x0f);
1491 mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; 1484 for (uc_ptr = dev->uc_list; uc_ptr != NULL; uc_ptr = uc_ptr->next) {
1485 if (memcmp(dev->dev_addr, uc_ptr->da_addr, 5))
1486 return 0;
1487 if ((dev->dev_addr[5] ^ uc_ptr->da_addr[5]) & 0xf0)
1488 return 0;
1492 1489
1493 wrl(mp, MAC_ADDR_LOW(mp->port_num), mac_l); 1490 nibbles |= 1 << (uc_ptr->da_addr[5] & 0x0f);
1494 wrl(mp, MAC_ADDR_HIGH(mp->port_num), mac_h); 1491 }
1495 1492
1496 table = UNICAST_TABLE(mp->port_num); 1493 return nibbles;
1497 set_filter_table_entry(mp, table, addr[5] & 0x0f);
1498} 1494}
1499 1495
1500static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) 1496static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
1501{ 1497{
1502 struct mv643xx_eth_private *mp = netdev_priv(dev); 1498 struct mv643xx_eth_private *mp = netdev_priv(dev);
1499 u32 port_config;
1500 u32 nibbles;
1501 int i;
1503 1502
1504 /* +2 is for the offset of the HW addr type */
1505 memcpy(dev->dev_addr, addr + 2, 6);
1506
1507 init_mac_tables(mp);
1508 uc_addr_set(mp, dev->dev_addr); 1503 uc_addr_set(mp, dev->dev_addr);
1509 1504
1510 return 0; 1505 port_config = rdlp(mp, PORT_CONFIG);
1506 nibbles = uc_addr_filter_mask(dev);
1507 if (!nibbles) {
1508 port_config |= UNICAST_PROMISCUOUS_MODE;
1509 wrlp(mp, PORT_CONFIG, port_config);
1510 return;
1511 }
1512
1513 for (i = 0; i < 16; i += 4) {
1514 int off = UNICAST_TABLE(mp->port_num) + i;
1515 u32 v;
1516
1517 v = 0;
1518 if (nibbles & 1)
1519 v |= 0x00000001;
1520 if (nibbles & 2)
1521 v |= 0x00000100;
1522 if (nibbles & 4)
1523 v |= 0x00010000;
1524 if (nibbles & 8)
1525 v |= 0x01000000;
1526 nibbles >>= 4;
1527
1528 wrl(mp, off, v);
1529 }
1530
1531 port_config &= ~UNICAST_PROMISCUOUS_MODE;
1532 wrlp(mp, PORT_CONFIG, port_config);
1511} 1533}
1512 1534
1513static int addr_crc(unsigned char *addr) 1535static int addr_crc(unsigned char *addr)
@@ -1528,24 +1550,22 @@ static int addr_crc(unsigned char *addr)
1528 return crc; 1550 return crc;
1529} 1551}
1530 1552
1531static void mv643xx_eth_set_rx_mode(struct net_device *dev) 1553static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
1532{ 1554{
1533 struct mv643xx_eth_private *mp = netdev_priv(dev); 1555 struct mv643xx_eth_private *mp = netdev_priv(dev);
1534 u32 port_config; 1556 u32 *mc_spec;
1557 u32 *mc_other;
1535 struct dev_addr_list *addr; 1558 struct dev_addr_list *addr;
1536 int i; 1559 int i;
1537 1560
1538 port_config = rdl(mp, PORT_CONFIG(mp->port_num));
1539 if (dev->flags & IFF_PROMISC)
1540 port_config |= UNICAST_PROMISCUOUS_MODE;
1541 else
1542 port_config &= ~UNICAST_PROMISCUOUS_MODE;
1543 wrl(mp, PORT_CONFIG(mp->port_num), port_config);
1544
1545 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1561 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
1546 int port_num = mp->port_num; 1562 int port_num;
1547 u32 accept = 0x01010101; 1563 u32 accept;
1564 int i;
1548 1565
1566oom:
1567 port_num = mp->port_num;
1568 accept = 0x01010101;
1549 for (i = 0; i < 0x100; i += 4) { 1569 for (i = 0; i < 0x100; i += 4) {
1550 wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept); 1570 wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
1551 wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept); 1571 wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
@@ -1553,28 +1573,55 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
1553 return; 1573 return;
1554 } 1574 }
1555 1575
1556 for (i = 0; i < 0x100; i += 4) { 1576 mc_spec = kmalloc(0x200, GFP_KERNEL);
1557 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0); 1577 if (mc_spec == NULL)
1558 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0); 1578 goto oom;
1559 } 1579 mc_other = mc_spec + (0x100 >> 2);
1580
1581 memset(mc_spec, 0, 0x100);
1582 memset(mc_other, 0, 0x100);
1560 1583
1561 for (addr = dev->mc_list; addr != NULL; addr = addr->next) { 1584 for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
1562 u8 *a = addr->da_addr; 1585 u8 *a = addr->da_addr;
1563 int table; 1586 u32 *table;
1564 1587 int entry;
1565 if (addr->da_addrlen != 6)
1566 continue;
1567 1588
1568 if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) { 1589 if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
1569 table = SPECIAL_MCAST_TABLE(mp->port_num); 1590 table = mc_spec;
1570 set_filter_table_entry(mp, table, a[5]); 1591 entry = a[5];
1571 } else { 1592 } else {
1572 int crc = addr_crc(a); 1593 table = mc_other;
1573 1594 entry = addr_crc(a);
1574 table = OTHER_MCAST_TABLE(mp->port_num);
1575 set_filter_table_entry(mp, table, crc);
1576 } 1595 }
1596
1597 table[entry >> 2] |= 1 << (entry & 3);
1577 } 1598 }
1599
1600 for (i = 0; i < 0x100; i += 4) {
1601 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
1602 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
1603 }
1604
1605 kfree(mc_spec);
1606}
1607
1608static void mv643xx_eth_set_rx_mode(struct net_device *dev)
1609{
1610 mv643xx_eth_program_unicast_filter(dev);
1611 mv643xx_eth_program_multicast_filter(dev);
1612}
1613
1614static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
1615{
1616 struct sockaddr *sa = addr;
1617
1618 memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
1619
1620 netif_addr_lock_bh(dev);
1621 mv643xx_eth_program_unicast_filter(dev);
1622 netif_addr_unlock_bh(dev);
1623
1624 return 0;
1578} 1625}
1579 1626
1580 1627
@@ -1758,26 +1805,25 @@ static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
1758 u32 int_cause; 1805 u32 int_cause;
1759 u32 int_cause_ext; 1806 u32 int_cause_ext;
1760 1807
1761 int_cause = rdl(mp, INT_CAUSE(mp->port_num)) & 1808 int_cause = rdlp(mp, INT_CAUSE) & (INT_TX_END | INT_RX | INT_EXT);
1762 (INT_TX_END | INT_RX | INT_EXT);
1763 if (int_cause == 0) 1809 if (int_cause == 0)
1764 return 0; 1810 return 0;
1765 1811
1766 int_cause_ext = 0; 1812 int_cause_ext = 0;
1767 if (int_cause & INT_EXT) 1813 if (int_cause & INT_EXT)
1768 int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num)); 1814 int_cause_ext = rdlp(mp, INT_CAUSE_EXT);
1769 1815
1770 int_cause &= INT_TX_END | INT_RX; 1816 int_cause &= INT_TX_END | INT_RX;
1771 if (int_cause) { 1817 if (int_cause) {
1772 wrl(mp, INT_CAUSE(mp->port_num), ~int_cause); 1818 wrlp(mp, INT_CAUSE, ~int_cause);
1773 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & 1819 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
1774 ~(rdl(mp, TXQ_COMMAND(mp->port_num)) & 0xff); 1820 ~(rdlp(mp, TXQ_COMMAND) & 0xff);
1775 mp->work_rx |= (int_cause & INT_RX) >> 2; 1821 mp->work_rx |= (int_cause & INT_RX) >> 2;
1776 } 1822 }
1777 1823
1778 int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX; 1824 int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
1779 if (int_cause_ext) { 1825 if (int_cause_ext) {
1780 wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext); 1826 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext);
1781 if (int_cause_ext & INT_EXT_LINK_PHY) 1827 if (int_cause_ext & INT_EXT_LINK_PHY)
1782 mp->work_link = 1; 1828 mp->work_link = 1;
1783 mp->work_tx |= int_cause_ext & INT_EXT_TX; 1829 mp->work_tx |= int_cause_ext & INT_EXT_TX;
@@ -1794,7 +1840,7 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1794 if (unlikely(!mv643xx_eth_collect_events(mp))) 1840 if (unlikely(!mv643xx_eth_collect_events(mp)))
1795 return IRQ_NONE; 1841 return IRQ_NONE;
1796 1842
1797 wrl(mp, INT_MASK(mp->port_num), 0); 1843 wrlp(mp, INT_MASK, 0);
1798 napi_schedule(&mp->napi); 1844 napi_schedule(&mp->napi);
1799 1845
1800 return IRQ_HANDLED; 1846 return IRQ_HANDLED;
@@ -1808,7 +1854,7 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
1808 int duplex; 1854 int duplex;
1809 int fc; 1855 int fc;
1810 1856
1811 port_status = rdl(mp, PORT_STATUS(mp->port_num)); 1857 port_status = rdlp(mp, PORT_STATUS);
1812 if (!(port_status & LINK_UP)) { 1858 if (!(port_status & LINK_UP)) {
1813 if (netif_carrier_ok(dev)) { 1859 if (netif_carrier_ok(dev)) {
1814 int i; 1860 int i;
@@ -1908,7 +1954,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
1908 if (mp->work_rx_oom) 1954 if (mp->work_rx_oom)
1909 mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); 1955 mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
1910 napi_complete(napi); 1956 napi_complete(napi);
1911 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 1957 wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
1912 } 1958 }
1913 1959
1914 return work_done; 1960 return work_done;
@@ -1957,17 +2003,17 @@ static void port_start(struct mv643xx_eth_private *mp)
1957 /* 2003 /*
1958 * Configure basic link parameters. 2004 * Configure basic link parameters.
1959 */ 2005 */
1960 pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num)); 2006 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
1961 2007
1962 pscr |= SERIAL_PORT_ENABLE; 2008 pscr |= SERIAL_PORT_ENABLE;
1963 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr); 2009 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
1964 2010
1965 pscr |= DO_NOT_FORCE_LINK_FAIL; 2011 pscr |= DO_NOT_FORCE_LINK_FAIL;
1966 if (mp->phy == NULL) 2012 if (mp->phy == NULL)
1967 pscr |= FORCE_LINK_PASS; 2013 pscr |= FORCE_LINK_PASS;
1968 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr); 2014 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
1969 2015
1970 wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE); 2016 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE);
1971 2017
1972 /* 2018 /*
1973 * Configure TX path and queues. 2019 * Configure TX path and queues.
@@ -1984,31 +2030,30 @@ static void port_start(struct mv643xx_eth_private *mp)
1984 /* 2030 /*
1985 * Add configured unicast address to address filter table. 2031 * Add configured unicast address to address filter table.
1986 */ 2032 */
1987 uc_addr_set(mp, mp->dev->dev_addr); 2033 mv643xx_eth_program_unicast_filter(mp->dev);
1988 2034
1989 /* 2035 /*
1990 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast 2036 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
1991 * frames to RX queue #0, and include the pseudo-header when 2037 * frames to RX queue #0, and include the pseudo-header when
1992 * calculating receive checksums. 2038 * calculating receive checksums.
1993 */ 2039 */
1994 wrl(mp, PORT_CONFIG(mp->port_num), 0x02000000); 2040 wrlp(mp, PORT_CONFIG, 0x02000000);
1995 2041
1996 /* 2042 /*
1997 * Treat BPDUs as normal multicasts, and disable partition mode. 2043 * Treat BPDUs as normal multicasts, and disable partition mode.
1998 */ 2044 */
1999 wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000); 2045 wrlp(mp, PORT_CONFIG_EXT, 0x00000000);
2000 2046
2001 /* 2047 /*
2002 * Enable the receive queues. 2048 * Enable the receive queues.
2003 */ 2049 */
2004 for (i = 0; i < mp->rxq_count; i++) { 2050 for (i = 0; i < mp->rxq_count; i++) {
2005 struct rx_queue *rxq = mp->rxq + i; 2051 struct rx_queue *rxq = mp->rxq + i;
2006 int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
2007 u32 addr; 2052 u32 addr;
2008 2053
2009 addr = (u32)rxq->rx_desc_dma; 2054 addr = (u32)rxq->rx_desc_dma;
2010 addr += rxq->rx_curr_desc * sizeof(struct rx_desc); 2055 addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
2011 wrl(mp, off, addr); 2056 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr);
2012 2057
2013 rxq_enable(rxq); 2058 rxq_enable(rxq);
2014 } 2059 }
@@ -2019,7 +2064,7 @@ static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
2019 unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64; 2064 unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
2020 u32 val; 2065 u32 val;
2021 2066
2022 val = rdl(mp, SDMA_CONFIG(mp->port_num)); 2067 val = rdlp(mp, SDMA_CONFIG);
2023 if (mp->shared->extended_rx_coal_limit) { 2068 if (mp->shared->extended_rx_coal_limit) {
2024 if (coal > 0xffff) 2069 if (coal > 0xffff)
2025 coal = 0xffff; 2070 coal = 0xffff;
@@ -2032,7 +2077,7 @@ static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
2032 val &= ~0x003fff00; 2077 val &= ~0x003fff00;
2033 val |= (coal & 0x3fff) << 8; 2078 val |= (coal & 0x3fff) << 8;
2034 } 2079 }
2035 wrl(mp, SDMA_CONFIG(mp->port_num), val); 2080 wrlp(mp, SDMA_CONFIG, val);
2036} 2081}
2037 2082
2038static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay) 2083static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
@@ -2041,7 +2086,7 @@ static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
2041 2086
2042 if (coal > 0x3fff) 2087 if (coal > 0x3fff)
2043 coal = 0x3fff; 2088 coal = 0x3fff;
2044 wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4); 2089 wrlp(mp, TX_FIFO_URGENT_THRESHOLD, (coal & 0x3fff) << 4);
2045} 2090}
2046 2091
2047static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) 2092static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
@@ -2070,9 +2115,9 @@ static int mv643xx_eth_open(struct net_device *dev)
2070 int err; 2115 int err;
2071 int i; 2116 int i;
2072 2117
2073 wrl(mp, INT_CAUSE(mp->port_num), 0); 2118 wrlp(mp, INT_CAUSE, 0);
2074 wrl(mp, INT_CAUSE_EXT(mp->port_num), 0); 2119 wrlp(mp, INT_CAUSE_EXT, 0);
2075 rdl(mp, INT_CAUSE_EXT(mp->port_num)); 2120 rdlp(mp, INT_CAUSE_EXT);
2076 2121
2077 err = request_irq(dev->irq, mv643xx_eth_irq, 2122 err = request_irq(dev->irq, mv643xx_eth_irq,
2078 IRQF_SHARED, dev->name, dev); 2123 IRQF_SHARED, dev->name, dev);
@@ -2081,8 +2126,6 @@ static int mv643xx_eth_open(struct net_device *dev)
2081 return -EAGAIN; 2126 return -EAGAIN;
2082 } 2127 }
2083 2128
2084 init_mac_tables(mp);
2085
2086 mv643xx_eth_recalc_skb_size(mp); 2129 mv643xx_eth_recalc_skb_size(mp);
2087 2130
2088 napi_enable(&mp->napi); 2131 napi_enable(&mp->napi);
@@ -2121,8 +2164,8 @@ static int mv643xx_eth_open(struct net_device *dev)
2121 set_rx_coal(mp, 0); 2164 set_rx_coal(mp, 0);
2122 set_tx_coal(mp, 0); 2165 set_tx_coal(mp, 0);
2123 2166
2124 wrl(mp, INT_MASK_EXT(mp->port_num), INT_EXT_LINK_PHY | INT_EXT_TX); 2167 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
2125 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 2168 wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
2126 2169
2127 return 0; 2170 return 0;
2128 2171
@@ -2147,7 +2190,7 @@ static void port_reset(struct mv643xx_eth_private *mp)
2147 txq_disable(mp->txq + i); 2190 txq_disable(mp->txq + i);
2148 2191
2149 while (1) { 2192 while (1) {
2150 u32 ps = rdl(mp, PORT_STATUS(mp->port_num)); 2193 u32 ps = rdlp(mp, PORT_STATUS);
2151 2194
2152 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY) 2195 if ((ps & (TX_IN_PROGRESS | TX_FIFO_EMPTY)) == TX_FIFO_EMPTY)
2153 break; 2196 break;
@@ -2155,11 +2198,11 @@ static void port_reset(struct mv643xx_eth_private *mp)
2155 } 2198 }
2156 2199
2157 /* Reset the Enable bit in the Configuration Register */ 2200 /* Reset the Enable bit in the Configuration Register */
2158 data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num)); 2201 data = rdlp(mp, PORT_SERIAL_CONTROL);
2159 data &= ~(SERIAL_PORT_ENABLE | 2202 data &= ~(SERIAL_PORT_ENABLE |
2160 DO_NOT_FORCE_LINK_FAIL | 2203 DO_NOT_FORCE_LINK_FAIL |
2161 FORCE_LINK_PASS); 2204 FORCE_LINK_PASS);
2162 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), data); 2205 wrlp(mp, PORT_SERIAL_CONTROL, data);
2163} 2206}
2164 2207
2165static int mv643xx_eth_stop(struct net_device *dev) 2208static int mv643xx_eth_stop(struct net_device *dev)
@@ -2167,8 +2210,8 @@ static int mv643xx_eth_stop(struct net_device *dev)
2167 struct mv643xx_eth_private *mp = netdev_priv(dev); 2210 struct mv643xx_eth_private *mp = netdev_priv(dev);
2168 int i; 2211 int i;
2169 2212
2170 wrl(mp, INT_MASK(mp->port_num), 0x00000000); 2213 wrlp(mp, INT_MASK, 0x00000000);
2171 rdl(mp, INT_MASK(mp->port_num)); 2214 rdlp(mp, INT_MASK);
2172 2215
2173 del_timer_sync(&mp->mib_counters_timer); 2216 del_timer_sync(&mp->mib_counters_timer);
2174 2217
@@ -2261,12 +2304,12 @@ static void mv643xx_eth_netpoll(struct net_device *dev)
2261{ 2304{
2262 struct mv643xx_eth_private *mp = netdev_priv(dev); 2305 struct mv643xx_eth_private *mp = netdev_priv(dev);
2263 2306
2264 wrl(mp, INT_MASK(mp->port_num), 0x00000000); 2307 wrlp(mp, INT_MASK, 0x00000000);
2265 rdl(mp, INT_MASK(mp->port_num)); 2308 rdlp(mp, INT_MASK);
2266 2309
2267 mv643xx_eth_irq(dev->irq, dev); 2310 mv643xx_eth_irq(dev->irq, dev);
2268 2311
2269 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 2312 wrlp(mp, INT_MASK, INT_TX_END | INT_RX | INT_EXT);
2270} 2313}
2271#endif 2314#endif
2272 2315
@@ -2314,8 +2357,8 @@ static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2314 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the 2357 * [21:8], or a 16-bit coal limit in bits [25,21:7] of the
2315 * SDMA config register. 2358 * SDMA config register.
2316 */ 2359 */
2317 writel(0x02000000, msp->base + SDMA_CONFIG(0)); 2360 writel(0x02000000, msp->base + 0x0400 + SDMA_CONFIG);
2318 if (readl(msp->base + SDMA_CONFIG(0)) & 0x02000000) 2361 if (readl(msp->base + 0x0400 + SDMA_CONFIG) & 0x02000000)
2319 msp->extended_rx_coal_limit = 1; 2362 msp->extended_rx_coal_limit = 1;
2320 else 2363 else
2321 msp->extended_rx_coal_limit = 0; 2364 msp->extended_rx_coal_limit = 0;
@@ -2325,12 +2368,12 @@ static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2325 * yes, whether its associated registers are in the old or 2368 * yes, whether its associated registers are in the old or
2326 * the new place. 2369 * the new place.
2327 */ 2370 */
2328 writel(1, msp->base + TX_BW_MTU_MOVED(0)); 2371 writel(1, msp->base + 0x0400 + TX_BW_MTU_MOVED);
2329 if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1) { 2372 if (readl(msp->base + 0x0400 + TX_BW_MTU_MOVED) & 1) {
2330 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT; 2373 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
2331 } else { 2374 } else {
2332 writel(7, msp->base + TX_BW_RATE(0)); 2375 writel(7, msp->base + 0x0400 + TX_BW_RATE);
2333 if (readl(msp->base + TX_BW_RATE(0)) & 7) 2376 if (readl(msp->base + 0x0400 + TX_BW_RATE) & 7)
2334 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT; 2377 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
2335 else 2378 else
2336 msp->tx_bw_control = TX_BW_CONTROL_ABSENT; 2379 msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
@@ -2339,7 +2382,7 @@ static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2339 2382
2340static int mv643xx_eth_shared_probe(struct platform_device *pdev) 2383static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2341{ 2384{
2342 static int mv643xx_eth_version_printed = 0; 2385 static int mv643xx_eth_version_printed;
2343 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; 2386 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2344 struct mv643xx_eth_shared_private *msp; 2387 struct mv643xx_eth_shared_private *msp;
2345 struct resource *res; 2388 struct resource *res;
@@ -2563,10 +2606,10 @@ static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
2563{ 2606{
2564 u32 pscr; 2607 u32 pscr;
2565 2608
2566 pscr = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num)); 2609 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
2567 if (pscr & SERIAL_PORT_ENABLE) { 2610 if (pscr & SERIAL_PORT_ENABLE) {
2568 pscr &= ~SERIAL_PORT_ENABLE; 2611 pscr &= ~SERIAL_PORT_ENABLE;
2569 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr); 2612 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2570 } 2613 }
2571 2614
2572 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; 2615 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
@@ -2584,7 +2627,7 @@ static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
2584 pscr |= SET_FULL_DUPLEX_MODE; 2627 pscr |= SET_FULL_DUPLEX_MODE;
2585 } 2628 }
2586 2629
2587 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr); 2630 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
2588} 2631}
2589 2632
2590static int mv643xx_eth_probe(struct platform_device *pdev) 2633static int mv643xx_eth_probe(struct platform_device *pdev)
@@ -2593,7 +2636,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2593 struct mv643xx_eth_private *mp; 2636 struct mv643xx_eth_private *mp;
2594 struct net_device *dev; 2637 struct net_device *dev;
2595 struct resource *res; 2638 struct resource *res;
2596 DECLARE_MAC_BUF(mac);
2597 int err; 2639 int err;
2598 2640
2599 pd = pdev->dev.platform_data; 2641 pd = pdev->dev.platform_data;
@@ -2617,6 +2659,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2617 platform_set_drvdata(pdev, mp); 2659 platform_set_drvdata(pdev, mp);
2618 2660
2619 mp->shared = platform_get_drvdata(pd->shared); 2661 mp->shared = platform_get_drvdata(pd->shared);
2662 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10);
2620 mp->port_num = pd->port_number; 2663 mp->port_num = pd->port_number;
2621 2664
2622 mp->dev = dev; 2665 mp->dev = dev;
@@ -2664,7 +2707,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2664 dev->hard_start_xmit = mv643xx_eth_xmit; 2707 dev->hard_start_xmit = mv643xx_eth_xmit;
2665 dev->open = mv643xx_eth_open; 2708 dev->open = mv643xx_eth_open;
2666 dev->stop = mv643xx_eth_stop; 2709 dev->stop = mv643xx_eth_stop;
2667 dev->set_multicast_list = mv643xx_eth_set_rx_mode; 2710 dev->set_rx_mode = mv643xx_eth_set_rx_mode;
2668 dev->set_mac_address = mv643xx_eth_set_mac_address; 2711 dev->set_mac_address = mv643xx_eth_set_mac_address;
2669 dev->do_ioctl = mv643xx_eth_ioctl; 2712 dev->do_ioctl = mv643xx_eth_ioctl;
2670 dev->change_mtu = mv643xx_eth_change_mtu; 2713 dev->change_mtu = mv643xx_eth_change_mtu;
@@ -2687,8 +2730,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2687 if (err) 2730 if (err)
2688 goto out; 2731 goto out;
2689 2732
2690 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n", 2733 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n",
2691 mp->port_num, print_mac(mac, dev->dev_addr)); 2734 mp->port_num, dev->dev_addr);
2692 2735
2693 if (mp->tx_desc_sram_size > 0) 2736 if (mp->tx_desc_sram_size > 0)
2694 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); 2737 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
@@ -2721,8 +2764,8 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
2721 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 2764 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2722 2765
2723 /* Mask all interrupts on ethernet port */ 2766 /* Mask all interrupts on ethernet port */
2724 wrl(mp, INT_MASK(mp->port_num), 0); 2767 wrlp(mp, INT_MASK, 0);
2725 rdl(mp, INT_MASK(mp->port_num)); 2768 rdlp(mp, INT_MASK);
2726 2769
2727 if (netif_running(mp->dev)) 2770 if (netif_running(mp->dev))
2728 port_reset(mp); 2771 port_reset(mp);
diff --git a/drivers/net/mvme147.c b/drivers/net/mvme147.c
index 06ca4252155f..435e5a847c43 100644
--- a/drivers/net/mvme147.c
+++ b/drivers/net/mvme147.c
@@ -67,7 +67,6 @@ struct net_device * __init mvme147lance_probe(int unit)
67 u_long *addr; 67 u_long *addr;
68 u_long address; 68 u_long address;
69 int err; 69 int err;
70 DECLARE_MAC_BUF(mac);
71 70
72 if (!MACH_IS_MVME147 || called) 71 if (!MACH_IS_MVME147 || called)
73 return ERR_PTR(-ENODEV); 72 return ERR_PTR(-ENODEV);
@@ -102,11 +101,11 @@ struct net_device * __init mvme147lance_probe(int unit)
102 dev->dev_addr[3]=address&0xff; 101 dev->dev_addr[3]=address&0xff;
103 102
104 printk("%s: MVME147 at 0x%08lx, irq %d, " 103 printk("%s: MVME147 at 0x%08lx, irq %d, "
105 "Hardware Address %s\n", 104 "Hardware Address %pM\n",
106 dev->name, dev->base_addr, MVME147_LANCE_IRQ, 105 dev->name, dev->base_addr, MVME147_LANCE_IRQ,
107 print_mac(mac, dev->dev_addr)); 106 dev->dev_addr);
108 107
109 lp = (struct m147lance_private *)dev->priv; 108 lp = netdev_priv(dev);
110 lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */ 109 lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */
111 if (!lp->ram) 110 if (!lp->ram)
112 { 111 {
@@ -190,7 +189,7 @@ int __init init_module(void)
190 189
191void __exit cleanup_module(void) 190void __exit cleanup_module(void)
192{ 191{
193 struct m147lance_private *lp = dev_mvme147_lance->priv; 192 struct m147lance_private *lp = netdev_priv(dev_mvme147_lance);
194 unregister_netdev(dev_mvme147_lance); 193 unregister_netdev(dev_mvme147_lance);
195 free_pages(lp->ram, 3); 194 free_pages(lp->ram, 3);
196 free_netdev(dev_mvme147_lance); 195 free_netdev(dev_mvme147_lance);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index b37867097308..5e70180bf569 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
75#include "myri10ge_mcp.h" 75#include "myri10ge_mcp.h"
76#include "myri10ge_mcp_gen_header.h" 76#include "myri10ge_mcp_gen_header.h"
77 77
78#define MYRI10GE_VERSION_STR "1.4.3-1.378" 78#define MYRI10GE_VERSION_STR "1.4.4-1.395"
79 79
80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 80MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
81MODULE_AUTHOR("Maintainer: help@myri.com"); 81MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -1024,7 +1024,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
1024 ss->dca_tag = NULL; 1024 ss->dca_tag = NULL;
1025 } 1025 }
1026 } 1026 }
1027#endif /* CONFIG_DCA */ 1027#endif /* CONFIG_MYRI10GE_DCA */
1028 1028
1029 /* reset mcp/driver shared state back to 0 */ 1029 /* reset mcp/driver shared state back to 0 */
1030 1030
@@ -1121,7 +1121,7 @@ static int myri10ge_notify_dca_device(struct device *dev, void *data)
1121 myri10ge_teardown_dca(mgp); 1121 myri10ge_teardown_dca(mgp);
1122 return 0; 1122 return 0;
1123} 1123}
1124#endif /* CONFIG_DCA */ 1124#endif /* CONFIG_MYRI10GE_DCA */
1125 1125
1126static inline void 1126static inline void
1127myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst, 1127myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
@@ -1309,7 +1309,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
1309 1309
1310 skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16); 1310 skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
1311 if (unlikely(skb == NULL)) { 1311 if (unlikely(skb == NULL)) {
1312 mgp->stats.rx_dropped++; 1312 ss->stats.rx_dropped++;
1313 do { 1313 do {
1314 i--; 1314 i--;
1315 put_page(rx_frags[i].page); 1315 put_page(rx_frags[i].page);
@@ -1334,7 +1334,6 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
1334 myri10ge_vlan_ip_csum(skb, csum); 1334 myri10ge_vlan_ip_csum(skb, csum);
1335 } 1335 }
1336 netif_receive_skb(skb); 1336 netif_receive_skb(skb);
1337 dev->last_rx = jiffies;
1338 return 1; 1337 return 1;
1339} 1338}
1340 1339
@@ -1504,7 +1503,6 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
1504{ 1503{
1505 struct myri10ge_slice_state *ss = 1504 struct myri10ge_slice_state *ss =
1506 container_of(napi, struct myri10ge_slice_state, napi); 1505 container_of(napi, struct myri10ge_slice_state, napi);
1507 struct net_device *netdev = ss->mgp->dev;
1508 int work_done; 1506 int work_done;
1509 1507
1510#ifdef CONFIG_MYRI10GE_DCA 1508#ifdef CONFIG_MYRI10GE_DCA
@@ -1516,7 +1514,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
1516 work_done = myri10ge_clean_rx_done(ss, budget); 1514 work_done = myri10ge_clean_rx_done(ss, budget);
1517 1515
1518 if (work_done < budget) { 1516 if (work_done < budget) {
1519 netif_rx_complete(netdev, napi); 1517 netif_rx_complete(napi);
1520 put_be32(htonl(3), ss->irq_claim); 1518 put_be32(htonl(3), ss->irq_claim);
1521 } 1519 }
1522 return work_done; 1520 return work_done;
@@ -1534,7 +1532,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1534 /* an interrupt on a non-zero receive-only slice is implicitly 1532 /* an interrupt on a non-zero receive-only slice is implicitly
1535 * valid since MSI-X irqs are not shared */ 1533 * valid since MSI-X irqs are not shared */
1536 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) { 1534 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
1537 netif_rx_schedule(ss->dev, &ss->napi); 1535 netif_rx_schedule(&ss->napi);
1538 return (IRQ_HANDLED); 1536 return (IRQ_HANDLED);
1539 } 1537 }
1540 1538
@@ -1545,7 +1543,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
1545 /* low bit indicates receives are present, so schedule 1543 /* low bit indicates receives are present, so schedule
1546 * napi poll handler */ 1544 * napi poll handler */
1547 if (stats->valid & 1) 1545 if (stats->valid & 1)
1548 netif_rx_schedule(ss->dev, &ss->napi); 1546 netif_rx_schedule(&ss->napi);
1549 1547
1550 if (!mgp->msi_enabled && !mgp->msix_enabled) { 1548 if (!mgp->msi_enabled && !mgp->msix_enabled) {
1551 put_be32(0, mgp->irq_deassert); 1549 put_be32(0, mgp->irq_deassert);
@@ -2230,6 +2228,8 @@ myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
2230 *ip_hdr = iph; 2228 *ip_hdr = iph;
2231 if (iph->protocol != IPPROTO_TCP) 2229 if (iph->protocol != IPPROTO_TCP)
2232 return -1; 2230 return -1;
2231 if (iph->frag_off & htons(IP_MF | IP_OFFSET))
2232 return -1;
2233 *hdr_flags |= LRO_TCP; 2233 *hdr_flags |= LRO_TCP;
2234 *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2); 2234 *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
2235 2235
@@ -2927,6 +2927,7 @@ static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev)
2927{ 2927{
2928 struct sk_buff *segs, *curr; 2928 struct sk_buff *segs, *curr;
2929 struct myri10ge_priv *mgp = netdev_priv(dev); 2929 struct myri10ge_priv *mgp = netdev_priv(dev);
2930 struct myri10ge_slice_state *ss;
2930 int status; 2931 int status;
2931 2932
2932 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); 2933 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
@@ -2953,8 +2954,9 @@ static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev)
2953 return 0; 2954 return 0;
2954 2955
2955drop: 2956drop:
2957 ss = &mgp->ss[skb_get_queue_mapping(skb)];
2956 dev_kfree_skb_any(skb); 2958 dev_kfree_skb_any(skb);
2957 mgp->stats.tx_dropped += 1; 2959 ss->stats.tx_dropped += 1;
2958 return 0; 2960 return 0;
2959} 2961}
2960 2962
@@ -2985,7 +2987,6 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
2985 struct dev_mc_list *mc_list; 2987 struct dev_mc_list *mc_list;
2986 __be32 data[2] = { 0, 0 }; 2988 __be32 data[2] = { 0, 0 };
2987 int err; 2989 int err;
2988 DECLARE_MAC_BUF(mac);
2989 2990
2990 /* can be called from atomic contexts, 2991 /* can be called from atomic contexts,
2991 * pass 1 to force atomicity in myri10ge_send_cmd() */ 2992 * pass 1 to force atomicity in myri10ge_send_cmd() */
@@ -3032,8 +3033,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
3032 printk(KERN_ERR "myri10ge: %s: Failed " 3033 printk(KERN_ERR "myri10ge: %s: Failed "
3033 "MXGEFW_JOIN_MULTICAST_GROUP, error status:" 3034 "MXGEFW_JOIN_MULTICAST_GROUP, error status:"
3034 "%d\t", dev->name, err); 3035 "%d\t", dev->name, err);
3035 printk(KERN_ERR "MAC %s\n", 3036 printk(KERN_ERR "MAC %pM\n", mc_list->dmi_addr);
3036 print_mac(mac, mc_list->dmi_addr));
3037 goto abort; 3037 goto abort;
3038 } 3038 }
3039 } 3039 }
@@ -3732,6 +3732,17 @@ abort_with_fw:
3732 myri10ge_load_firmware(mgp, 0); 3732 myri10ge_load_firmware(mgp, 0);
3733} 3733}
3734 3734
3735static const struct net_device_ops myri10ge_netdev_ops = {
3736 .ndo_open = myri10ge_open,
3737 .ndo_stop = myri10ge_close,
3738 .ndo_start_xmit = myri10ge_xmit,
3739 .ndo_get_stats = myri10ge_get_stats,
3740 .ndo_validate_addr = eth_validate_addr,
3741 .ndo_change_mtu = myri10ge_change_mtu,
3742 .ndo_set_multicast_list = myri10ge_set_multicast_list,
3743 .ndo_set_mac_address = myri10ge_set_mac_address,
3744};
3745
3735static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3746static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3736{ 3747{
3737 struct net_device *netdev; 3748 struct net_device *netdev;
@@ -3740,6 +3751,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3740 int i; 3751 int i;
3741 int status = -ENXIO; 3752 int status = -ENXIO;
3742 int dac_enabled; 3753 int dac_enabled;
3754 unsigned hdr_offset, ss_offset;
3743 3755
3744 netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES); 3756 netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
3745 if (netdev == NULL) { 3757 if (netdev == NULL) {
@@ -3807,14 +3819,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3807 if (mgp->mtrr >= 0) 3819 if (mgp->mtrr >= 0)
3808 mgp->wc_enabled = 1; 3820 mgp->wc_enabled = 1;
3809#endif 3821#endif
3810 /* Hack. need to get rid of these magic numbers */
3811 mgp->sram_size =
3812 2 * 1024 * 1024 - (2 * (48 * 1024) + (32 * 1024)) - 0x100;
3813 if (mgp->sram_size > mgp->board_span) {
3814 dev_err(&pdev->dev, "board span %ld bytes too small\n",
3815 mgp->board_span);
3816 goto abort_with_mtrr;
3817 }
3818 mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span); 3822 mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
3819 if (mgp->sram == NULL) { 3823 if (mgp->sram == NULL) {
3820 dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", 3824 dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
@@ -3822,9 +3826,19 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3822 status = -ENXIO; 3826 status = -ENXIO;
3823 goto abort_with_mtrr; 3827 goto abort_with_mtrr;
3824 } 3828 }
3829 hdr_offset =
3830 ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
3831 ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
3832 mgp->sram_size = ntohl(__raw_readl(mgp->sram + ss_offset));
3833 if (mgp->sram_size > mgp->board_span ||
3834 mgp->sram_size <= MYRI10GE_FW_OFFSET) {
3835 dev_err(&pdev->dev,
3836 "invalid sram_size %dB or board span %ldB\n",
3837 mgp->sram_size, mgp->board_span);
3838 goto abort_with_ioremap;
3839 }
3825 memcpy_fromio(mgp->eeprom_strings, 3840 memcpy_fromio(mgp->eeprom_strings,
3826 mgp->sram + mgp->sram_size - MYRI10GE_EEPROM_STRINGS_SIZE, 3841 mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
3827 MYRI10GE_EEPROM_STRINGS_SIZE);
3828 memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2); 3842 memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
3829 status = myri10ge_read_mac_addr(mgp); 3843 status = myri10ge_read_mac_addr(mgp);
3830 if (status) 3844 if (status)
@@ -3860,15 +3874,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3860 myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN; 3874 myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
3861 if ((myri10ge_initial_mtu + ETH_HLEN) < 68) 3875 if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
3862 myri10ge_initial_mtu = 68; 3876 myri10ge_initial_mtu = 68;
3877
3878 netdev->netdev_ops = &myri10ge_netdev_ops;
3863 netdev->mtu = myri10ge_initial_mtu; 3879 netdev->mtu = myri10ge_initial_mtu;
3864 netdev->open = myri10ge_open;
3865 netdev->stop = myri10ge_close;
3866 netdev->hard_start_xmit = myri10ge_xmit;
3867 netdev->get_stats = myri10ge_get_stats;
3868 netdev->base_addr = mgp->iomem_base; 3880 netdev->base_addr = mgp->iomem_base;
3869 netdev->change_mtu = myri10ge_change_mtu;
3870 netdev->set_multicast_list = myri10ge_set_multicast_list;
3871 netdev->set_mac_address = myri10ge_set_mac_address;
3872 netdev->features = mgp->features; 3881 netdev->features = mgp->features;
3873 3882
3874 if (dac_enabled) 3883 if (dac_enabled)
@@ -4019,7 +4028,7 @@ static struct notifier_block myri10ge_dca_notifier = {
4019 .next = NULL, 4028 .next = NULL,
4020 .priority = 0, 4029 .priority = 0,
4021}; 4030};
4022#endif /* CONFIG_DCA */ 4031#endif /* CONFIG_MYRI10GE_DCA */
4023 4032
4024static __init int myri10ge_init_module(void) 4033static __init int myri10ge_init_module(void)
4025{ 4034{
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
index 993721090777..11be150e4d67 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -111,61 +111,61 @@ enum myri10ge_mcp_cmd_type {
111 MXGEFW_CMD_NONE = 0, 111 MXGEFW_CMD_NONE = 0,
112 /* Reset the mcp, it is left in a safe state, waiting 112 /* Reset the mcp, it is left in a safe state, waiting
113 * for the driver to set all its parameters */ 113 * for the driver to set all its parameters */
114 MXGEFW_CMD_RESET, 114 MXGEFW_CMD_RESET = 1,
115 115
116 /* get the version number of the current firmware.. 116 /* get the version number of the current firmware..
117 * (may be available in the eeprom strings..? */ 117 * (may be available in the eeprom strings..? */
118 MXGEFW_GET_MCP_VERSION, 118 MXGEFW_GET_MCP_VERSION = 2,
119 119
120 /* Parameters which must be set by the driver before it can 120 /* Parameters which must be set by the driver before it can
121 * issue MXGEFW_CMD_ETHERNET_UP. They persist until the next 121 * issue MXGEFW_CMD_ETHERNET_UP. They persist until the next
122 * MXGEFW_CMD_RESET is issued */ 122 * MXGEFW_CMD_RESET is issued */
123 123
124 MXGEFW_CMD_SET_INTRQ_DMA, 124 MXGEFW_CMD_SET_INTRQ_DMA = 3,
125 /* data0 = LSW of the host address 125 /* data0 = LSW of the host address
126 * data1 = MSW of the host address 126 * data1 = MSW of the host address
127 * data2 = slice number if multiple slices are used 127 * data2 = slice number if multiple slices are used
128 */ 128 */
129 129
130 MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */ 130 MXGEFW_CMD_SET_BIG_BUFFER_SIZE = 4, /* in bytes, power of 2 */
131 MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */ 131 MXGEFW_CMD_SET_SMALL_BUFFER_SIZE = 5, /* in bytes */
132 132
133 /* Parameters which refer to lanai SRAM addresses where the 133 /* Parameters which refer to lanai SRAM addresses where the
134 * driver must issue PIO writes for various things */ 134 * driver must issue PIO writes for various things */
135 135
136 MXGEFW_CMD_GET_SEND_OFFSET, 136 MXGEFW_CMD_GET_SEND_OFFSET = 6,
137 MXGEFW_CMD_GET_SMALL_RX_OFFSET, 137 MXGEFW_CMD_GET_SMALL_RX_OFFSET = 7,
138 MXGEFW_CMD_GET_BIG_RX_OFFSET, 138 MXGEFW_CMD_GET_BIG_RX_OFFSET = 8,
139 /* data0 = slice number if multiple slices are used */ 139 /* data0 = slice number if multiple slices are used */
140 140
141 MXGEFW_CMD_GET_IRQ_ACK_OFFSET, 141 MXGEFW_CMD_GET_IRQ_ACK_OFFSET = 9,
142 MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, 142 MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET = 10,
143 143
144 /* Parameters which refer to rings stored on the MCP, 144 /* Parameters which refer to rings stored on the MCP,
145 * and whose size is controlled by the mcp */ 145 * and whose size is controlled by the mcp */
146 146
147 MXGEFW_CMD_GET_SEND_RING_SIZE, /* in bytes */ 147 MXGEFW_CMD_GET_SEND_RING_SIZE = 11, /* in bytes */
148 MXGEFW_CMD_GET_RX_RING_SIZE, /* in bytes */ 148 MXGEFW_CMD_GET_RX_RING_SIZE = 12, /* in bytes */
149 149
150 /* Parameters which refer to rings stored in the host, 150 /* Parameters which refer to rings stored in the host,
151 * and whose size is controlled by the host. Note that 151 * and whose size is controlled by the host. Note that
152 * all must be physically contiguous and must contain 152 * all must be physically contiguous and must contain
153 * a power of 2 number of entries. */ 153 * a power of 2 number of entries. */
154 154
155 MXGEFW_CMD_SET_INTRQ_SIZE, /* in bytes */ 155 MXGEFW_CMD_SET_INTRQ_SIZE = 13, /* in bytes */
156#define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31) 156#define MXGEFW_CMD_SET_INTRQ_SIZE_FLAG_NO_STRICT_SIZE_CHECK (1 << 31)
157 157
158 /* command to bring ethernet interface up. Above parameters 158 /* command to bring ethernet interface up. Above parameters
159 * (plus mtu & mac address) must have been exchanged prior 159 * (plus mtu & mac address) must have been exchanged prior
160 * to issuing this command */ 160 * to issuing this command */
161 MXGEFW_CMD_ETHERNET_UP, 161 MXGEFW_CMD_ETHERNET_UP = 14,
162 162
163 /* command to bring ethernet interface down. No further sends 163 /* command to bring ethernet interface down. No further sends
164 * or receives may be processed until an MXGEFW_CMD_ETHERNET_UP 164 * or receives may be processed until an MXGEFW_CMD_ETHERNET_UP
165 * is issued, and all interrupt queues must be flushed prior 165 * is issued, and all interrupt queues must be flushed prior
166 * to ack'ing this command */ 166 * to ack'ing this command */
167 167
168 MXGEFW_CMD_ETHERNET_DOWN, 168 MXGEFW_CMD_ETHERNET_DOWN = 15,
169 169
170 /* commands the driver may issue live, without resetting 170 /* commands the driver may issue live, without resetting
171 * the nic. Note that increasing the mtu "live" should 171 * the nic. Note that increasing the mtu "live" should
@@ -173,40 +173,40 @@ enum myri10ge_mcp_cmd_type {
173 * sufficiently large to handle the new mtu. Decreasing 173 * sufficiently large to handle the new mtu. Decreasing
174 * the mtu live is safe */ 174 * the mtu live is safe */
175 175
176 MXGEFW_CMD_SET_MTU, 176 MXGEFW_CMD_SET_MTU = 16,
177 MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, /* in microseconds */ 177 MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET = 17, /* in microseconds */
178 MXGEFW_CMD_SET_STATS_INTERVAL, /* in microseconds */ 178 MXGEFW_CMD_SET_STATS_INTERVAL = 18, /* in microseconds */
179 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE, /* replaced by SET_STATS_DMA_V2 */ 179 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE = 19, /* replaced by SET_STATS_DMA_V2 */
180 180
181 MXGEFW_ENABLE_PROMISC, 181 MXGEFW_ENABLE_PROMISC = 20,
182 MXGEFW_DISABLE_PROMISC, 182 MXGEFW_DISABLE_PROMISC = 21,
183 MXGEFW_SET_MAC_ADDRESS, 183 MXGEFW_SET_MAC_ADDRESS = 22,
184 184
185 MXGEFW_ENABLE_FLOW_CONTROL, 185 MXGEFW_ENABLE_FLOW_CONTROL = 23,
186 MXGEFW_DISABLE_FLOW_CONTROL, 186 MXGEFW_DISABLE_FLOW_CONTROL = 24,
187 187
188 /* do a DMA test 188 /* do a DMA test
189 * data0,data1 = DMA address 189 * data0,data1 = DMA address
190 * data2 = RDMA length (MSH), WDMA length (LSH) 190 * data2 = RDMA length (MSH), WDMA length (LSH)
191 * command return data = repetitions (MSH), 0.5-ms ticks (LSH) 191 * command return data = repetitions (MSH), 0.5-ms ticks (LSH)
192 */ 192 */
193 MXGEFW_DMA_TEST, 193 MXGEFW_DMA_TEST = 25,
194 194
195 MXGEFW_ENABLE_ALLMULTI, 195 MXGEFW_ENABLE_ALLMULTI = 26,
196 MXGEFW_DISABLE_ALLMULTI, 196 MXGEFW_DISABLE_ALLMULTI = 27,
197 197
198 /* returns MXGEFW_CMD_ERROR_MULTICAST 198 /* returns MXGEFW_CMD_ERROR_MULTICAST
199 * if there is no room in the cache 199 * if there is no room in the cache
200 * data0,MSH(data1) = multicast group address */ 200 * data0,MSH(data1) = multicast group address */
201 MXGEFW_JOIN_MULTICAST_GROUP, 201 MXGEFW_JOIN_MULTICAST_GROUP = 28,
202 /* returns MXGEFW_CMD_ERROR_MULTICAST 202 /* returns MXGEFW_CMD_ERROR_MULTICAST
203 * if the address is not in the cache, 203 * if the address is not in the cache,
204 * or is equal to FF-FF-FF-FF-FF-FF 204 * or is equal to FF-FF-FF-FF-FF-FF
205 * data0,MSH(data1) = multicast group address */ 205 * data0,MSH(data1) = multicast group address */
206 MXGEFW_LEAVE_MULTICAST_GROUP, 206 MXGEFW_LEAVE_MULTICAST_GROUP = 29,
207 MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, 207 MXGEFW_LEAVE_ALL_MULTICAST_GROUPS = 30,
208 208
209 MXGEFW_CMD_SET_STATS_DMA_V2, 209 MXGEFW_CMD_SET_STATS_DMA_V2 = 31,
210 /* data0, data1 = bus addr, 210 /* data0, data1 = bus addr,
211 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows 211 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows
212 * adding new stuff to mcp_irq_data without changing the ABI 212 * adding new stuff to mcp_irq_data without changing the ABI
@@ -216,14 +216,14 @@ enum myri10ge_mcp_cmd_type {
216 * (in the upper 16 bits). 216 * (in the upper 16 bits).
217 */ 217 */
218 218
219 MXGEFW_CMD_UNALIGNED_TEST, 219 MXGEFW_CMD_UNALIGNED_TEST = 32,
220 /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned 220 /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned
221 * chipset */ 221 * chipset */
222 222
223 MXGEFW_CMD_UNALIGNED_STATUS, 223 MXGEFW_CMD_UNALIGNED_STATUS = 33,
224 /* return data = boolean, true if the chipset is known to be unaligned */ 224 /* return data = boolean, true if the chipset is known to be unaligned */
225 225
226 MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS, 226 MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS = 34,
227 /* data0 = number of big buffers to use. It must be 0 or a power of 2. 227 /* data0 = number of big buffers to use. It must be 0 or a power of 2.
228 * 0 indicates that the NIC consumes as many buffers as they are required 228 * 0 indicates that the NIC consumes as many buffers as they are required
229 * for packet. This is the default behavior. 229 * for packet. This is the default behavior.
@@ -233,8 +233,8 @@ enum myri10ge_mcp_cmd_type {
233 * the NIC to be able to receive maximum-sized packets. 233 * the NIC to be able to receive maximum-sized packets.
234 */ 234 */
235 235
236 MXGEFW_CMD_GET_MAX_RSS_QUEUES, 236 MXGEFW_CMD_GET_MAX_RSS_QUEUES = 35,
237 MXGEFW_CMD_ENABLE_RSS_QUEUES, 237 MXGEFW_CMD_ENABLE_RSS_QUEUES = 36,
238 /* data0 = number of slices n (0, 1, ..., n-1) to enable 238 /* data0 = number of slices n (0, 1, ..., n-1) to enable
239 * data1 = interrupt mode | use of multiple transmit queues. 239 * data1 = interrupt mode | use of multiple transmit queues.
240 * 0=share one INTx/MSI. 240 * 0=share one INTx/MSI.
@@ -249,18 +249,18 @@ enum myri10ge_mcp_cmd_type {
249#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1 249#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1
250#define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2 250#define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2
251 251
252 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, 252 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET = 37,
253 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, 253 MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA = 38,
254 /* data0, data1 = bus address lsw, msw */ 254 /* data0, data1 = bus address lsw, msw */
255 MXGEFW_CMD_GET_RSS_TABLE_OFFSET, 255 MXGEFW_CMD_GET_RSS_TABLE_OFFSET = 39,
256 /* get the offset of the indirection table */ 256 /* get the offset of the indirection table */
257 MXGEFW_CMD_SET_RSS_TABLE_SIZE, 257 MXGEFW_CMD_SET_RSS_TABLE_SIZE = 40,
258 /* set the size of the indirection table */ 258 /* set the size of the indirection table */
259 MXGEFW_CMD_GET_RSS_KEY_OFFSET, 259 MXGEFW_CMD_GET_RSS_KEY_OFFSET = 41,
260 /* get the offset of the secret key */ 260 /* get the offset of the secret key */
261 MXGEFW_CMD_RSS_KEY_UPDATED, 261 MXGEFW_CMD_RSS_KEY_UPDATED = 42,
262 /* tell nic that the secret key's been updated */ 262 /* tell nic that the secret key's been updated */
263 MXGEFW_CMD_SET_RSS_ENABLE, 263 MXGEFW_CMD_SET_RSS_ENABLE = 43,
264 /* data0 = enable/disable rss 264 /* data0 = enable/disable rss
265 * 0: disable rss. nic does not distribute receive packets. 265 * 0: disable rss. nic does not distribute receive packets.
266 * 1: enable rss. nic distributes receive packets among queues. 266 * 1: enable rss. nic distributes receive packets among queues.
@@ -277,7 +277,7 @@ enum myri10ge_mcp_cmd_type {
277#define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5 277#define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5
278#define MXGEFW_RSS_HASH_TYPE_MAX 0x5 278#define MXGEFW_RSS_HASH_TYPE_MAX 0x5
279 279
280 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, 280 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE = 44,
281 /* Return data = the max. size of the entire headers of a IPv6 TSO packet. 281 /* Return data = the max. size of the entire headers of a IPv6 TSO packet.
282 * If the header size of a IPv6 TSO packet is larger than the specified 282 * If the header size of a IPv6 TSO packet is larger than the specified
283 * value, then the driver must not use TSO. 283 * value, then the driver must not use TSO.
@@ -286,7 +286,7 @@ enum myri10ge_mcp_cmd_type {
286 * always has enough header buffer to store maximum-sized headers. 286 * always has enough header buffer to store maximum-sized headers.
287 */ 287 */
288 288
289 MXGEFW_CMD_SET_TSO_MODE, 289 MXGEFW_CMD_SET_TSO_MODE = 45,
290 /* data0 = TSO mode. 290 /* data0 = TSO mode.
291 * 0: Linux/FreeBSD style (NIC default) 291 * 0: Linux/FreeBSD style (NIC default)
292 * 1: NDIS/NetBSD style 292 * 1: NDIS/NetBSD style
@@ -294,33 +294,37 @@ enum myri10ge_mcp_cmd_type {
294#define MXGEFW_TSO_MODE_LINUX 0 294#define MXGEFW_TSO_MODE_LINUX 0
295#define MXGEFW_TSO_MODE_NDIS 1 295#define MXGEFW_TSO_MODE_NDIS 1
296 296
297 MXGEFW_CMD_MDIO_READ, 297 MXGEFW_CMD_MDIO_READ = 46,
298 /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */ 298 /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */
299 MXGEFW_CMD_MDIO_WRITE, 299 MXGEFW_CMD_MDIO_WRITE = 47,
300 /* data0 = dev_addr, data1 = register/addr, data2 = value */ 300 /* data0 = dev_addr, data1 = register/addr, data2 = value */
301 301
302 MXGEFW_CMD_XFP_I2C_READ, 302 MXGEFW_CMD_I2C_READ = 48,
303 /* Starts to get a fresh copy of one byte or of the whole xfp i2c table, the 303 /* Starts to get a fresh copy of one byte or of the module i2c table, the
304 * obtained data is cached inside the xaui-xfi chip : 304 * obtained data is cached inside the xaui-xfi chip :
305 * data0 : "all" flag : 0 => get one byte, 1=> get 256 bytes, 305 * data0 : 0 => get one byte, 1=> get 256 bytes
306 * data1 : if (data0 == 0): index of byte to refresh [ not used otherwise ] 306 * data1 : If data0 == 0: location to refresh
307 * bit 7:0 register location
308 * bit 8:15 is the i2c slave addr (0 is interpreted as 0xA1)
309 * bit 23:16 is the i2c bus number (for multi-port NICs)
310 * If data0 == 1: unused
307 * The operation might take ~1ms for a single byte or ~65ms when refreshing all 256 bytes 311 * The operation might take ~1ms for a single byte or ~65ms when refreshing all 256 bytes
308 * During the i2c operation, MXGEFW_CMD_XFP_I2C_READ or MXGEFW_CMD_XFP_BYTE attempts 312 * During the i2c operation, MXGEFW_CMD_I2C_READ or MXGEFW_CMD_I2C_BYTE attempts
309 * will return MXGEFW_CMD_ERROR_BUSY 313 * will return MXGEFW_CMD_ERROR_BUSY
310 */ 314 */
311 MXGEFW_CMD_XFP_BYTE, 315 MXGEFW_CMD_I2C_BYTE = 49,
312 /* Return the last obtained copy of a given byte in the xfp i2c table 316 /* Return the last obtained copy of a given byte in the xfp i2c table
313 * (copy cached during the last relevant MXGEFW_CMD_XFP_I2C_READ) 317 * (copy cached during the last relevant MXGEFW_CMD_I2C_READ)
314 * data0 : index of the desired table entry 318 * data0 : index of the desired table entry
315 * Return data = the byte stored at the requested index in the table 319 * Return data = the byte stored at the requested index in the table
316 */ 320 */
317 321
318 MXGEFW_CMD_GET_VPUMP_OFFSET, 322 MXGEFW_CMD_GET_VPUMP_OFFSET = 50,
319 /* Return data = NIC memory offset of mcp_vpump_public_global */ 323 /* Return data = NIC memory offset of mcp_vpump_public_global */
320 MXGEFW_CMD_RESET_VPUMP, 324 MXGEFW_CMD_RESET_VPUMP = 51,
321 /* Resets the VPUMP state */ 325 /* Resets the VPUMP state */
322 326
323 MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, 327 MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE = 52,
324 /* data0 = mcp_slot type to use. 328 /* data0 = mcp_slot type to use.
325 * 0 = the default 4B mcp_slot 329 * 0 = the default 4B mcp_slot
326 * 1 = 8B mcp_slot_8 330 * 1 = 8B mcp_slot_8
@@ -328,7 +332,7 @@ enum myri10ge_mcp_cmd_type {
328#define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0 332#define MXGEFW_RSS_MCP_SLOT_TYPE_MIN 0
329#define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1 333#define MXGEFW_RSS_MCP_SLOT_TYPE_WITH_HASH 1
330 334
331 MXGEFW_CMD_SET_THROTTLE_FACTOR, 335 MXGEFW_CMD_SET_THROTTLE_FACTOR = 53,
332 /* set the throttle factor for ethp_z8e 336 /* set the throttle factor for ethp_z8e
333 * data0 = throttle_factor 337 * data0 = throttle_factor
334 * throttle_factor = 256 * pcie-raw-speed / tx_speed 338 * throttle_factor = 256 * pcie-raw-speed / tx_speed
@@ -344,45 +348,50 @@ enum myri10ge_mcp_cmd_type {
344 * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s 348 * with tx_boundary == 4096, max-throttle-factor == 4095 => min-speed == 1Gb/s
345 */ 349 */
346 350
347 MXGEFW_CMD_VPUMP_UP, 351 MXGEFW_CMD_VPUMP_UP = 54,
348 /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */ 352 /* Allocates VPump Connection, Send Request and Zero copy buffer address tables */
349 MXGEFW_CMD_GET_VPUMP_CLK, 353 MXGEFW_CMD_GET_VPUMP_CLK = 55,
350 /* Get the lanai clock */ 354 /* Get the lanai clock */
351 355
352 MXGEFW_CMD_GET_DCA_OFFSET, 356 MXGEFW_CMD_GET_DCA_OFFSET = 56,
353 /* offset of dca control for WDMAs */ 357 /* offset of dca control for WDMAs */
354 358
355 /* VMWare NetQueue commands */ 359 /* VMWare NetQueue commands */
356 MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE, 360 MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57,
357 MXGEFW_CMD_NETQ_ADD_FILTER, 361 MXGEFW_CMD_NETQ_ADD_FILTER = 58,
358 /* data0 = filter_id << 16 | queue << 8 | type */ 362 /* data0 = filter_id << 16 | queue << 8 | type */
359 /* data1 = MS4 of MAC Addr */ 363 /* data1 = MS4 of MAC Addr */
360 /* data2 = LS2_MAC << 16 | VLAN_tag */ 364 /* data2 = LS2_MAC << 16 | VLAN_tag */
361 MXGEFW_CMD_NETQ_DEL_FILTER, 365 MXGEFW_CMD_NETQ_DEL_FILTER = 59,
362 /* data0 = filter_id */ 366 /* data0 = filter_id */
363 MXGEFW_CMD_NETQ_QUERY1, 367 MXGEFW_CMD_NETQ_QUERY1 = 60,
364 MXGEFW_CMD_NETQ_QUERY2, 368 MXGEFW_CMD_NETQ_QUERY2 = 61,
365 MXGEFW_CMD_NETQ_QUERY3, 369 MXGEFW_CMD_NETQ_QUERY3 = 62,
366 MXGEFW_CMD_NETQ_QUERY4, 370 MXGEFW_CMD_NETQ_QUERY4 = 63,
367 371
372 MXGEFW_CMD_RELAX_RXBUFFER_ALIGNMENT = 64,
373 /* When set, small receive buffers can cross page boundaries.
374 * Both small and big receive buffers may start at any address.
375 * This option has performance implications, so use with caution.
376 */
368}; 377};
369 378
370enum myri10ge_mcp_cmd_status { 379enum myri10ge_mcp_cmd_status {
371 MXGEFW_CMD_OK = 0, 380 MXGEFW_CMD_OK = 0,
372 MXGEFW_CMD_UNKNOWN, 381 MXGEFW_CMD_UNKNOWN = 1,
373 MXGEFW_CMD_ERROR_RANGE, 382 MXGEFW_CMD_ERROR_RANGE = 2,
374 MXGEFW_CMD_ERROR_BUSY, 383 MXGEFW_CMD_ERROR_BUSY = 3,
375 MXGEFW_CMD_ERROR_EMPTY, 384 MXGEFW_CMD_ERROR_EMPTY = 4,
376 MXGEFW_CMD_ERROR_CLOSED, 385 MXGEFW_CMD_ERROR_CLOSED = 5,
377 MXGEFW_CMD_ERROR_HASH_ERROR, 386 MXGEFW_CMD_ERROR_HASH_ERROR = 6,
378 MXGEFW_CMD_ERROR_BAD_PORT, 387 MXGEFW_CMD_ERROR_BAD_PORT = 7,
379 MXGEFW_CMD_ERROR_RESOURCES, 388 MXGEFW_CMD_ERROR_RESOURCES = 8,
380 MXGEFW_CMD_ERROR_MULTICAST, 389 MXGEFW_CMD_ERROR_MULTICAST = 9,
381 MXGEFW_CMD_ERROR_UNALIGNED, 390 MXGEFW_CMD_ERROR_UNALIGNED = 10,
382 MXGEFW_CMD_ERROR_NO_MDIO, 391 MXGEFW_CMD_ERROR_NO_MDIO = 11,
383 MXGEFW_CMD_ERROR_XFP_FAILURE, 392 MXGEFW_CMD_ERROR_I2C_FAILURE = 12,
384 MXGEFW_CMD_ERROR_XFP_ABSENT, 393 MXGEFW_CMD_ERROR_I2C_ABSENT = 13,
385 MXGEFW_CMD_ERROR_BAD_PCIE_LINK 394 MXGEFW_CMD_ERROR_BAD_PCIE_LINK = 14
386}; 395};
387 396
388#define MXGEFW_OLD_IRQ_DATA_LEN 40 397#define MXGEFW_OLD_IRQ_DATA_LEN 40
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
index a8662ea8079a..caa6cbbb631e 100644
--- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -41,6 +41,8 @@ struct mcp_gen_header {
41 unsigned short handoff_id_major; /* must be equal */ 41 unsigned short handoff_id_major; /* must be equal */
42 unsigned short handoff_id_caps; /* bitfield: new mcp must have superset */ 42 unsigned short handoff_id_caps; /* bitfield: new mcp must have superset */
43 unsigned msix_table_addr; /* start address of msix table in firmware */ 43 unsigned msix_table_addr; /* start address of msix table in firmware */
44 unsigned bss_addr; /* start of bss */
45 unsigned features;
44 /* 8 */ 46 /* 8 */
45}; 47};
46 48
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 3ad7589d6a1c..899ed065a147 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -318,13 +318,10 @@ static void myri_is_not_so_happy(struct myri_eth *mp)
318#ifdef DEBUG_HEADER 318#ifdef DEBUG_HEADER
319static void dump_ehdr(struct ethhdr *ehdr) 319static void dump_ehdr(struct ethhdr *ehdr)
320{ 320{
321 DECLARE_MAC_BUF(mac); 321 printk("ehdr[h_dst(%pM)"
322 DECLARE_MAC_BUF(mac2); 322 "h_source(%pM)"
323 printk("ehdr[h_dst(%s)"
324 "h_source(%s)"
325 "h_proto(%04x)]\n", 323 "h_proto(%04x)]\n",
326 print_mac(mac, ehdr->h_dest), print_mac(mac2, ehdr->h_source), 324 ehdr->h_dest, ehdr->h_source, ehdr->h_proto);
327 ehdr->h_proto);
328} 325}
329 326
330static void dump_ehdr_and_myripad(unsigned char *stuff) 327static void dump_ehdr_and_myripad(unsigned char *stuff)
@@ -528,7 +525,6 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
528 DRX(("prot[%04x] netif_rx ", skb->protocol)); 525 DRX(("prot[%04x] netif_rx ", skb->protocol));
529 netif_rx(skb); 526 netif_rx(skb);
530 527
531 dev->last_rx = jiffies;
532 dev->stats.rx_packets++; 528 dev->stats.rx_packets++;
533 dev->stats.rx_bytes += len; 529 dev->stats.rx_bytes += len;
534 next: 530 next:
@@ -540,7 +536,7 @@ static void myri_rx(struct myri_eth *mp, struct net_device *dev)
540static irqreturn_t myri_interrupt(int irq, void *dev_id) 536static irqreturn_t myri_interrupt(int irq, void *dev_id)
541{ 537{
542 struct net_device *dev = (struct net_device *) dev_id; 538 struct net_device *dev = (struct net_device *) dev_id;
543 struct myri_eth *mp = (struct myri_eth *) dev->priv; 539 struct myri_eth *mp = netdev_priv(dev);
544 void __iomem *lregs = mp->lregs; 540 void __iomem *lregs = mp->lregs;
545 struct myri_channel __iomem *chan = &mp->shmem->channel; 541 struct myri_channel __iomem *chan = &mp->shmem->channel;
546 unsigned long flags; 542 unsigned long flags;
@@ -579,14 +575,14 @@ static irqreturn_t myri_interrupt(int irq, void *dev_id)
579 575
580static int myri_open(struct net_device *dev) 576static int myri_open(struct net_device *dev)
581{ 577{
582 struct myri_eth *mp = (struct myri_eth *) dev->priv; 578 struct myri_eth *mp = netdev_priv(dev);
583 579
584 return myri_init(mp, in_interrupt()); 580 return myri_init(mp, in_interrupt());
585} 581}
586 582
587static int myri_close(struct net_device *dev) 583static int myri_close(struct net_device *dev)
588{ 584{
589 struct myri_eth *mp = (struct myri_eth *) dev->priv; 585 struct myri_eth *mp = netdev_priv(dev);
590 586
591 myri_clean_rings(mp); 587 myri_clean_rings(mp);
592 return 0; 588 return 0;
@@ -594,7 +590,7 @@ static int myri_close(struct net_device *dev)
594 590
595static void myri_tx_timeout(struct net_device *dev) 591static void myri_tx_timeout(struct net_device *dev)
596{ 592{
597 struct myri_eth *mp = (struct myri_eth *) dev->priv; 593 struct myri_eth *mp = netdev_priv(dev);
598 594
599 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 595 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
600 596
@@ -605,7 +601,7 @@ static void myri_tx_timeout(struct net_device *dev)
605 601
606static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev) 602static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
607{ 603{
608 struct myri_eth *mp = (struct myri_eth *) dev->priv; 604 struct myri_eth *mp = netdev_priv(dev);
609 struct sendq __iomem *sq = mp->sq; 605 struct sendq __iomem *sq = mp->sq;
610 struct myri_txd __iomem *txd; 606 struct myri_txd __iomem *txd;
611 unsigned long flags; 607 unsigned long flags;
@@ -905,7 +901,6 @@ static int __devinit myri_sbus_probe(struct of_device *op, const struct of_devic
905 struct device_node *dp = op->node; 901 struct device_node *dp = op->node;
906 static unsigned version_printed; 902 static unsigned version_printed;
907 struct net_device *dev; 903 struct net_device *dev;
908 DECLARE_MAC_BUF(mac);
909 struct myri_eth *mp; 904 struct myri_eth *mp;
910 const void *prop; 905 const void *prop;
911 static int num; 906 static int num;
@@ -1088,15 +1083,15 @@ static int __devinit myri_sbus_probe(struct of_device *op, const struct of_devic
1088 1083
1089 num++; 1084 num++;
1090 1085
1091 printk("%s: MyriCOM MyriNET Ethernet %s\n", 1086 printk("%s: MyriCOM MyriNET Ethernet %pM\n",
1092 dev->name, print_mac(mac, dev->dev_addr)); 1087 dev->name, dev->dev_addr);
1093 1088
1094 return 0; 1089 return 0;
1095 1090
1096err_free_irq: 1091err_free_irq:
1097 free_irq(dev->irq, dev); 1092 free_irq(dev->irq, dev);
1098err: 1093err:
1099 /* This will also free the co-allocated 'dev->priv' */ 1094 /* This will also free the co-allocated private data*/
1100 free_netdev(dev); 1095 free_netdev(dev);
1101 return -ENODEV; 1096 return -ENODEV;
1102} 1097}
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index f7fa3944659b..478edb92bca3 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -792,7 +792,6 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
792 const int pcibar = 1; /* PCI base address register */ 792 const int pcibar = 1; /* PCI base address register */
793 int prev_eedata; 793 int prev_eedata;
794 u32 tmp; 794 u32 tmp;
795 DECLARE_MAC_BUF(mac);
796 795
797/* when built into the kernel, we only print version if device is found */ 796/* when built into the kernel, we only print version if device is found */
798#ifndef MODULE 797#ifndef MODULE
@@ -948,10 +947,10 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
948 947
949 if (netif_msg_drv(np)) { 948 if (netif_msg_drv(np)) {
950 printk(KERN_INFO "natsemi %s: %s at %#08llx " 949 printk(KERN_INFO "natsemi %s: %s at %#08llx "
951 "(%s), %s, IRQ %d", 950 "(%s), %pM, IRQ %d",
952 dev->name, natsemi_pci_info[chip_idx].name, 951 dev->name, natsemi_pci_info[chip_idx].name,
953 (unsigned long long)iostart, pci_name(np->pci_dev), 952 (unsigned long long)iostart, pci_name(np->pci_dev),
954 print_mac(mac, dev->dev_addr), irq); 953 dev->dev_addr, irq);
955 if (dev->if_port == PORT_TP) 954 if (dev->if_port == PORT_TP)
956 printk(", port TP.\n"); 955 printk(", port TP.\n");
957 else if (np->ignore_phy) 956 else if (np->ignore_phy)
@@ -2194,10 +2193,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
2194 2193
2195 prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]); 2194 prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);
2196 2195
2197 if (netif_rx_schedule_prep(dev, &np->napi)) { 2196 if (netif_rx_schedule_prep(&np->napi)) {
2198 /* Disable interrupts and register for poll */ 2197 /* Disable interrupts and register for poll */
2199 natsemi_irq_disable(dev); 2198 natsemi_irq_disable(dev);
2200 __netif_rx_schedule(dev, &np->napi); 2199 __netif_rx_schedule(&np->napi);
2201 } else 2200 } else
2202 printk(KERN_WARNING 2201 printk(KERN_WARNING
2203 "%s: Ignoring interrupt, status %#08x, mask %#08x.\n", 2202 "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
@@ -2249,7 +2248,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget)
2249 np->intr_status = readl(ioaddr + IntrStatus); 2248 np->intr_status = readl(ioaddr + IntrStatus);
2250 } while (np->intr_status); 2249 } while (np->intr_status);
2251 2250
2252 netif_rx_complete(dev, napi); 2251 netif_rx_complete(napi);
2253 2252
2254 /* Reenable interrupts providing nothing is trying to shut 2253 /* Reenable interrupts providing nothing is trying to shut
2255 * the chip down. */ 2254 * the chip down. */
@@ -2362,7 +2361,6 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2362 } 2361 }
2363 skb->protocol = eth_type_trans(skb, dev); 2362 skb->protocol = eth_type_trans(skb, dev);
2364 netif_receive_skb(skb); 2363 netif_receive_skb(skb);
2365 dev->last_rx = jiffies;
2366 np->stats.rx_packets++; 2364 np->stats.rx_packets++;
2367 np->stats.rx_bytes += pkt_len; 2365 np->stats.rx_bytes += pkt_len;
2368 } 2366 }
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index fbc7531d3c7d..b57239171046 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -167,7 +167,7 @@ static void cleanup_card(struct net_device *dev)
167#ifndef MODULE 167#ifndef MODULE
168struct net_device * __init ne_probe(int unit) 168struct net_device * __init ne_probe(int unit)
169{ 169{
170 struct net_device *dev = ____alloc_ei_netdev(0); 170 struct net_device *dev = alloc_ei_netdev();
171 int err; 171 int err;
172 172
173 if (!dev) 173 if (!dev)
@@ -193,6 +193,21 @@ out:
193} 193}
194#endif 194#endif
195 195
196static const struct net_device_ops ne_netdev_ops = {
197 .ndo_open = ne_open,
198 .ndo_stop = ne_close,
199
200 .ndo_start_xmit = ei_start_xmit,
201 .ndo_tx_timeout = ei_tx_timeout,
202 .ndo_get_stats = ei_get_stats,
203 .ndo_set_multicast_list = ei_set_multicast_list,
204 .ndo_validate_addr = eth_validate_addr,
205 .ndo_change_mtu = eth_change_mtu,
206#ifdef CONFIG_NET_POLL_CONTROLLER
207 .ndo_poll_controller = ei_poll,
208#endif
209};
210
196static int __init ne_probe1(struct net_device *dev, int ioaddr) 211static int __init ne_probe1(struct net_device *dev, int ioaddr)
197{ 212{
198 int i; 213 int i;
@@ -204,7 +219,6 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
204 static unsigned version_printed; 219 static unsigned version_printed;
205 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 220 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
206 unsigned char bus_width; 221 unsigned char bus_width;
207 DECLARE_MAC_BUF(mac);
208 222
209 if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) 223 if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
210 return -EBUSY; 224 return -EBUSY;
@@ -299,7 +313,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
299 313
300 for(i = 0; i < ETHER_ADDR_LEN; i++) 314 for(i = 0; i < ETHER_ADDR_LEN; i++)
301 dev->dev_addr[i] = SA_prom[i]; 315 dev->dev_addr[i] = SA_prom[i];
302 printk(" %s\n", print_mac(mac, dev->dev_addr)); 316 printk(" %pM\n", dev->dev_addr);
303 317
304 printk("%s: %s found at %#x, using IRQ %d.\n", 318 printk("%s: %s found at %#x, using IRQ %d.\n",
305 dev->name, name, ioaddr, dev->irq); 319 dev->name, name, ioaddr, dev->irq);
@@ -320,11 +334,9 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
320 ei_status.block_output = &ne_block_output; 334 ei_status.block_output = &ne_block_output;
321 ei_status.get_8390_hdr = &ne_get_8390_hdr; 335 ei_status.get_8390_hdr = &ne_get_8390_hdr;
322 ei_status.priv = 0; 336 ei_status.priv = 0;
323 dev->open = &ne_open; 337
324 dev->stop = &ne_close; 338 dev->netdev_ops = &ne_netdev_ops;
325#ifdef CONFIG_NET_POLL_CONTROLLER 339
326 dev->poll_controller = __ei_poll;
327#endif
328 __NS8390_init(dev, 0); 340 __NS8390_init(dev, 0);
329 341
330 ret = register_netdev(dev); 342 ret = register_netdev(dev);
@@ -625,7 +637,7 @@ int init_module(void)
625 int err; 637 int err;
626 638
627 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { 639 for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
628 struct net_device *dev = ____alloc_ei_netdev(0); 640 struct net_device *dev = alloc_ei_netdev();
629 if (!dev) 641 if (!dev)
630 break; 642 break;
631 if (io[this_dev]) { 643 if (io[this_dev]) {
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index eb681c0d51ba..5c3e242428f1 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -174,9 +174,6 @@ bad_clone_list[] __initdata = {
174static int ne_probe1(struct net_device *dev, unsigned long ioaddr); 174static int ne_probe1(struct net_device *dev, unsigned long ioaddr);
175static int ne_probe_isapnp(struct net_device *dev); 175static int ne_probe_isapnp(struct net_device *dev);
176 176
177static int ne_open(struct net_device *dev);
178static int ne_close(struct net_device *dev);
179
180static void ne_reset_8390(struct net_device *dev); 177static void ne_reset_8390(struct net_device *dev);
181static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, 178static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
182 int ring_page); 179 int ring_page);
@@ -297,7 +294,6 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
297 int neX000, ctron, copam, bad_card; 294 int neX000, ctron, copam, bad_card;
298 int reg0, ret; 295 int reg0, ret;
299 static unsigned version_printed; 296 static unsigned version_printed;
300 DECLARE_MAC_BUF(mac);
301 297
302 if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) 298 if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
303 return -EBUSY; 299 return -EBUSY;
@@ -517,7 +513,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
517 } 513 }
518#endif 514#endif
519 515
520 printk("%s\n", print_mac(mac, dev->dev_addr)); 516 printk("%pM\n", dev->dev_addr);
521 517
522 ei_status.name = name; 518 ei_status.name = name;
523 ei_status.tx_start_page = start_page; 519 ei_status.tx_start_page = start_page;
@@ -537,11 +533,8 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
537 ei_status.block_output = &ne_block_output; 533 ei_status.block_output = &ne_block_output;
538 ei_status.get_8390_hdr = &ne_get_8390_hdr; 534 ei_status.get_8390_hdr = &ne_get_8390_hdr;
539 ei_status.priv = 0; 535 ei_status.priv = 0;
540 dev->open = &ne_open; 536
541 dev->stop = &ne_close; 537 dev->netdev_ops = &eip_netdev_ops;
542#ifdef CONFIG_NET_POLL_CONTROLLER
543 dev->poll_controller = eip_poll;
544#endif
545 NS8390p_init(dev, 0); 538 NS8390p_init(dev, 0);
546 539
547 ret = register_netdev(dev); 540 ret = register_netdev(dev);
@@ -558,20 +551,6 @@ err_out:
558 return ret; 551 return ret;
559} 552}
560 553
561static int ne_open(struct net_device *dev)
562{
563 eip_open(dev);
564 return 0;
565}
566
567static int ne_close(struct net_device *dev)
568{
569 if (ei_debug > 1)
570 printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
571 eip_close(dev);
572 return 0;
573}
574
575/* Hard reset the card. This used to pause for the same period that a 554/* Hard reset the card. This used to pause for the same period that a
576 8390 reset command required, but that shouldn't be necessary. */ 555 8390 reset command required, but that shouldn't be necessary. */
577 556
@@ -950,7 +929,7 @@ static void __init ne_add_devices(void)
950} 929}
951 930
952#ifdef MODULE 931#ifdef MODULE
953int __init init_module() 932int __init init_module(void)
954{ 933{
955 int retval; 934 int retval;
956 ne_add_devices(); 935 ne_add_devices();
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
index 332df75a9ab6..a53bb201d3c7 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ne2.c
@@ -137,9 +137,6 @@ extern int netcard_probe(struct net_device *dev);
137 137
138static int ne2_probe1(struct net_device *dev, int slot); 138static int ne2_probe1(struct net_device *dev, int slot);
139 139
140static int ne_open(struct net_device *dev);
141static int ne_close(struct net_device *dev);
142
143static void ne_reset_8390(struct net_device *dev); 140static void ne_reset_8390(struct net_device *dev);
144static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, 141static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
145 int ring_page); 142 int ring_page);
@@ -302,7 +299,6 @@ out:
302static int ne2_procinfo(char *buf, int slot, struct net_device *dev) 299static int ne2_procinfo(char *buf, int slot, struct net_device *dev)
303{ 300{
304 int len=0; 301 int len=0;
305 DECLARE_MAC_BUF(mac);
306 302
307 len += sprintf(buf+len, "The NE/2 Ethernet Adapter\n" ); 303 len += sprintf(buf+len, "The NE/2 Ethernet Adapter\n" );
308 len += sprintf(buf+len, "Driver written by Wim Dumon "); 304 len += sprintf(buf+len, "Driver written by Wim Dumon ");
@@ -313,7 +309,7 @@ static int ne2_procinfo(char *buf, int slot, struct net_device *dev)
313 len += sprintf(buf+len, "Based on the original NE2000 drivers\n" ); 309 len += sprintf(buf+len, "Based on the original NE2000 drivers\n" );
314 len += sprintf(buf+len, "Base IO: %#x\n", (unsigned int)dev->base_addr); 310 len += sprintf(buf+len, "Base IO: %#x\n", (unsigned int)dev->base_addr);
315 len += sprintf(buf+len, "IRQ : %d\n", dev->irq); 311 len += sprintf(buf+len, "IRQ : %d\n", dev->irq);
316 len += sprintf(buf+len, "HW addr : %s\n", print_mac(mac, dev->dev_addr)); 312 len += sprintf(buf+len, "HW addr : %pM\n", dev->dev_addr);
317 313
318 return len; 314 return len;
319} 315}
@@ -326,7 +322,6 @@ static int __init ne2_probe1(struct net_device *dev, int slot)
326 const char *name = "NE/2"; 322 const char *name = "NE/2";
327 int start_page, stop_page; 323 int start_page, stop_page;
328 static unsigned version_printed; 324 static unsigned version_printed;
329 DECLARE_MAC_BUF(mac);
330 325
331 if (ei_debug && version_printed++ == 0) 326 if (ei_debug && version_printed++ == 0)
332 printk(version); 327 printk(version);
@@ -469,7 +464,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot)
469 for(i = 0; i < ETHER_ADDR_LEN; i++) 464 for(i = 0; i < ETHER_ADDR_LEN; i++)
470 dev->dev_addr[i] = SA_prom[i]; 465 dev->dev_addr[i] = SA_prom[i];
471 466
472 printk(" %s\n", print_mac(mac, dev->dev_addr)); 467 printk(" %pM\n", dev->dev_addr);
473 468
474 printk("%s: %s found at %#x, using IRQ %d.\n", 469 printk("%s: %s found at %#x, using IRQ %d.\n",
475 dev->name, name, base_addr, dev->irq); 470 dev->name, name, base_addr, dev->irq);
@@ -494,11 +489,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot)
494 489
495 ei_status.priv = slot; 490 ei_status.priv = slot;
496 491
497 dev->open = &ne_open; 492 dev->netdev_ops = &eip_netdev_ops;
498 dev->stop = &ne_close;
499#ifdef CONFIG_NET_POLL_CONTROLLER
500 dev->poll_controller = eip_poll;
501#endif
502 NS8390p_init(dev, 0); 493 NS8390p_init(dev, 0);
503 494
504 retval = register_netdev(dev); 495 retval = register_netdev(dev);
@@ -513,20 +504,6 @@ out:
513 return retval; 504 return retval;
514} 505}
515 506
516static int ne_open(struct net_device *dev)
517{
518 eip_open(dev);
519 return 0;
520}
521
522static int ne_close(struct net_device *dev)
523{
524 if (ei_debug > 1)
525 printk("%s: Shutting down ethercard.\n", dev->name);
526 eip_close(dev);
527 return 0;
528}
529
530/* Hard reset the card. This used to pause for the same period that a 507/* Hard reset the card. This used to pause for the same period that a
531 8390 reset command required, but that shouldn't be necessary. */ 508 8390 reset command required, but that shouldn't be necessary. */
532static void ne_reset_8390(struct net_device *dev) 509static void ne_reset_8390(struct net_device *dev)
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index de0de744a8fa..62f20ba211cb 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -200,6 +200,19 @@ struct ne2k_pci_card {
200 in the 'dev' and 'ei_status' structures. 200 in the 'dev' and 'ei_status' structures.
201*/ 201*/
202 202
203static const struct net_device_ops ne2k_netdev_ops = {
204 .ndo_open = ne2k_pci_open,
205 .ndo_stop = ne2k_pci_close,
206 .ndo_start_xmit = ei_start_xmit,
207 .ndo_tx_timeout = ei_tx_timeout,
208 .ndo_get_stats = ei_get_stats,
209 .ndo_set_multicast_list = ei_set_multicast_list,
210 .ndo_validate_addr = eth_validate_addr,
211 .ndo_change_mtu = eth_change_mtu,
212#ifdef CONFIG_NET_POLL_CONTROLLER
213 .ndo_poll_controller = ei_poll,
214#endif
215};
203 216
204static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, 217static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
205 const struct pci_device_id *ent) 218 const struct pci_device_id *ent)
@@ -212,7 +225,6 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
212 static unsigned int fnd_cnt; 225 static unsigned int fnd_cnt;
213 long ioaddr; 226 long ioaddr;
214 int flags = pci_clone_list[chip_idx].flags; 227 int flags = pci_clone_list[chip_idx].flags;
215 DECLARE_MAC_BUF(mac);
216 228
217/* when built into the kernel, we only print version if device is found */ 229/* when built into the kernel, we only print version if device is found */
218#ifndef MODULE 230#ifndef MODULE
@@ -266,6 +278,8 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
266 dev_err(&pdev->dev, "cannot allocate ethernet device\n"); 278 dev_err(&pdev->dev, "cannot allocate ethernet device\n");
267 goto err_out_free_res; 279 goto err_out_free_res;
268 } 280 }
281 dev->netdev_ops = &ne2k_netdev_ops;
282
269 SET_NETDEV_DEV(dev, &pdev->dev); 283 SET_NETDEV_DEV(dev, &pdev->dev);
270 284
271 /* Reset card. Who knows what dain-bramaged state it was left in. */ 285 /* Reset card. Who knows what dain-bramaged state it was left in. */
@@ -354,12 +368,8 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
354 ei_status.block_output = &ne2k_pci_block_output; 368 ei_status.block_output = &ne2k_pci_block_output;
355 ei_status.get_8390_hdr = &ne2k_pci_get_8390_hdr; 369 ei_status.get_8390_hdr = &ne2k_pci_get_8390_hdr;
356 ei_status.priv = (unsigned long) pdev; 370 ei_status.priv = (unsigned long) pdev;
357 dev->open = &ne2k_pci_open; 371
358 dev->stop = &ne2k_pci_close;
359 dev->ethtool_ops = &ne2k_pci_ethtool_ops; 372 dev->ethtool_ops = &ne2k_pci_ethtool_ops;
360#ifdef CONFIG_NET_POLL_CONTROLLER
361 dev->poll_controller = ei_poll;
362#endif
363 NS8390_init(dev, 0); 373 NS8390_init(dev, 0);
364 374
365 i = register_netdev(dev); 375 i = register_netdev(dev);
@@ -368,9 +378,9 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
368 378
369 for(i = 0; i < 6; i++) 379 for(i = 0; i < 6; i++)
370 dev->dev_addr[i] = SA_prom[i]; 380 dev->dev_addr[i] = SA_prom[i];
371 printk("%s: %s found at %#lx, IRQ %d, %s.\n", 381 printk("%s: %s found at %#lx, IRQ %d, %pM.\n",
372 dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq, 382 dev->name, pci_clone_list[chip_idx].name, ioaddr, dev->irq,
373 print_mac(mac, dev->dev_addr)); 383 dev->dev_addr);
374 384
375 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 385 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
376 386
@@ -626,7 +636,7 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
626static void ne2k_pci_get_drvinfo(struct net_device *dev, 636static void ne2k_pci_get_drvinfo(struct net_device *dev,
627 struct ethtool_drvinfo *info) 637 struct ethtool_drvinfo *info)
628{ 638{
629 struct ei_device *ei = dev->priv; 639 struct ei_device *ei = netdev_priv(dev);
630 struct pci_dev *pci_dev = (struct pci_dev *) ei->priv; 640 struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
631 641
632 strcpy(info->driver, DRV_NAME); 642 strcpy(info->driver, DRV_NAME);
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index 425043a88db9..fac43fd6fc87 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -45,9 +45,6 @@
45 45
46#define DRV_NAME "ne3210" 46#define DRV_NAME "ne3210"
47 47
48static int ne3210_open(struct net_device *dev);
49static int ne3210_close(struct net_device *dev);
50
51static void ne3210_reset_8390(struct net_device *dev); 48static void ne3210_reset_8390(struct net_device *dev);
52 49
53static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); 50static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
@@ -99,7 +96,6 @@ static int __init ne3210_eisa_probe (struct device *device)
99 int i, retval, port_index; 96 int i, retval, port_index;
100 struct eisa_device *edev = to_eisa_device (device); 97 struct eisa_device *edev = to_eisa_device (device);
101 struct net_device *dev; 98 struct net_device *dev;
102 DECLARE_MAC_BUF(mac);
103 99
104 /* Allocate dev->priv and fill in 8390 specific dev fields. */ 100 /* Allocate dev->priv and fill in 8390 specific dev fields. */
105 if (!(dev = alloc_ei_netdev ())) { 101 if (!(dev = alloc_ei_netdev ())) {
@@ -131,8 +127,8 @@ static int __init ne3210_eisa_probe (struct device *device)
131 port_index = inb(ioaddr + NE3210_CFG2) >> 6; 127 port_index = inb(ioaddr + NE3210_CFG2) >> 6;
132 for(i = 0; i < ETHER_ADDR_LEN; i++) 128 for(i = 0; i < ETHER_ADDR_LEN; i++)
133 dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i); 129 dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i);
134 printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %s.\n", 130 printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n",
135 edev->slot, ifmap[port_index], print_mac(mac, dev->dev_addr)); 131 edev->slot, ifmap[port_index], dev->dev_addr);
136 132
137 /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */ 133 /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */
138 dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07]; 134 dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07];
@@ -200,11 +196,8 @@ static int __init ne3210_eisa_probe (struct device *device)
200 ei_status.block_output = &ne3210_block_output; 196 ei_status.block_output = &ne3210_block_output;
201 ei_status.get_8390_hdr = &ne3210_get_8390_hdr; 197 ei_status.get_8390_hdr = &ne3210_get_8390_hdr;
202 198
203 dev->open = &ne3210_open; 199 dev->netdev_ops = &ei_netdev_ops;
204 dev->stop = &ne3210_close; 200
205#ifdef CONFIG_NET_POLL_CONTROLLER
206 dev->poll_controller = ei_poll;
207#endif
208 dev->if_port = ifmap_val[port_index]; 201 dev->if_port = ifmap_val[port_index];
209 202
210 if ((retval = register_netdev (dev))) 203 if ((retval = register_netdev (dev)))
@@ -321,22 +314,6 @@ static void ne3210_block_output(struct net_device *dev, int count,
321 memcpy_toio(shmem, buf, count); 314 memcpy_toio(shmem, buf, count);
322} 315}
323 316
324static int ne3210_open(struct net_device *dev)
325{
326 ei_open(dev);
327 return 0;
328}
329
330static int ne3210_close(struct net_device *dev)
331{
332
333 if (ei_debug > 1)
334 printk("%s: Shutting down ethercard.\n", dev->name);
335
336 ei_close(dev);
337 return 0;
338}
339
340static struct eisa_device_id ne3210_ids[] = { 317static struct eisa_device_id ne3210_ids[] = {
341 { "EGL0101" }, 318 { "EGL0101" },
342 { "NVL1801" }, 319 { "NVL1801" },
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 9681618c3232..d304d38cd5d1 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -307,17 +307,14 @@ static ssize_t show_remote_ip(struct netconsole_target *nt, char *buf)
307static ssize_t show_local_mac(struct netconsole_target *nt, char *buf) 307static ssize_t show_local_mac(struct netconsole_target *nt, char *buf)
308{ 308{
309 struct net_device *dev = nt->np.dev; 309 struct net_device *dev = nt->np.dev;
310 static const u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
310 311
311 DECLARE_MAC_BUF(mac); 312 return snprintf(buf, PAGE_SIZE, "%pM\n", dev ? dev->dev_addr : bcast);
312 return snprintf(buf, PAGE_SIZE, "%s\n", dev ?
313 print_mac(mac, dev->dev_addr) : "ff:ff:ff:ff:ff:ff");
314} 313}
315 314
316static ssize_t show_remote_mac(struct netconsole_target *nt, char *buf) 315static ssize_t show_remote_mac(struct netconsole_target *nt, char *buf)
317{ 316{
318 DECLARE_MAC_BUF(mac); 317 return snprintf(buf, PAGE_SIZE, "%pM\n", nt->np.remote_mac);
319 return snprintf(buf, PAGE_SIZE, "%s\n",
320 print_mac(mac, nt->np.remote_mac));
321} 318}
322 319
323/* 320/*
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index b289a0a2b945..1861d5bbd96b 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -165,7 +165,6 @@ static void netx_eth_receive(struct net_device *ndev)
165 pfifo_push(EMPTY_PTR_FIFO(priv->id), 165 pfifo_push(EMPTY_PTR_FIFO(priv->id),
166 FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno)); 166 FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno));
167 167
168 ndev->last_rx = jiffies;
169 skb->protocol = eth_type_trans(skb, ndev); 168 skb->protocol = eth_type_trans(skb, ndev);
170 netif_rx(skb); 169 netif_rx(skb);
171 ndev->stats.rx_packets++; 170 ndev->stats.rx_packets++;
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index b974ca0fc530..e45ce2951729 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -275,11 +275,11 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
275 } else 275 } else
276 return -EOPNOTSUPP; 276 return -EOPNOTSUPP;
277 277
278 if (netif_running(dev)) { 278 if (!netif_running(dev))
279 dev->stop(dev); 279 return 0;
280 dev->open(dev); 280
281 } 281 dev->netdev_ops->ndo_stop(dev);
282 return 0; 282 return dev->netdev_ops->ndo_open(dev);
283} 283}
284 284
285static int netxen_nic_get_regs_len(struct net_device *dev) 285static int netxen_nic_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 84978f80f396..aa6e603bfcbf 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -537,7 +537,7 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter,
537static int nx_p3_sre_macaddr_change(struct net_device *dev, 537static int nx_p3_sre_macaddr_change(struct net_device *dev,
538 u8 *addr, unsigned op) 538 u8 *addr, unsigned op)
539{ 539{
540 struct netxen_adapter *adapter = (struct netxen_adapter *)dev->priv; 540 struct netxen_adapter *adapter = netdev_priv(dev);
541 nx_nic_req_t req; 541 nx_nic_req_t req;
542 nx_mac_req_t mac_req; 542 nx_mac_req_t mac_req;
543 int rv; 543 int rv;
@@ -1459,7 +1459,7 @@ static int netxen_nic_pci_mem_read_direct(struct netxen_adapter *adapter,
1459 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); 1459 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
1460 else 1460 else
1461 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); 1461 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
1462 if (mem_ptr == 0UL) { 1462 if (mem_ptr == NULL) {
1463 *(uint8_t *)data = 0; 1463 *(uint8_t *)data = 0;
1464 return -1; 1464 return -1;
1465 } 1465 }
@@ -1533,7 +1533,7 @@ netxen_nic_pci_mem_write_direct(struct netxen_adapter *adapter, u64 off,
1533 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); 1533 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
1534 else 1534 else
1535 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); 1535 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
1536 if (mem_ptr == 0UL) 1536 if (mem_ptr == NULL)
1537 return -1; 1537 return -1;
1538 addr = mem_ptr; 1538 addr = mem_ptr;
1539 addr += start & (PAGE_SIZE - 1); 1539 addr += start & (PAGE_SIZE - 1);
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 5bba675d0504..d924468e506e 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1285,9 +1285,7 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
1285 } 1285 }
1286 adapter->stats.rxdropped++; 1286 adapter->stats.rxdropped++;
1287 } else { 1287 } else {
1288
1289 netif_receive_skb(skb); 1288 netif_receive_skb(skb);
1290 netdev->last_rx = jiffies;
1291 1289
1292 adapter->stats.no_rcv++; 1290 adapter->stats.no_rcv++;
1293 adapter->stats.rxbytes += length; 1291 adapter->stats.rxbytes += length;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 6ef3f0d84bcf..ba01524b5531 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -439,7 +439,6 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
439 int i; 439 int i;
440 unsigned char *p; 440 unsigned char *p;
441 __le64 mac_addr; 441 __le64 mac_addr;
442 DECLARE_MAC_BUF(mac);
443 struct net_device *netdev = adapter->netdev; 442 struct net_device *netdev = adapter->netdev;
444 struct pci_dev *pdev = adapter->pdev; 443 struct pci_dev *pdev = adapter->pdev;
445 444
@@ -462,15 +461,39 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
462 461
463 /* set station address */ 462 /* set station address */
464 463
465 if (!is_valid_ether_addr(netdev->perm_addr)) { 464 if (!is_valid_ether_addr(netdev->perm_addr))
466 dev_warn(&pdev->dev, "Bad MAC address %s.\n", 465 dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
467 print_mac(mac, netdev->dev_addr)); 466 else
468 } else
469 adapter->macaddr_set(adapter, netdev->dev_addr); 467 adapter->macaddr_set(adapter, netdev->dev_addr);
470 468
471 return 0; 469 return 0;
472} 470}
473 471
472static void netxen_set_multicast_list(struct net_device *dev)
473{
474 struct netxen_adapter *adapter = netdev_priv(dev);
475
476 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
477 netxen_p3_nic_set_multi(dev);
478 else
479 netxen_p2_nic_set_multi(dev);
480}
481
482static const struct net_device_ops netxen_netdev_ops = {
483 .ndo_open = netxen_nic_open,
484 .ndo_stop = netxen_nic_close,
485 .ndo_start_xmit = netxen_nic_xmit_frame,
486 .ndo_get_stats = netxen_nic_get_stats,
487 .ndo_validate_addr = eth_validate_addr,
488 .ndo_set_multicast_list = netxen_set_multicast_list,
489 .ndo_set_mac_address = netxen_nic_set_mac,
490 .ndo_change_mtu = netxen_nic_change_mtu,
491 .ndo_tx_timeout = netxen_tx_timeout,
492#ifdef CONFIG_NET_POLL_CONTROLLER
493 .ndo_poll_controller = netxen_nic_poll_controller,
494#endif
495};
496
474/* 497/*
475 * netxen_nic_probe() 498 * netxen_nic_probe()
476 * 499 *
@@ -543,7 +566,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
543 566
544 SET_NETDEV_DEV(netdev, &pdev->dev); 567 SET_NETDEV_DEV(netdev, &pdev->dev);
545 568
546 adapter = netdev->priv; 569 adapter = netdev_priv(netdev);
547 adapter->netdev = netdev; 570 adapter->netdev = netdev;
548 adapter->pdev = pdev; 571 adapter->pdev = pdev;
549 adapter->ahw.pci_func = pci_func_id; 572 adapter->ahw.pci_func = pci_func_id;
@@ -682,25 +705,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
682 else 705 else
683 adapter->max_mc_count = 16; 706 adapter->max_mc_count = 16;
684 707
685 netdev->open = netxen_nic_open; 708 netdev->netdev_ops = &netxen_netdev_ops;
686 netdev->stop = netxen_nic_close;
687 netdev->hard_start_xmit = netxen_nic_xmit_frame;
688 netdev->get_stats = netxen_nic_get_stats;
689 if (NX_IS_REVISION_P3(revision_id))
690 netdev->set_multicast_list = netxen_p3_nic_set_multi;
691 else
692 netdev->set_multicast_list = netxen_p2_nic_set_multi;
693 netdev->set_mac_address = netxen_nic_set_mac;
694 netdev->change_mtu = netxen_nic_change_mtu;
695 netdev->tx_timeout = netxen_tx_timeout;
696 netdev->watchdog_timeo = 2*HZ; 709 netdev->watchdog_timeo = 2*HZ;
697 710
698 netxen_nic_change_mtu(netdev, netdev->mtu); 711 netxen_nic_change_mtu(netdev, netdev->mtu);
699 712
700 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops); 713 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
701#ifdef CONFIG_NET_POLL_CONTROLLER 714
702 netdev->poll_controller = netxen_nic_poll_controller;
703#endif
704 /* ScatterGather support */ 715 /* ScatterGather support */
705 netdev->features = NETIF_F_SG; 716 netdev->features = NETIF_F_SG;
706 netdev->features |= NETIF_F_IP_CSUM; 717 netdev->features |= NETIF_F_IP_CSUM;
@@ -988,7 +999,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
988 */ 999 */
989static int netxen_nic_open(struct net_device *netdev) 1000static int netxen_nic_open(struct net_device *netdev)
990{ 1001{
991 struct netxen_adapter *adapter = (struct netxen_adapter *)netdev->priv; 1002 struct netxen_adapter *adapter = netdev_priv(netdev);
992 int err = 0; 1003 int err = 0;
993 int ctx, ring; 1004 int ctx, ring;
994 irq_handler_t handler; 1005 irq_handler_t handler;
@@ -1077,7 +1088,7 @@ static int netxen_nic_open(struct net_device *netdev)
1077 1088
1078 netxen_nic_set_link_parameters(adapter); 1089 netxen_nic_set_link_parameters(adapter);
1079 1090
1080 netdev->set_multicast_list(netdev); 1091 netxen_set_multicast_list(netdev);
1081 if (adapter->set_mtu) 1092 if (adapter->set_mtu)
1082 adapter->set_mtu(adapter, netdev->mtu); 1093 adapter->set_mtu(adapter, netdev->mtu);
1083 1094
@@ -1572,7 +1583,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
1572 } 1583 }
1573 1584
1574 if ((work_done < budget) && tx_complete) { 1585 if ((work_done < budget) && tx_complete) {
1575 netif_rx_complete(adapter->netdev, &adapter->napi); 1586 netif_rx_complete(&adapter->napi);
1576 netxen_nic_enable_int(adapter); 1587 netxen_nic_enable_int(adapter);
1577 } 1588 }
1578 1589
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index 27f07f6a45b1..c3b9c83b32fe 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -608,7 +608,6 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
608 int phy = adapter->physical_port; 608 int phy = adapter->physical_port;
609 unsigned char mac_addr[6]; 609 unsigned char mac_addr[6];
610 int i; 610 int i;
611 DECLARE_MAC_BUF(mac);
612 611
613 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 612 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
614 return 0; 613 return 0;
@@ -636,10 +635,8 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter,
636 if (i == 10) { 635 if (i == 10) {
637 printk(KERN_ERR "%s: cannot set Mac addr for %s\n", 636 printk(KERN_ERR "%s: cannot set Mac addr for %s\n",
638 netxen_nic_driver_name, adapter->netdev->name); 637 netxen_nic_driver_name, adapter->netdev->name);
639 printk(KERN_ERR "MAC address set: %s.\n", 638 printk(KERN_ERR "MAC address set: %pM.\n", addr);
640 print_mac(mac, addr)); 639 printk(KERN_ERR "MAC address get: %pM.\n", mac_addr);
641 printk(KERN_ERR "MAC address get: %s.\n",
642 print_mac(mac, mac_addr));
643 } 640 }
644 return 0; 641 return 0;
645} 642}
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 8e0ca9f4e404..539e18ab485c 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -203,7 +203,6 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
203 unsigned int data = 0; 203 unsigned int data = 0;
204 int boguscount = 40; 204 int boguscount = 40;
205 int err = -ENODEV; 205 int err = -ENODEV;
206 DECLARE_MAC_BUF(mac);
207 206
208 dev->base_addr = ioaddr; 207 dev->base_addr = ioaddr;
209 dev->irq = irq; 208 dev->irq = irq;
@@ -271,7 +270,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
271 outw(i, IE_GP); 270 outw(i, IE_GP);
272 dev->dev_addr[i] = inb(IE_SAPROM); 271 dev->dev_addr[i] = inb(IE_SAPROM);
273 } 272 }
274 printk("%s ", print_mac(mac, dev->dev_addr)); 273 printk("%pM ", dev->dev_addr);
275 274
276 PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name)); 275 PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name));
277 276
@@ -329,7 +328,7 @@ static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
329 outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */ 328 outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */
330 } 329 }
331 printk("-> bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE); 330 printk("-> bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE);
332 memset(dev->priv, 0, sizeof(struct ni5010_local)); 331 memset(netdev_priv(dev), 0, sizeof(struct ni5010_local));
333 332
334 dev->open = ni5010_open; 333 dev->open = ni5010_open;
335 dev->stop = ni5010_close; 334 dev->stop = ni5010_close;
@@ -570,7 +569,6 @@ static void ni5010_rx(struct net_device *dev)
570 569
571 skb->protocol = eth_type_trans(skb,dev); 570 skb->protocol = eth_type_trans(skb,dev);
572 netif_rx(skb); 571 netif_rx(skb);
573 dev->last_rx = jiffies;
574 dev->stats.rx_packets++; 572 dev->stats.rx_packets++;
575 dev->stats.rx_bytes += i_pkt_size; 573 dev->stats.rx_bytes += i_pkt_size;
576 574
@@ -768,12 +766,3 @@ module_init(ni5010_init_module);
768module_exit(ni5010_cleanup_module); 766module_exit(ni5010_cleanup_module);
769#endif /* MODULE */ 767#endif /* MODULE */
770MODULE_LICENSE("GPL"); 768MODULE_LICENSE("GPL");
771
772/*
773 * Local variables:
774 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c ni5010.c"
775 * version-control: t
776 * kept-new-versions: 5
777 * tab-width: 4
778 * End:
779 */
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index b9a882d362da..a8bcc00c3302 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -9,8 +9,6 @@
9 * [feel free to mail ....] 9 * [feel free to mail ....]
10 * 10 *
11 * when using as module: (no autoprobing!) 11 * when using as module: (no autoprobing!)
12 * compile with:
13 * gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ -DMODULE -c ni52.c
14 * run with e.g: 12 * run with e.g:
15 * insmod ni52.o io=0x360 irq=9 memstart=0xd0000 memend=0xd4000 13 * insmod ni52.o io=0x360 irq=9 memstart=0xd0000 memend=0xd4000
16 * 14 *
@@ -214,7 +212,7 @@ struct priv {
214/* wait for command with timeout: */ 212/* wait for command with timeout: */
215static void wait_for_scb_cmd(struct net_device *dev) 213static void wait_for_scb_cmd(struct net_device *dev)
216{ 214{
217 struct priv *p = dev->priv; 215 struct priv *p = netdev_priv(dev);
218 int i; 216 int i;
219 for (i = 0; i < 16384; i++) { 217 for (i = 0; i < 16384; i++) {
220 if (readb(&p->scb->cmd_cuc) == 0) 218 if (readb(&p->scb->cmd_cuc) == 0)
@@ -233,7 +231,7 @@ static void wait_for_scb_cmd(struct net_device *dev)
233 231
234static void wait_for_scb_cmd_ruc(struct net_device *dev) 232static void wait_for_scb_cmd_ruc(struct net_device *dev)
235{ 233{
236 struct priv *p = dev->priv; 234 struct priv *p = netdev_priv(dev);
237 int i; 235 int i;
238 for (i = 0; i < 16384; i++) { 236 for (i = 0; i < 16384; i++) {
239 if (readb(&p->scb->cmd_ruc) == 0) 237 if (readb(&p->scb->cmd_ruc) == 0)
@@ -298,7 +296,7 @@ static int ni52_open(struct net_device *dev)
298static int check_iscp(struct net_device *dev, void __iomem *addr) 296static int check_iscp(struct net_device *dev, void __iomem *addr)
299{ 297{
300 struct iscp_struct __iomem *iscp = addr; 298 struct iscp_struct __iomem *iscp = addr;
301 struct priv *p = dev->priv; 299 struct priv *p = netdev_priv(dev);
302 memset_io(iscp, 0, sizeof(struct iscp_struct)); 300 memset_io(iscp, 0, sizeof(struct iscp_struct));
303 301
304 writel(make24(iscp), &p->scp->iscp); 302 writel(make24(iscp), &p->scp->iscp);
@@ -318,7 +316,7 @@ static int check_iscp(struct net_device *dev, void __iomem *addr)
318 */ 316 */
319static int check586(struct net_device *dev, unsigned size) 317static int check586(struct net_device *dev, unsigned size)
320{ 318{
321 struct priv *p = dev->priv; 319 struct priv *p = netdev_priv(dev);
322 int i; 320 int i;
323 321
324 p->mapped = ioremap(dev->mem_start, size); 322 p->mapped = ioremap(dev->mem_start, size);
@@ -354,7 +352,7 @@ Enodev:
354 */ 352 */
355static void alloc586(struct net_device *dev) 353static void alloc586(struct net_device *dev)
356{ 354{
357 struct priv *p = (struct priv *) dev->priv; 355 struct priv *p = netdev_priv(dev);
358 356
359 ni_reset586(); 357 ni_reset586();
360 mdelay(32); 358 mdelay(32);
@@ -400,7 +398,7 @@ struct net_device * __init ni52_probe(int unit)
400 if (!dev) 398 if (!dev)
401 return ERR_PTR(-ENOMEM); 399 return ERR_PTR(-ENOMEM);
402 400
403 p = dev->priv; 401 p = netdev_priv(dev);
404 402
405 if (unit >= 0) { 403 if (unit >= 0) {
406 sprintf(dev->name, "eth%d", unit); 404 sprintf(dev->name, "eth%d", unit);
@@ -446,7 +444,7 @@ out:
446static int __init ni52_probe1(struct net_device *dev, int ioaddr) 444static int __init ni52_probe1(struct net_device *dev, int ioaddr)
447{ 445{
448 int i, size, retval; 446 int i, size, retval;
449 struct priv *priv = dev->priv; 447 struct priv *priv = netdev_priv(dev);
450 448
451 dev->base_addr = ioaddr; 449 dev->base_addr = ioaddr;
452 dev->irq = irq; 450 dev->irq = irq;
@@ -588,7 +586,7 @@ static int init586(struct net_device *dev)
588{ 586{
589 void __iomem *ptr; 587 void __iomem *ptr;
590 int i, result = 0; 588 int i, result = 0;
591 struct priv *p = (struct priv *)dev->priv; 589 struct priv *p = netdev_priv(dev);
592 struct configure_cmd_struct __iomem *cfg_cmd; 590 struct configure_cmd_struct __iomem *cfg_cmd;
593 struct iasetup_cmd_struct __iomem *ias_cmd; 591 struct iasetup_cmd_struct __iomem *ias_cmd;
594 struct tdr_cmd_struct __iomem *tdr_cmd; 592 struct tdr_cmd_struct __iomem *tdr_cmd;
@@ -829,7 +827,7 @@ static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr)
829 struct rfd_struct __iomem *rfd = ptr; 827 struct rfd_struct __iomem *rfd = ptr;
830 struct rbd_struct __iomem *rbd; 828 struct rbd_struct __iomem *rbd;
831 int i; 829 int i;
832 struct priv *p = (struct priv *) dev->priv; 830 struct priv *p = netdev_priv(dev);
833 831
834 memset_io(rfd, 0, 832 memset_io(rfd, 0,
835 sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd)); 833 sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd));
@@ -878,7 +876,7 @@ static irqreturn_t ni52_interrupt(int irq, void *dev_id)
878 int cnt = 0; 876 int cnt = 0;
879 struct priv *p; 877 struct priv *p;
880 878
881 p = (struct priv *) dev->priv; 879 p = netdev_priv(dev);
882 880
883 if (debuglevel > 1) 881 if (debuglevel > 1)
884 printk("I"); 882 printk("I");
@@ -950,7 +948,7 @@ static void ni52_rcv_int(struct net_device *dev)
950 unsigned short totlen; 948 unsigned short totlen;
951 struct sk_buff *skb; 949 struct sk_buff *skb;
952 struct rbd_struct __iomem *rbd; 950 struct rbd_struct __iomem *rbd;
953 struct priv *p = (struct priv *)dev->priv; 951 struct priv *p = netdev_priv(dev);
954 952
955 if (debuglevel > 0) 953 if (debuglevel > 0)
956 printk("R"); 954 printk("R");
@@ -970,7 +968,6 @@ static void ni52_rcv_int(struct net_device *dev)
970 memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen); 968 memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen);
971 skb->protocol = eth_type_trans(skb, dev); 969 skb->protocol = eth_type_trans(skb, dev);
972 netif_rx(skb); 970 netif_rx(skb);
973 dev->last_rx = jiffies;
974 p->stats.rx_packets++; 971 p->stats.rx_packets++;
975 p->stats.rx_bytes += totlen; 972 p->stats.rx_bytes += totlen;
976 } else 973 } else
@@ -1040,7 +1037,7 @@ static void ni52_rcv_int(struct net_device *dev)
1040 1037
1041static void ni52_rnr_int(struct net_device *dev) 1038static void ni52_rnr_int(struct net_device *dev)
1042{ 1039{
1043 struct priv *p = (struct priv *) dev->priv; 1040 struct priv *p = netdev_priv(dev);
1044 1041
1045 p->stats.rx_errors++; 1042 p->stats.rx_errors++;
1046 1043
@@ -1065,7 +1062,7 @@ static void ni52_rnr_int(struct net_device *dev)
1065static void ni52_xmt_int(struct net_device *dev) 1062static void ni52_xmt_int(struct net_device *dev)
1066{ 1063{
1067 int status; 1064 int status;
1068 struct priv *p = (struct priv *) dev->priv; 1065 struct priv *p = netdev_priv(dev);
1069 1066
1070 if (debuglevel > 0) 1067 if (debuglevel > 0)
1071 printk("X"); 1068 printk("X");
@@ -1113,7 +1110,7 @@ static void ni52_xmt_int(struct net_device *dev)
1113 1110
1114static void startrecv586(struct net_device *dev) 1111static void startrecv586(struct net_device *dev)
1115{ 1112{
1116 struct priv *p = (struct priv *) dev->priv; 1113 struct priv *p = netdev_priv(dev);
1117 1114
1118 wait_for_scb_cmd(dev); 1115 wait_for_scb_cmd(dev);
1119 wait_for_scb_cmd_ruc(dev); 1116 wait_for_scb_cmd_ruc(dev);
@@ -1126,7 +1123,7 @@ static void startrecv586(struct net_device *dev)
1126 1123
1127static void ni52_timeout(struct net_device *dev) 1124static void ni52_timeout(struct net_device *dev)
1128{ 1125{
1129 struct priv *p = (struct priv *) dev->priv; 1126 struct priv *p = netdev_priv(dev);
1130#ifndef NO_NOPCOMMANDS 1127#ifndef NO_NOPCOMMANDS
1131 if (readb(&p->scb->cus) & CU_ACTIVE) { /* COMMAND-UNIT active? */ 1128 if (readb(&p->scb->cus) & CU_ACTIVE) { /* COMMAND-UNIT active? */
1132 netif_wake_queue(dev); 1129 netif_wake_queue(dev);
@@ -1177,7 +1174,7 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
1177#ifndef NO_NOPCOMMANDS 1174#ifndef NO_NOPCOMMANDS
1178 int next_nop; 1175 int next_nop;
1179#endif 1176#endif
1180 struct priv *p = (struct priv *) dev->priv; 1177 struct priv *p = netdev_priv(dev);
1181 1178
1182 if (skb->len > XMIT_BUFF_SIZE) { 1179 if (skb->len > XMIT_BUFF_SIZE) {
1183 printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len); 1180 printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len);
@@ -1274,7 +1271,7 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
1274 1271
1275static struct net_device_stats *ni52_get_stats(struct net_device *dev) 1272static struct net_device_stats *ni52_get_stats(struct net_device *dev)
1276{ 1273{
1277 struct priv *p = (struct priv *) dev->priv; 1274 struct priv *p = netdev_priv(dev);
1278 unsigned short crc, aln, rsc, ovrn; 1275 unsigned short crc, aln, rsc, ovrn;
1279 1276
1280 /* Get error-statistics from the ni82586 */ 1277 /* Get error-statistics from the ni82586 */
@@ -1337,7 +1334,7 @@ int __init init_module(void)
1337 1334
1338void __exit cleanup_module(void) 1335void __exit cleanup_module(void)
1339{ 1336{
1340 struct priv *p = dev_ni52->priv; 1337 struct priv *p = netdev_priv(dev_ni52);
1341 unregister_netdev(dev_ni52); 1338 unregister_netdev(dev_ni52);
1342 iounmap(p->mapped); 1339 iounmap(p->mapped);
1343 release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE); 1340 release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE);
@@ -1346,7 +1343,3 @@ void __exit cleanup_module(void)
1346#endif /* MODULE */ 1343#endif /* MODULE */
1347 1344
1348MODULE_LICENSE("GPL"); 1345MODULE_LICENSE("GPL");
1349
1350/*
1351 * END: linux/drivers/net/ni52.c
1352 */
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 3edc971d0eca..254057275e0e 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -7,8 +7,6 @@
7 * EtherBlaster. (probably it also works with every full NE2100 7 * EtherBlaster. (probably it also works with every full NE2100
8 * compatible card) 8 * compatible card)
9 * 9 *
10 * To compile as module, type:
11 * gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ -DMODULE -c ni65.c
12 * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7 10 * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
13 * 11 *
14 * This is an extension to the Linux operating system, and is covered by the 12 * This is an extension to the Linux operating system, and is covered by the
@@ -295,7 +293,7 @@ static void ni65_set_performance(struct priv *p)
295 */ 293 */
296static int ni65_open(struct net_device *dev) 294static int ni65_open(struct net_device *dev)
297{ 295{
298 struct priv *p = (struct priv *) dev->priv; 296 struct priv *p = dev->ml_priv;
299 int irqval = request_irq(dev->irq, &ni65_interrupt,0, 297 int irqval = request_irq(dev->irq, &ni65_interrupt,0,
300 cards[p->cardno].cardname,dev); 298 cards[p->cardno].cardname,dev);
301 if (irqval) { 299 if (irqval) {
@@ -321,7 +319,7 @@ static int ni65_open(struct net_device *dev)
321 */ 319 */
322static int ni65_close(struct net_device *dev) 320static int ni65_close(struct net_device *dev)
323{ 321{
324 struct priv *p = (struct priv *) dev->priv; 322 struct priv *p = dev->ml_priv;
325 323
326 netif_stop_queue(dev); 324 netif_stop_queue(dev);
327 325
@@ -345,7 +343,7 @@ static int ni65_close(struct net_device *dev)
345 343
346static void cleanup_card(struct net_device *dev) 344static void cleanup_card(struct net_device *dev)
347{ 345{
348 struct priv *p = (struct priv *) dev->priv; 346 struct priv *p = dev->ml_priv;
349 disable_dma(dev->dma); 347 disable_dma(dev->dma);
350 free_dma(dev->dma); 348 free_dma(dev->dma);
351 release_region(dev->base_addr, cards[p->cardno].total_size); 349 release_region(dev->base_addr, cards[p->cardno].total_size);
@@ -444,7 +442,7 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
444 release_region(ioaddr, cards[i].total_size); 442 release_region(ioaddr, cards[i].total_size);
445 return j; 443 return j;
446 } 444 }
447 p = (struct priv *) dev->priv; 445 p = dev->ml_priv;
448 p->cmdr_addr = ioaddr + cards[i].cmd_offset; 446 p->cmdr_addr = ioaddr + cards[i].cmd_offset;
449 p->cardno = i; 447 p->cardno = i;
450 spin_lock_init(&p->ring_lock); 448 spin_lock_init(&p->ring_lock);
@@ -647,8 +645,8 @@ static int ni65_alloc_buffer(struct net_device *dev)
647 if(!ptr) 645 if(!ptr)
648 return -ENOMEM; 646 return -ENOMEM;
649 647
650 p = dev->priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7); 648 p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
651 memset((char *) dev->priv,0,sizeof(struct priv)); 649 memset((char *)p, 0, sizeof(struct priv));
652 p->self = ptr; 650 p->self = ptr;
653 651
654 for(i=0;i<TMDNUM;i++) 652 for(i=0;i<TMDNUM;i++)
@@ -790,7 +788,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
790static int ni65_lance_reinit(struct net_device *dev) 788static int ni65_lance_reinit(struct net_device *dev)
791{ 789{
792 int i; 790 int i;
793 struct priv *p = (struct priv *) dev->priv; 791 struct priv *p = dev->ml_priv;
794 unsigned long flags; 792 unsigned long flags;
795 793
796 p->lock = 0; 794 p->lock = 0;
@@ -876,7 +874,7 @@ static irqreturn_t ni65_interrupt(int irq, void * dev_id)
876 struct priv *p; 874 struct priv *p;
877 int bcnt = 32; 875 int bcnt = 32;
878 876
879 p = (struct priv *) dev->priv; 877 p = dev->ml_priv;
880 878
881 spin_lock(&p->ring_lock); 879 spin_lock(&p->ring_lock);
882 880
@@ -899,7 +897,7 @@ static irqreturn_t ni65_interrupt(int irq, void * dev_id)
899 897
900 if(csr0 & CSR0_ERR) 898 if(csr0 & CSR0_ERR)
901 { 899 {
902 struct priv *p = (struct priv *) dev->priv; 900 struct priv *p = dev->ml_priv;
903 if(debuglevel > 1) 901 if(debuglevel > 1)
904 printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0); 902 printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
905 if(csr0 & CSR0_BABL) 903 if(csr0 & CSR0_BABL)
@@ -924,7 +922,7 @@ static irqreturn_t ni65_interrupt(int irq, void * dev_id)
924 int j; 922 int j;
925 for(j=0;j<RMDNUM;j++) 923 for(j=0;j<RMDNUM;j++)
926 { 924 {
927 struct priv *p = (struct priv *) dev->priv; 925 struct priv *p = dev->ml_priv;
928 int i,k,num1,num2; 926 int i,k,num1,num2;
929 for(i=RMDNUM-1;i>0;i--) { 927 for(i=RMDNUM-1;i>0;i--) {
930 num2 = (p->rmdnum + i) & (RMDNUM-1); 928 num2 = (p->rmdnum + i) & (RMDNUM-1);
@@ -982,7 +980,7 @@ static irqreturn_t ni65_interrupt(int irq, void * dev_id)
982 */ 980 */
983static void ni65_xmit_intr(struct net_device *dev,int csr0) 981static void ni65_xmit_intr(struct net_device *dev,int csr0)
984{ 982{
985 struct priv *p = (struct priv *) dev->priv; 983 struct priv *p = dev->ml_priv;
986 984
987 while(p->xmit_queued) 985 while(p->xmit_queued)
988 { 986 {
@@ -1049,7 +1047,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1049 struct rmd *rmdp; 1047 struct rmd *rmdp;
1050 int rmdstat,len; 1048 int rmdstat,len;
1051 int cnt=0; 1049 int cnt=0;
1052 struct priv *p = (struct priv *) dev->priv; 1050 struct priv *p = dev->ml_priv;
1053 1051
1054 rmdp = p->rmdhead + p->rmdnum; 1052 rmdp = p->rmdhead + p->rmdnum;
1055 while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN)) 1053 while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
@@ -1113,7 +1111,6 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1113 p->stats.rx_bytes += len; 1111 p->stats.rx_bytes += len;
1114 skb->protocol=eth_type_trans(skb,dev); 1112 skb->protocol=eth_type_trans(skb,dev);
1115 netif_rx(skb); 1113 netif_rx(skb);
1116 dev->last_rx = jiffies;
1117 } 1114 }
1118 else 1115 else
1119 { 1116 {
@@ -1140,7 +1137,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
1140static void ni65_timeout(struct net_device *dev) 1137static void ni65_timeout(struct net_device *dev)
1141{ 1138{
1142 int i; 1139 int i;
1143 struct priv *p = (struct priv *) dev->priv; 1140 struct priv *p = dev->ml_priv;
1144 1141
1145 printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name); 1142 printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
1146 for(i=0;i<TMDNUM;i++) 1143 for(i=0;i<TMDNUM;i++)
@@ -1157,7 +1154,7 @@ static void ni65_timeout(struct net_device *dev)
1157 1154
1158static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev) 1155static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
1159{ 1156{
1160 struct priv *p = (struct priv *) dev->priv; 1157 struct priv *p = dev->ml_priv;
1161 1158
1162 netif_stop_queue(dev); 1159 netif_stop_queue(dev);
1163 1160
@@ -1222,7 +1219,7 @@ static struct net_device_stats *ni65_get_stats(struct net_device *dev)
1222 1219
1223#if 0 1220#if 0
1224 int i; 1221 int i;
1225 struct priv *p = (struct priv *) dev->priv; 1222 struct priv *p = dev->ml_priv;
1226 for(i=0;i<RMDNUM;i++) 1223 for(i=0;i<RMDNUM;i++)
1227 { 1224 {
1228 struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1)); 1225 struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1));
@@ -1231,7 +1228,7 @@ static struct net_device_stats *ni65_get_stats(struct net_device *dev)
1231 printk("\n"); 1228 printk("\n");
1232#endif 1229#endif
1233 1230
1234 return &((struct priv *) dev->priv)->stats; 1231 return &((struct priv *)dev->ml_priv)->stats;
1235} 1232}
1236 1233
1237static void set_multicast_list(struct net_device *dev) 1234static void set_multicast_list(struct net_device *dev)
@@ -1266,7 +1263,3 @@ void __exit cleanup_module(void)
1266#endif /* MODULE */ 1263#endif /* MODULE */
1267 1264
1268MODULE_LICENSE("GPL"); 1265MODULE_LICENSE("GPL");
1269
1270/*
1271 * END of ni65.c
1272 */
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 1b6f548c4411..0c0b752315ca 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -448,7 +448,7 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
448 struct niu_link_config *lp = &np->link_config; 448 struct niu_link_config *lp = &np->link_config;
449 u16 pll_cfg, pll_sts; 449 u16 pll_cfg, pll_sts;
450 int max_retry = 100; 450 int max_retry = 100;
451 u64 sig, mask, val; 451 u64 uninitialized_var(sig), mask, val;
452 u32 tx_cfg, rx_cfg; 452 u32 tx_cfg, rx_cfg;
453 unsigned long i; 453 unsigned long i;
454 int err; 454 int err;
@@ -547,7 +547,7 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
547 struct niu_link_config *lp = &np->link_config; 547 struct niu_link_config *lp = &np->link_config;
548 u32 tx_cfg, rx_cfg, pll_cfg, pll_sts; 548 u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
549 int max_retry = 100; 549 int max_retry = 100;
550 u64 sig, mask, val; 550 u64 uninitialized_var(sig), mask, val;
551 unsigned long i; 551 unsigned long i;
552 int err; 552 int err;
553 553
@@ -738,7 +738,7 @@ static int esr_write_glue0(struct niu *np, unsigned long chan, u32 val)
738 738
739static int esr_reset(struct niu *np) 739static int esr_reset(struct niu *np)
740{ 740{
741 u32 reset; 741 u32 uninitialized_var(reset);
742 int err; 742 int err;
743 743
744 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR, 744 err = mdio_write(np, np->port, NIU_ESR_DEV_ADDR,
@@ -3392,8 +3392,6 @@ static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp)
3392 skb->protocol = eth_type_trans(skb, np->dev); 3392 skb->protocol = eth_type_trans(skb, np->dev);
3393 netif_receive_skb(skb); 3393 netif_receive_skb(skb);
3394 3394
3395 np->dev->last_rx = jiffies;
3396
3397 return num_rcr; 3395 return num_rcr;
3398} 3396}
3399 3397
@@ -3529,6 +3527,57 @@ out:
3529 } 3527 }
3530} 3528}
3531 3529
3530static inline void niu_sync_rx_discard_stats(struct niu *np,
3531 struct rx_ring_info *rp,
3532 const int limit)
3533{
3534 /* This elaborate scheme is needed for reading the RX discard
3535 * counters, as they are only 16-bit and can overflow quickly,
3536 * and because the overflow indication bit is not usable as
3537 * the counter value does not wrap, but remains at max value
3538 * 0xFFFF.
3539 *
3540 * In theory and in practice counters can be lost in between
3541 * reading nr64() and clearing the counter nw64(). For this
3542 * reason, the number of counter clearings nw64() is
3543 * limited/reduced though the limit parameter.
3544 */
3545 int rx_channel = rp->rx_channel;
3546 u32 misc, wred;
3547
3548 /* RXMISC (Receive Miscellaneous Discard Count), covers the
3549 * following discard events: IPP (Input Port Process),
3550 * FFLP/TCAM, Full RCR (Receive Completion Ring) RBR (Receive
3551 * Block Ring) prefetch buffer is empty.
3552 */
3553 misc = nr64(RXMISC(rx_channel));
3554 if (unlikely((misc & RXMISC_COUNT) > limit)) {
3555 nw64(RXMISC(rx_channel), 0);
3556 rp->rx_errors += misc & RXMISC_COUNT;
3557
3558 if (unlikely(misc & RXMISC_OFLOW))
3559 dev_err(np->device, "rx-%d: Counter overflow "
3560 "RXMISC discard\n", rx_channel);
3561
3562 niudbg(RX_ERR, "%s-rx-%d: MISC drop=%u over=%u\n",
3563 np->dev->name, rx_channel, misc, misc-limit);
3564 }
3565
3566 /* WRED (Weighted Random Early Discard) by hardware */
3567 wred = nr64(RED_DIS_CNT(rx_channel));
3568 if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
3569 nw64(RED_DIS_CNT(rx_channel), 0);
3570 rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
3571
3572 if (unlikely(wred & RED_DIS_CNT_OFLOW))
3573 dev_err(np->device, "rx-%d: Counter overflow "
3574 "WRED discard\n", rx_channel);
3575
3576 niudbg(RX_ERR, "%s-rx-%d: WRED drop=%u over=%u\n",
3577 np->dev->name, rx_channel, wred, wred-limit);
3578 }
3579}
3580
3532static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget) 3581static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
3533{ 3582{
3534 int qlen, rcr_done = 0, work_done = 0; 3583 int qlen, rcr_done = 0, work_done = 0;
@@ -3569,6 +3618,10 @@ static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
3569 3618
3570 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat); 3619 nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
3571 3620
3621 /* Only sync discards stats when qlen indicate potential for drops */
3622 if (qlen > 10)
3623 niu_sync_rx_discard_stats(np, rp, 0x7FFF);
3624
3572 return work_done; 3625 return work_done;
3573} 3626}
3574 3627
@@ -3616,7 +3669,7 @@ static int niu_poll(struct napi_struct *napi, int budget)
3616 work_done = niu_poll_core(np, lp, budget); 3669 work_done = niu_poll_core(np, lp, budget);
3617 3670
3618 if (work_done < budget) { 3671 if (work_done < budget) {
3619 netif_rx_complete(np->dev, napi); 3672 netif_rx_complete(napi);
3620 niu_ldg_rearm(np, lp, 1); 3673 niu_ldg_rearm(np, lp, 1);
3621 } 3674 }
3622 return work_done; 3675 return work_done;
@@ -4035,12 +4088,12 @@ static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
4035static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp, 4088static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
4036 u64 v0, u64 v1, u64 v2) 4089 u64 v0, u64 v1, u64 v2)
4037{ 4090{
4038 if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) { 4091 if (likely(netif_rx_schedule_prep(&lp->napi))) {
4039 lp->v0 = v0; 4092 lp->v0 = v0;
4040 lp->v1 = v1; 4093 lp->v1 = v1;
4041 lp->v2 = v2; 4094 lp->v2 = v2;
4042 __niu_fastpath_interrupt(np, lp->ldg_num, v0); 4095 __niu_fastpath_interrupt(np, lp->ldg_num, v0);
4043 __netif_rx_schedule(np->dev, &lp->napi); 4096 __netif_rx_schedule(&lp->napi);
4044 } 4097 }
4045} 4098}
4046 4099
@@ -5849,17 +5902,42 @@ static void niu_stop_hw(struct niu *np)
5849 niu_reset_rx_channels(np); 5902 niu_reset_rx_channels(np);
5850} 5903}
5851 5904
5905static void niu_set_irq_name(struct niu *np)
5906{
5907 int port = np->port;
5908 int i, j = 1;
5909
5910 sprintf(np->irq_name[0], "%s:MAC", np->dev->name);
5911
5912 if (port == 0) {
5913 sprintf(np->irq_name[1], "%s:MIF", np->dev->name);
5914 sprintf(np->irq_name[2], "%s:SYSERR", np->dev->name);
5915 j = 3;
5916 }
5917
5918 for (i = 0; i < np->num_ldg - j; i++) {
5919 if (i < np->num_rx_rings)
5920 sprintf(np->irq_name[i+j], "%s-rx-%d",
5921 np->dev->name, i);
5922 else if (i < np->num_tx_rings + np->num_rx_rings)
5923 sprintf(np->irq_name[i+j], "%s-tx-%d", np->dev->name,
5924 i - np->num_rx_rings);
5925 }
5926}
5927
5852static int niu_request_irq(struct niu *np) 5928static int niu_request_irq(struct niu *np)
5853{ 5929{
5854 int i, j, err; 5930 int i, j, err;
5855 5931
5932 niu_set_irq_name(np);
5933
5856 err = 0; 5934 err = 0;
5857 for (i = 0; i < np->num_ldg; i++) { 5935 for (i = 0; i < np->num_ldg; i++) {
5858 struct niu_ldg *lp = &np->ldg[i]; 5936 struct niu_ldg *lp = &np->ldg[i];
5859 5937
5860 err = request_irq(lp->irq, niu_interrupt, 5938 err = request_irq(lp->irq, niu_interrupt,
5861 IRQF_SHARED | IRQF_SAMPLE_RANDOM, 5939 IRQF_SHARED | IRQF_SAMPLE_RANDOM,
5862 np->dev->name, lp); 5940 np->irq_name[i], lp);
5863 if (err) 5941 if (err)
5864 goto out_free_irqs; 5942 goto out_free_irqs;
5865 5943
@@ -6050,15 +6128,17 @@ static void niu_get_rx_stats(struct niu *np)
6050 for (i = 0; i < np->num_rx_rings; i++) { 6128 for (i = 0; i < np->num_rx_rings; i++) {
6051 struct rx_ring_info *rp = &np->rx_rings[i]; 6129 struct rx_ring_info *rp = &np->rx_rings[i];
6052 6130
6131 niu_sync_rx_discard_stats(np, rp, 0);
6132
6053 pkts += rp->rx_packets; 6133 pkts += rp->rx_packets;
6054 bytes += rp->rx_bytes; 6134 bytes += rp->rx_bytes;
6055 dropped += rp->rx_dropped; 6135 dropped += rp->rx_dropped;
6056 errors += rp->rx_errors; 6136 errors += rp->rx_errors;
6057 } 6137 }
6058 np->net_stats.rx_packets = pkts; 6138 np->dev->stats.rx_packets = pkts;
6059 np->net_stats.rx_bytes = bytes; 6139 np->dev->stats.rx_bytes = bytes;
6060 np->net_stats.rx_dropped = dropped; 6140 np->dev->stats.rx_dropped = dropped;
6061 np->net_stats.rx_errors = errors; 6141 np->dev->stats.rx_errors = errors;
6062} 6142}
6063 6143
6064static void niu_get_tx_stats(struct niu *np) 6144static void niu_get_tx_stats(struct niu *np)
@@ -6074,9 +6154,9 @@ static void niu_get_tx_stats(struct niu *np)
6074 bytes += rp->tx_bytes; 6154 bytes += rp->tx_bytes;
6075 errors += rp->tx_errors; 6155 errors += rp->tx_errors;
6076 } 6156 }
6077 np->net_stats.tx_packets = pkts; 6157 np->dev->stats.tx_packets = pkts;
6078 np->net_stats.tx_bytes = bytes; 6158 np->dev->stats.tx_bytes = bytes;
6079 np->net_stats.tx_errors = errors; 6159 np->dev->stats.tx_errors = errors;
6080} 6160}
6081 6161
6082static struct net_device_stats *niu_get_stats(struct net_device *dev) 6162static struct net_device_stats *niu_get_stats(struct net_device *dev)
@@ -6086,7 +6166,7 @@ static struct net_device_stats *niu_get_stats(struct net_device *dev)
6086 niu_get_rx_stats(np); 6166 niu_get_rx_stats(np);
6087 niu_get_tx_stats(np); 6167 niu_get_tx_stats(np);
6088 6168
6089 return &np->net_stats; 6169 return &dev->stats;
6090} 6170}
6091 6171
6092static void niu_load_hash_xmac(struct niu *np, u16 *hash) 6172static void niu_load_hash_xmac(struct niu *np, u16 *hash)
@@ -6991,6 +7071,8 @@ static void niu_get_ethtool_stats(struct net_device *dev,
6991 for (i = 0; i < np->num_rx_rings; i++) { 7071 for (i = 0; i < np->num_rx_rings; i++) {
6992 struct rx_ring_info *rp = &np->rx_rings[i]; 7072 struct rx_ring_info *rp = &np->rx_rings[i];
6993 7073
7074 niu_sync_rx_discard_stats(np, rp, 0);
7075
6994 data[0] = rp->rx_channel; 7076 data[0] = rp->rx_channel;
6995 data[1] = rp->rx_packets; 7077 data[1] = rp->rx_packets;
6996 data[2] = rp->rx_bytes; 7078 data[2] = rp->rx_bytes;
@@ -8824,7 +8906,7 @@ static u64 niu_pci_map_page(struct device *dev, struct page *page,
8824static void niu_pci_unmap_page(struct device *dev, u64 dma_address, 8906static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
8825 size_t size, enum dma_data_direction direction) 8907 size_t size, enum dma_data_direction direction)
8826{ 8908{
8827 return dma_unmap_page(dev, dma_address, size, direction); 8909 dma_unmap_page(dev, dma_address, size, direction);
8828} 8910}
8829 8911
8830static u64 niu_pci_map_single(struct device *dev, void *cpu_addr, 8912static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,
@@ -8891,28 +8973,31 @@ static struct net_device * __devinit niu_alloc_and_init(
8891 return dev; 8973 return dev;
8892} 8974}
8893 8975
8976static const struct net_device_ops niu_netdev_ops = {
8977 .ndo_open = niu_open,
8978 .ndo_stop = niu_close,
8979 .ndo_start_xmit = niu_start_xmit,
8980 .ndo_get_stats = niu_get_stats,
8981 .ndo_set_multicast_list = niu_set_rx_mode,
8982 .ndo_validate_addr = eth_validate_addr,
8983 .ndo_set_mac_address = niu_set_mac_addr,
8984 .ndo_do_ioctl = niu_ioctl,
8985 .ndo_tx_timeout = niu_tx_timeout,
8986 .ndo_change_mtu = niu_change_mtu,
8987};
8988
8894static void __devinit niu_assign_netdev_ops(struct net_device *dev) 8989static void __devinit niu_assign_netdev_ops(struct net_device *dev)
8895{ 8990{
8896 dev->open = niu_open; 8991 dev->netdev_ops = &niu_netdev_ops;
8897 dev->stop = niu_close;
8898 dev->get_stats = niu_get_stats;
8899 dev->set_multicast_list = niu_set_rx_mode;
8900 dev->set_mac_address = niu_set_mac_addr;
8901 dev->do_ioctl = niu_ioctl;
8902 dev->tx_timeout = niu_tx_timeout;
8903 dev->hard_start_xmit = niu_start_xmit;
8904 dev->ethtool_ops = &niu_ethtool_ops; 8992 dev->ethtool_ops = &niu_ethtool_ops;
8905 dev->watchdog_timeo = NIU_TX_TIMEOUT; 8993 dev->watchdog_timeo = NIU_TX_TIMEOUT;
8906 dev->change_mtu = niu_change_mtu;
8907} 8994}
8908 8995
8909static void __devinit niu_device_announce(struct niu *np) 8996static void __devinit niu_device_announce(struct niu *np)
8910{ 8997{
8911 struct net_device *dev = np->dev; 8998 struct net_device *dev = np->dev;
8912 DECLARE_MAC_BUF(mac);
8913 8999
8914 pr_info("%s: NIU Ethernet %s\n", 9000 pr_info("%s: NIU Ethernet %pM\n", dev->name, dev->dev_addr);
8915 dev->name, print_mac(mac, dev->dev_addr));
8916 9001
8917 if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) { 9002 if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
8918 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", 9003 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 180ca8ae93de..e1a7392e8d70 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -3243,12 +3243,12 @@ struct niu {
3243#define NIU_FLAGS_XMAC 0x00010000 /* 0=BMAC 1=XMAC */ 3243#define NIU_FLAGS_XMAC 0x00010000 /* 0=BMAC 1=XMAC */
3244 3244
3245 u32 msg_enable; 3245 u32 msg_enable;
3246 char irq_name[NIU_NUM_RXCHAN+NIU_NUM_TXCHAN+3][IFNAMSIZ + 6];
3246 3247
3247 /* Protects hw programming, and ring state. */ 3248 /* Protects hw programming, and ring state. */
3248 spinlock_t lock; 3249 spinlock_t lock;
3249 3250
3250 const struct niu_ops *ops; 3251 const struct niu_ops *ops;
3251 struct net_device_stats net_stats;
3252 union niu_mac_stats mac_stats; 3252 union niu_mac_stats mac_stats;
3253 3253
3254 struct rx_ring_info *rx_rings; 3254 struct rx_ring_info *rx_rings;
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index ff449619f047..46b0772489e4 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1948,14 +1948,25 @@ static void ns83820_probe_phy(struct net_device *ndev)
1948} 1948}
1949#endif 1949#endif
1950 1950
1951static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_device_id *id) 1951static const struct net_device_ops netdev_ops = {
1952 .ndo_open = ns83820_open,
1953 .ndo_stop = ns83820_stop,
1954 .ndo_start_xmit = ns83820_hard_start_xmit,
1955 .ndo_get_stats = ns83820_get_stats,
1956 .ndo_change_mtu = ns83820_change_mtu,
1957 .ndo_set_multicast_list = ns83820_set_multicast,
1958 .ndo_validate_addr = eth_validate_addr,
1959 .ndo_tx_timeout = ns83820_tx_timeout,
1960};
1961
1962static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
1963 const struct pci_device_id *id)
1952{ 1964{
1953 struct net_device *ndev; 1965 struct net_device *ndev;
1954 struct ns83820 *dev; 1966 struct ns83820 *dev;
1955 long addr; 1967 long addr;
1956 int err; 1968 int err;
1957 int using_dac = 0; 1969 int using_dac = 0;
1958 DECLARE_MAC_BUF(mac);
1959 1970
1960 /* See if we can set the dma mask early on; failure is fatal. */ 1971 /* See if we can set the dma mask early on; failure is fatal. */
1961 if (sizeof(dma_addr_t) == 8 && 1972 if (sizeof(dma_addr_t) == 8 &&
@@ -2041,14 +2052,8 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
2041 ndev->name, le32_to_cpu(readl(dev->base + 0x22c)), 2052 ndev->name, le32_to_cpu(readl(dev->base + 0x22c)),
2042 pci_dev->subsystem_vendor, pci_dev->subsystem_device); 2053 pci_dev->subsystem_vendor, pci_dev->subsystem_device);
2043 2054
2044 ndev->open = ns83820_open; 2055 ndev->netdev_ops = &netdev_ops;
2045 ndev->stop = ns83820_stop;
2046 ndev->hard_start_xmit = ns83820_hard_start_xmit;
2047 ndev->get_stats = ns83820_get_stats;
2048 ndev->change_mtu = ns83820_change_mtu;
2049 ndev->set_multicast_list = ns83820_set_multicast;
2050 SET_ETHTOOL_OPS(ndev, &ops); 2056 SET_ETHTOOL_OPS(ndev, &ops);
2051 ndev->tx_timeout = ns83820_tx_timeout;
2052 ndev->watchdog_timeo = 5 * HZ; 2057 ndev->watchdog_timeo = 5 * HZ;
2053 pci_set_drvdata(pci_dev, ndev); 2058 pci_set_drvdata(pci_dev, ndev);
2054 2059
@@ -2220,12 +2225,11 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_
2220 ndev->features |= NETIF_F_HIGHDMA; 2225 ndev->features |= NETIF_F_HIGHDMA;
2221 } 2226 }
2222 2227
2223 printk(KERN_INFO "%s: ns83820 v" VERSION ": DP83820 v%u.%u: %s io=0x%08lx irq=%d f=%s\n", 2228 printk(KERN_INFO "%s: ns83820 v" VERSION ": DP83820 v%u.%u: %pM io=0x%08lx irq=%d f=%s\n",
2224 ndev->name, 2229 ndev->name,
2225 (unsigned)readl(dev->base + SRR) >> 8, 2230 (unsigned)readl(dev->base + SRR) >> 8,
2226 (unsigned)readl(dev->base + SRR) & 0xff, 2231 (unsigned)readl(dev->base + SRR) & 0xff,
2227 print_mac(mac, ndev->dev_addr), 2232 ndev->dev_addr, addr, pci_dev->irq,
2228 addr, pci_dev->irq,
2229 (ndev->features & NETIF_F_HIGHDMA) ? "h,sg" : "sg" 2233 (ndev->features & NETIF_F_HIGHDMA) ? "h,sg" : "sg"
2230 ); 2234 );
2231 2235
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index edc0fd588985..dcd199045613 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -971,7 +971,7 @@ static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
971 if (*chan->status & PAS_STATUS_ERROR) 971 if (*chan->status & PAS_STATUS_ERROR)
972 reg |= PAS_IOB_DMA_RXCH_RESET_DINTC; 972 reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
973 973
974 netif_rx_schedule(dev, &mac->napi); 974 netif_rx_schedule(&mac->napi);
975 975
976 write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg); 976 write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);
977 977
@@ -1011,7 +1011,7 @@ static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
1011 1011
1012 mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2); 1012 mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);
1013 1013
1014 netif_rx_schedule(mac->netdev, &mac->napi); 1014 netif_rx_schedule(&mac->napi);
1015 1015
1016 if (reg) 1016 if (reg)
1017 write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg); 1017 write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);
@@ -1105,7 +1105,8 @@ static int pasemi_mac_phy_init(struct net_device *dev)
1105 goto err; 1105 goto err;
1106 1106
1107 phy_id = *prop; 1107 phy_id = *prop;
1108 snprintf(mac->phy_id, BUS_ID_SIZE, "%x:%02x", (int)r.start, phy_id); 1108 snprintf(mac->phy_id, sizeof(mac->phy_id), "%x:%02x",
1109 (int)r.start, phy_id);
1109 1110
1110 of_node_put(phy_dn); 1111 of_node_put(phy_dn);
1111 1112
@@ -1640,7 +1641,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget)
1640 pkts = pasemi_mac_clean_rx(rx_ring(mac), budget); 1641 pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
1641 if (pkts < budget) { 1642 if (pkts < budget) {
1642 /* all done, no more packets present */ 1643 /* all done, no more packets present */
1643 netif_rx_complete(dev, napi); 1644 netif_rx_complete(napi);
1644 1645
1645 pasemi_mac_restart_rx_intr(mac); 1646 pasemi_mac_restart_rx_intr(mac);
1646 pasemi_mac_restart_tx_intr(mac); 1647 pasemi_mac_restart_tx_intr(mac);
@@ -1742,7 +1743,6 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1742 struct net_device *dev; 1743 struct net_device *dev;
1743 struct pasemi_mac *mac; 1744 struct pasemi_mac *mac;
1744 int err; 1745 int err;
1745 DECLARE_MAC_BUF(mac_buf);
1746 1746
1747 err = pci_enable_device(pdev); 1747 err = pci_enable_device(pdev);
1748 if (err) 1748 if (err)
@@ -1849,9 +1849,9 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1849 err); 1849 err);
1850 goto out; 1850 goto out;
1851 } else if netif_msg_probe(mac) 1851 } else if netif_msg_probe(mac)
1852 printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %s\n", 1852 printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %pM\n",
1853 dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI", 1853 dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
1854 mac->dma_if, print_mac(mac_buf, dev->dev_addr)); 1854 mac->dma_if, dev->dev_addr);
1855 1855
1856 return err; 1856 return err;
1857 1857
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index 5e8df3afea64..064a4fe1dd90 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -109,7 +109,7 @@ static void
109pasemi_mac_ethtool_get_ringparam(struct net_device *netdev, 109pasemi_mac_ethtool_get_ringparam(struct net_device *netdev,
110 struct ethtool_ringparam *ering) 110 struct ethtool_ringparam *ering)
111{ 111{
112 struct pasemi_mac *mac = netdev->priv; 112 struct pasemi_mac *mac = netdev_priv(netdev);
113 113
114 ering->tx_max_pending = TX_RING_SIZE/2; 114 ering->tx_max_pending = TX_RING_SIZE/2;
115 ering->tx_pending = RING_USED(mac->tx)/2; 115 ering->tx_pending = RING_USED(mac->tx)/2;
@@ -130,7 +130,7 @@ static int pasemi_mac_get_sset_count(struct net_device *netdev, int sset)
130static void pasemi_mac_get_ethtool_stats(struct net_device *netdev, 130static void pasemi_mac_get_ethtool_stats(struct net_device *netdev,
131 struct ethtool_stats *stats, u64 *data) 131 struct ethtool_stats *stats, u64 *data)
132{ 132{
133 struct pasemi_mac *mac = netdev->priv; 133 struct pasemi_mac *mac = netdev_priv(netdev);
134 int i; 134 int i;
135 135
136 data[0] = pasemi_read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)) 136 data[0] = pasemi_read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if))
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 0a575fef29e6..c95fd72c3bb9 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -737,7 +737,6 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
737 int i, addr_len, option; 737 int i, addr_len, option;
738 void *ioaddr = NULL; 738 void *ioaddr = NULL;
739 static int board_idx = -1; 739 static int board_idx = -1;
740 DECLARE_MAC_BUF(mac);
741 740
742/* when built into the kernel, we only print version if device is found */ 741/* when built into the kernel, we only print version if device is found */
743#ifndef MODULE 742#ifndef MODULE
@@ -782,7 +781,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
782 dev->irq = pdev->irq; 781 dev->irq = pdev->irq;
783 dev->base_addr = (unsigned long) ioaddr; 782 dev->base_addr = (unsigned long) ioaddr;
784 783
785 /* dev->priv/tp zeroed and aligned in alloc_etherdev */ 784 /* netdev_priv()/tp zeroed and aligned in alloc_etherdev */
786 tp = netdev_priv(dev); 785 tp = netdev_priv(dev);
787 786
788 /* note: tp->chipset set in netdrv_init_board */ 787 /* note: tp->chipset set in netdrv_init_board */
@@ -797,11 +796,11 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
797 796
798 tp->phys[0] = 32; 797 tp->phys[0] = 32;
799 798
800 printk (KERN_INFO "%s: %s at 0x%lx, %sIRQ %d\n", 799 printk (KERN_INFO "%s: %s at 0x%lx, %pM IRQ %d\n",
801 dev->name, 800 dev->name,
802 board_info[ent->driver_data].name, 801 board_info[ent->driver_data].name,
803 dev->base_addr, 802 dev->base_addr,
804 print_mac(mac, dev->dev_addr), 803 dev->dev_addr,
805 dev->irq); 804 dev->irq);
806 805
807 printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n", 806 printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n",
@@ -1566,7 +1565,6 @@ static void netdrv_rx_interrupt (struct net_device *dev,
1566 1565
1567 skb->protocol = eth_type_trans (skb, dev); 1566 skb->protocol = eth_type_trans (skb, dev);
1568 netif_rx (skb); 1567 netif_rx (skb);
1569 dev->last_rx = jiffies;
1570 dev->stats.rx_bytes += pkt_size; 1568 dev->stats.rx_bytes += pkt_size;
1571 dev->stats.rx_packets++; 1569 dev->stats.rx_packets++;
1572 } else { 1570 } else {
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 08c4dd896077..e5cb6b1f0ebd 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -345,7 +345,6 @@ static int tc574_config(struct pcmcia_device *link)
345 __be16 *phys_addr; 345 __be16 *phys_addr;
346 char *cardname; 346 char *cardname;
347 __u32 config; 347 __u32 config;
348 DECLARE_MAC_BUF(mac);
349 348
350 phys_addr = (__be16 *)dev->dev_addr; 349 phys_addr = (__be16 *)dev->dev_addr;
351 350
@@ -463,9 +462,9 @@ static int tc574_config(struct pcmcia_device *link)
463 strcpy(lp->node.dev_name, dev->name); 462 strcpy(lp->node.dev_name, dev->name);
464 463
465 printk(KERN_INFO "%s: %s at io %#3lx, irq %d, " 464 printk(KERN_INFO "%s: %s at io %#3lx, irq %d, "
466 "hw_addr %s.\n", 465 "hw_addr %pM.\n",
467 dev->name, cardname, dev->base_addr, dev->irq, 466 dev->name, cardname, dev->base_addr, dev->irq,
468 print_mac(mac, dev->dev_addr)); 467 dev->dev_addr);
469 printk(" %dK FIFO split %s Rx:Tx, %sMII interface.\n", 468 printk(" %dK FIFO split %s Rx:Tx, %sMII interface.\n",
470 8 << config & Ram_size, 469 8 << config & Ram_size,
471 ram_split[(config & Ram_split) >> Ram_split_shift], 470 ram_split[(config & Ram_split) >> Ram_split_shift],
@@ -1062,7 +1061,6 @@ static int el3_rx(struct net_device *dev, int worklimit)
1062 ((pkt_len+3)>>2)); 1061 ((pkt_len+3)>>2));
1063 skb->protocol = eth_type_trans(skb, dev); 1062 skb->protocol = eth_type_trans(skb, dev);
1064 netif_rx(skb); 1063 netif_rx(skb);
1065 dev->last_rx = jiffies;
1066 dev->stats.rx_packets++; 1064 dev->stats.rx_packets++;
1067 dev->stats.rx_bytes += pkt_len; 1065 dev->stats.rx_bytes += pkt_len;
1068 } else { 1066 } else {
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index c235cdba69c6..73ecc657999d 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -255,7 +255,6 @@ static int tc589_config(struct pcmcia_device *link)
255 int last_fn, last_ret, i, j, multi = 0, fifo; 255 int last_fn, last_ret, i, j, multi = 0, fifo;
256 unsigned int ioaddr; 256 unsigned int ioaddr;
257 char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 257 char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
258 DECLARE_MAC_BUF(mac);
259 258
260 DEBUG(0, "3c589_config(0x%p)\n", link); 259 DEBUG(0, "3c589_config(0x%p)\n", link);
261 260
@@ -333,9 +332,9 @@ static int tc589_config(struct pcmcia_device *link)
333 strcpy(lp->node.dev_name, dev->name); 332 strcpy(lp->node.dev_name, dev->name);
334 333
335 printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, " 334 printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, "
336 "hw_addr %s\n", 335 "hw_addr %pM\n",
337 dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq, 336 dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq,
338 print_mac(mac, dev->dev_addr)); 337 dev->dev_addr);
339 printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n", 338 printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n",
340 (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3], 339 (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
341 if_names[dev->if_port]); 340 if_names[dev->if_port]);
@@ -884,7 +883,6 @@ static int el3_rx(struct net_device *dev)
884 (pkt_len+3)>>2); 883 (pkt_len+3)>>2);
885 skb->protocol = eth_type_trans(skb, dev); 884 skb->protocol = eth_type_trans(skb, dev);
886 netif_rx(skb); 885 netif_rx(skb);
887 dev->last_rx = jiffies;
888 dev->stats.rx_packets++; 886 dev->stats.rx_packets++;
889 dev->stats.rx_bytes += pkt_len; 887 dev->stats.rx_bytes += pkt_len;
890 } else { 888 } else {
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 0418045166c3..0afa72095810 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -321,7 +321,6 @@ static int axnet_config(struct pcmcia_device *link)
321 struct net_device *dev = link->priv; 321 struct net_device *dev = link->priv;
322 axnet_dev_t *info = PRIV(dev); 322 axnet_dev_t *info = PRIV(dev);
323 int i, j, last_ret, last_fn; 323 int i, j, last_ret, last_fn;
324 DECLARE_MAC_BUF(mac);
325 324
326 DEBUG(0, "axnet_config(0x%p)\n", link); 325 DEBUG(0, "axnet_config(0x%p)\n", link);
327 326
@@ -397,10 +396,10 @@ static int axnet_config(struct pcmcia_device *link)
397 strcpy(info->node.dev_name, dev->name); 396 strcpy(info->node.dev_name, dev->name);
398 397
399 printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, " 398 printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, "
400 "hw_addr %s\n", 399 "hw_addr %pM\n",
401 dev->name, ((info->flags & IS_AX88790) ? 7 : 1), 400 dev->name, ((info->flags & IS_AX88790) ? 7 : 1),
402 dev->base_addr, dev->irq, 401 dev->base_addr, dev->irq,
403 print_mac(mac, dev->dev_addr)); 402 dev->dev_addr);
404 if (info->phy_id != -1) { 403 if (info->phy_id != -1) {
405 DEBUG(0, " MII transceiver at index %d, status %x.\n", info->phy_id, j); 404 DEBUG(0, " MII transceiver at index %d, status %x.\n", info->phy_id, j);
406 } else { 405 } else {
@@ -906,7 +905,7 @@ int ei_debug = 1;
906/* Index to functions. */ 905/* Index to functions. */
907static void ei_tx_intr(struct net_device *dev); 906static void ei_tx_intr(struct net_device *dev);
908static void ei_tx_err(struct net_device *dev); 907static void ei_tx_err(struct net_device *dev);
909static void ei_tx_timeout(struct net_device *dev); 908static void axnet_tx_timeout(struct net_device *dev);
910static void ei_receive(struct net_device *dev); 909static void ei_receive(struct net_device *dev);
911static void ei_rx_overrun(struct net_device *dev); 910static void ei_rx_overrun(struct net_device *dev);
912 911
@@ -957,9 +956,9 @@ static int ax_open(struct net_device *dev)
957 956
958#ifdef HAVE_TX_TIMEOUT 957#ifdef HAVE_TX_TIMEOUT
959 /* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout 958 /* The card I/O part of the driver (e.g. 3c503) can hook a Tx timeout
960 wrapper that does e.g. media check & then calls ei_tx_timeout. */ 959 wrapper that does e.g. media check & then calls axnet_tx_timeout. */
961 if (dev->tx_timeout == NULL) 960 if (dev->tx_timeout == NULL)
962 dev->tx_timeout = ei_tx_timeout; 961 dev->tx_timeout = axnet_tx_timeout;
963 if (dev->watchdog_timeo <= 0) 962 if (dev->watchdog_timeo <= 0)
964 dev->watchdog_timeo = TX_TIMEOUT; 963 dev->watchdog_timeo = TX_TIMEOUT;
965#endif 964#endif
@@ -1003,14 +1002,14 @@ static int ax_close(struct net_device *dev)
1003} 1002}
1004 1003
1005/** 1004/**
1006 * ei_tx_timeout - handle transmit time out condition 1005 * axnet_tx_timeout - handle transmit time out condition
1007 * @dev: network device which has apparently fallen asleep 1006 * @dev: network device which has apparently fallen asleep
1008 * 1007 *
1009 * Called by kernel when device never acknowledges a transmit has 1008 * Called by kernel when device never acknowledges a transmit has
1010 * completed (or failed) - i.e. never posted a Tx related interrupt. 1009 * completed (or failed) - i.e. never posted a Tx related interrupt.
1011 */ 1010 */
1012 1011
1013static void ei_tx_timeout(struct net_device *dev) 1012static void axnet_tx_timeout(struct net_device *dev)
1014{ 1013{
1015 long e8390_base = dev->base_addr; 1014 long e8390_base = dev->base_addr;
1016 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1015 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
@@ -1047,14 +1046,14 @@ static void ei_tx_timeout(struct net_device *dev)
1047} 1046}
1048 1047
1049/** 1048/**
1050 * ei_start_xmit - begin packet transmission 1049 * axnet_start_xmit - begin packet transmission
1051 * @skb: packet to be sent 1050 * @skb: packet to be sent
1052 * @dev: network device to which packet is sent 1051 * @dev: network device to which packet is sent
1053 * 1052 *
1054 * Sends a packet to an 8390 network device. 1053 * Sends a packet to an 8390 network device.
1055 */ 1054 */
1056 1055
1057static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev) 1056static int axnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1058{ 1057{
1059 long e8390_base = dev->base_addr; 1058 long e8390_base = dev->base_addr;
1060 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1059 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
@@ -1493,7 +1492,6 @@ static void ei_receive(struct net_device *dev)
1493 ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); 1492 ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame));
1494 skb->protocol=eth_type_trans(skb,dev); 1493 skb->protocol=eth_type_trans(skb,dev);
1495 netif_rx(skb); 1494 netif_rx(skb);
1496 dev->last_rx = jiffies;
1497 dev->stats.rx_packets++; 1495 dev->stats.rx_packets++;
1498 dev->stats.rx_bytes += pkt_len; 1496 dev->stats.rx_bytes += pkt_len;
1499 if (pkt_stat & ENRSR_PHY) 1497 if (pkt_stat & ENRSR_PHY)
@@ -1720,7 +1718,7 @@ static void axdev_setup(struct net_device *dev)
1720 ei_local = (struct ei_device *)netdev_priv(dev); 1718 ei_local = (struct ei_device *)netdev_priv(dev);
1721 spin_lock_init(&ei_local->page_lock); 1719 spin_lock_init(&ei_local->page_lock);
1722 1720
1723 dev->hard_start_xmit = &ei_start_xmit; 1721 dev->hard_start_xmit = &axnet_start_xmit;
1724 dev->get_stats = get_stats; 1722 dev->get_stats = get_stats;
1725 dev->set_multicast_list = &set_multicast_list; 1723 dev->set_multicast_list = &set_multicast_list;
1726 1724
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 831090c75622..7b5c77b7bd27 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -155,7 +155,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
155 if (!dev) 155 if (!dev)
156 goto fail_alloc_dev; 156 goto fail_alloc_dev;
157 157
158 lp = dev->priv; 158 lp = netdev_priv(dev);
159 lp->timeout = timeout; 159 lp->timeout = timeout;
160 lp->backplane = backplane; 160 lp->backplane = backplane;
161 lp->clockp = clockp; 161 lp->clockp = clockp;
@@ -303,7 +303,7 @@ static int com20020_config(struct pcmcia_device *link)
303 goto failed; 303 goto failed;
304 } 304 }
305 305
306 lp = dev->priv; 306 lp = netdev_priv(dev);
307 lp->card_name = "PCMCIA COM20020"; 307 lp->card_name = "PCMCIA COM20020";
308 lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */ 308 lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */
309 309
@@ -364,7 +364,7 @@ static int com20020_resume(struct pcmcia_device *link)
364 364
365 if (link->open) { 365 if (link->open) {
366 int ioaddr = dev->base_addr; 366 int ioaddr = dev->base_addr;
367 struct arcnet_local *lp = dev->priv; 367 struct arcnet_local *lp = netdev_priv(dev);
368 ARCRESET; 368 ARCRESET;
369 } 369 }
370 370
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 69d916daa7bb..69dcfbbabe82 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -125,6 +125,7 @@ typedef struct local_info_t {
125 u_short tx_queue_len; 125 u_short tx_queue_len;
126 cardtype_t cardtype; 126 cardtype_t cardtype;
127 u_short sent; 127 u_short sent;
128 u_char __iomem *base;
128} local_info_t; 129} local_info_t;
129 130
130#define MC_FILTERBREAK 64 131#define MC_FILTERBREAK 64
@@ -242,6 +243,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
242 lp = netdev_priv(dev); 243 lp = netdev_priv(dev);
243 link->priv = dev; 244 link->priv = dev;
244 lp->p_dev = link; 245 lp->p_dev = link;
246 lp->base = NULL;
245 247
246 /* The io structure describes IO port mapping */ 248 /* The io structure describes IO port mapping */
247 link->io.NumPorts1 = 32; 249 link->io.NumPorts1 = 32;
@@ -348,7 +350,6 @@ static int fmvj18x_config(struct pcmcia_device *link)
348 cardtype_t cardtype; 350 cardtype_t cardtype;
349 char *card_name = "unknown"; 351 char *card_name = "unknown";
350 u_char *node_id; 352 u_char *node_id;
351 DECLARE_MAC_BUF(mac);
352 353
353 DEBUG(0, "fmvj18x_config(0x%p)\n", link); 354 DEBUG(0, "fmvj18x_config(0x%p)\n", link);
354 355
@@ -443,8 +444,10 @@ static int fmvj18x_config(struct pcmcia_device *link)
443 dev->irq = link->irq.AssignedIRQ; 444 dev->irq = link->irq.AssignedIRQ;
444 dev->base_addr = link->io.BasePort1; 445 dev->base_addr = link->io.BasePort1;
445 446
446 if (link->io.BasePort2 != 0) 447 if (link->io.BasePort2 != 0) {
447 fmvj18x_setup_mfc(link); 448 ret = fmvj18x_setup_mfc(link);
449 if (ret != 0) goto failed;
450 }
448 451
449 ioaddr = dev->base_addr; 452 ioaddr = dev->base_addr;
450 453
@@ -539,9 +542,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
539 542
540 /* print current configuration */ 543 /* print current configuration */
541 printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, " 544 printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, "
542 "hw_addr %s\n", 545 "hw_addr %pM\n",
543 dev->name, card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2", 546 dev->name, card_name, sram_config == 0 ? "4K TX*2" : "8K TX*2",
544 dev->base_addr, dev->irq, print_mac(mac, dev->dev_addr)); 547 dev->base_addr, dev->irq, dev->dev_addr);
545 548
546 return 0; 549 return 0;
547 550
@@ -611,10 +614,10 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
611{ 614{
612 win_req_t req; 615 win_req_t req;
613 memreq_t mem; 616 memreq_t mem;
614 u_char __iomem *base; 617 int i;
615 int i, j;
616 struct net_device *dev = link->priv; 618 struct net_device *dev = link->priv;
617 unsigned int ioaddr; 619 unsigned int ioaddr;
620 local_info_t *lp = netdev_priv(dev);
618 621
619 /* Allocate a small memory window */ 622 /* Allocate a small memory window */
620 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; 623 req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
@@ -626,25 +629,32 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
626 return -1; 629 return -1;
627 } 630 }
628 631
629 base = ioremap(req.Base, req.Size); 632 lp->base = ioremap(req.Base, req.Size);
633 if (lp->base == NULL) {
634 printk(KERN_NOTICE "fmvj18x_cs: ioremap failed\n");
635 return -1;
636 }
637
630 mem.Page = 0; 638 mem.Page = 0;
631 mem.CardOffset = 0; 639 mem.CardOffset = 0;
632 pcmcia_map_mem_page(link->win, &mem); 640 i = pcmcia_map_mem_page(link->win, &mem);
633 641 if (i != 0) {
642 iounmap(lp->base);
643 lp->base = NULL;
644 cs_error(link, MapMemPage, i);
645 return -1;
646 }
647
634 ioaddr = dev->base_addr; 648 ioaddr = dev->base_addr;
635 writeb(0x47, base+0x800); /* Config Option Register of LAN */ 649 writeb(0x47, lp->base+0x800); /* Config Option Register of LAN */
636 writeb(0x0, base+0x802); /* Config and Status Register */ 650 writeb(0x0, lp->base+0x802); /* Config and Status Register */
637 651
638 writeb(ioaddr & 0xff, base+0x80a); /* I/O Base(Low) of LAN */ 652 writeb(ioaddr & 0xff, lp->base+0x80a); /* I/O Base(Low) of LAN */
639 writeb((ioaddr >> 8) & 0xff, base+0x80c); /* I/O Base(High) of LAN */ 653 writeb((ioaddr >> 8) & 0xff, lp->base+0x80c); /* I/O Base(High) of LAN */
640 654
641 writeb(0x45, base+0x820); /* Config Option Register of Modem */ 655 writeb(0x45, lp->base+0x820); /* Config Option Register of Modem */
642 writeb(0x8, base+0x822); /* Config and Status Register */ 656 writeb(0x8, lp->base+0x822); /* Config and Status Register */
643 657
644 iounmap(base);
645 j = pcmcia_release_window(link->win);
646 if (j != 0)
647 cs_error(link, ReleaseWindow, j);
648 return 0; 658 return 0;
649 659
650} 660}
@@ -652,8 +662,25 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
652 662
653static void fmvj18x_release(struct pcmcia_device *link) 663static void fmvj18x_release(struct pcmcia_device *link)
654{ 664{
655 DEBUG(0, "fmvj18x_release(0x%p)\n", link); 665
656 pcmcia_disable_device(link); 666 struct net_device *dev = link->priv;
667 local_info_t *lp = netdev_priv(dev);
668 u_char __iomem *tmp;
669 int j;
670
671 DEBUG(0, "fmvj18x_release(0x%p)\n", link);
672
673 if (lp->base != NULL) {
674 tmp = lp->base;
675 lp->base = NULL; /* set NULL before iounmap */
676 iounmap(tmp);
677 j = pcmcia_release_window(link->win);
678 if (j != 0)
679 cs_error(link, ReleaseWindow, j);
680 }
681
682 pcmcia_disable_device(link);
683
657} 684}
658 685
659static int fmvj18x_suspend(struct pcmcia_device *link) 686static int fmvj18x_suspend(struct pcmcia_device *link)
@@ -784,6 +811,13 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
784 811
785 outb(D_TX_INTR, ioaddr + TX_INTR); 812 outb(D_TX_INTR, ioaddr + TX_INTR);
786 outb(D_RX_INTR, ioaddr + RX_INTR); 813 outb(D_RX_INTR, ioaddr + RX_INTR);
814
815 if (lp->base != NULL) {
816 /* Ack interrupt for multifunction card */
817 writeb(0x01, lp->base+0x802);
818 writeb(0x09, lp->base+0x822);
819 }
820
787 return IRQ_HANDLED; 821 return IRQ_HANDLED;
788 822
789} /* fjn_interrupt */ 823} /* fjn_interrupt */
@@ -1036,7 +1070,6 @@ static void fjn_rx(struct net_device *dev)
1036#endif 1070#endif
1037 1071
1038 netif_rx(skb); 1072 netif_rx(skb);
1039 dev->last_rx = jiffies;
1040 lp->stats.rx_packets++; 1073 lp->stats.rx_packets++;
1041 lp->stats.rx_bytes += pkt_len; 1074 lp->stats.rx_bytes += pkt_len;
1042 } 1075 }
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 448cd40aeba5..ec7c588c9ae5 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -659,7 +659,6 @@ static int nmclan_config(struct pcmcia_device *link)
659 u_char buf[64]; 659 u_char buf[64];
660 int i, last_ret, last_fn; 660 int i, last_ret, last_fn;
661 unsigned int ioaddr; 661 unsigned int ioaddr;
662 DECLARE_MAC_BUF(mac);
663 662
664 DEBUG(0, "nmclan_config(0x%p)\n", link); 663 DEBUG(0, "nmclan_config(0x%p)\n", link);
665 664
@@ -719,9 +718,9 @@ static int nmclan_config(struct pcmcia_device *link)
719 strcpy(lp->node.dev_name, dev->name); 718 strcpy(lp->node.dev_name, dev->name);
720 719
721 printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port," 720 printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port,"
722 " hw_addr %s\n", 721 " hw_addr %pM\n",
723 dev->name, dev->base_addr, dev->irq, if_names[dev->if_port], 722 dev->name, dev->base_addr, dev->irq, if_names[dev->if_port],
724 print_mac(mac, dev->dev_addr)); 723 dev->dev_addr);
725 return 0; 724 return 0;
726 725
727cs_failed: 726cs_failed:
@@ -1193,7 +1192,6 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
1193 1192
1194 netif_rx(skb); /* Send the packet to the upper (protocol) layers. */ 1193 netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
1195 1194
1196 dev->last_rx = jiffies;
1197 lp->linux_stats.rx_packets++; 1195 lp->linux_stats.rx_packets++;
1198 lp->linux_stats.rx_bytes += pkt_len; 1196 lp->linux_stats.rx_bytes += pkt_len;
1199 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */ 1197 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index ce486f094492..c38ed777f0a8 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -554,7 +554,6 @@ static int pcnet_config(struct pcmcia_device *link)
554 int last_ret, last_fn, start_pg, stop_pg, cm_offset; 554 int last_ret, last_fn, start_pg, stop_pg, cm_offset;
555 int has_shmem = 0; 555 int has_shmem = 0;
556 hw_info_t *local_hw_info; 556 hw_info_t *local_hw_info;
557 DECLARE_MAC_BUF(mac);
558 557
559 DEBUG(0, "pcnet_config(0x%p)\n", link); 558 DEBUG(0, "pcnet_config(0x%p)\n", link);
560 559
@@ -675,7 +674,7 @@ static int pcnet_config(struct pcmcia_device *link)
675 printk (" mem %#5lx,", dev->mem_start); 674 printk (" mem %#5lx,", dev->mem_start);
676 if (info->flags & HAS_MISC_REG) 675 if (info->flags & HAS_MISC_REG)
677 printk(" %s xcvr,", if_names[dev->if_port]); 676 printk(" %s xcvr,", if_names[dev->if_port]);
678 printk(" hw_addr %s\n", print_mac(mac, dev->dev_addr)); 677 printk(" hw_addr %pM\n", dev->dev_addr);
679 return 0; 678 return 0;
680 679
681cs_failed: 680cs_failed:
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index c74d6656d266..fccd53ef3c64 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -949,7 +949,6 @@ static int smc91c92_config(struct pcmcia_device *link)
949 int i, j, rev; 949 int i, j, rev;
950 unsigned int ioaddr; 950 unsigned int ioaddr;
951 u_long mir; 951 u_long mir;
952 DECLARE_MAC_BUF(mac);
953 952
954 DEBUG(0, "smc91c92_config(0x%p)\n", link); 953 DEBUG(0, "smc91c92_config(0x%p)\n", link);
955 954
@@ -1062,9 +1061,9 @@ static int smc91c92_config(struct pcmcia_device *link)
1062 strcpy(smc->node.dev_name, dev->name); 1061 strcpy(smc->node.dev_name, dev->name);
1063 1062
1064 printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, " 1063 printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, "
1065 "hw_addr %s\n", 1064 "hw_addr %pM\n",
1066 dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq, 1065 dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq,
1067 print_mac(mac, dev->dev_addr)); 1066 dev->dev_addr);
1068 1067
1069 if (rev > 0) { 1068 if (rev > 0) {
1070 if (mir & 0x3ff) 1069 if (mir & 0x3ff)
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index e1fd585e7131..fef7e1861d6a 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -772,7 +772,6 @@ xirc2ps_config(struct pcmcia_device * link)
772 int err, i; 772 int err, i;
773 u_char buf[64]; 773 u_char buf[64];
774 cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data; 774 cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data;
775 DECLARE_MAC_BUF(mac);
776 775
777 local->dingo_ccr = NULL; 776 local->dingo_ccr = NULL;
778 777
@@ -1051,9 +1050,9 @@ xirc2ps_config(struct pcmcia_device * link)
1051 strcpy(local->node.dev_name, dev->name); 1050 strcpy(local->node.dev_name, dev->name);
1052 1051
1053 /* give some infos about the hardware */ 1052 /* give some infos about the hardware */
1054 printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %s\n", 1053 printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %pM\n",
1055 dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq, 1054 dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq,
1056 print_mac(mac, dev->dev_addr)); 1055 dev->dev_addr);
1057 1056
1058 return 0; 1057 return 0;
1059 1058
@@ -1243,7 +1242,6 @@ xirc2ps_interrupt(int irq, void *dev_id)
1243 } 1242 }
1244 skb->protocol = eth_type_trans(skb, dev); 1243 skb->protocol = eth_type_trans(skb, dev);
1245 netif_rx(skb); 1244 netif_rx(skb);
1246 dev->last_rx = jiffies;
1247 lp->stats.rx_packets++; 1245 lp->stats.rx_packets++;
1248 lp->stats.rx_bytes += pktlen; 1246 lp->stats.rx_bytes += pktlen;
1249 if (!(rsr & PhyPkt)) 1247 if (!(rsr & PhyPkt))
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index ca8c0e037400..044b7b07f5f4 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1246,7 +1246,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
1246 dev->stats.rx_bytes += skb->len; 1246 dev->stats.rx_bytes += skb->len;
1247 skb->protocol = eth_type_trans(skb, dev); 1247 skb->protocol = eth_type_trans(skb, dev);
1248 netif_receive_skb(skb); 1248 netif_receive_skb(skb);
1249 dev->last_rx = jiffies;
1250 dev->stats.rx_packets++; 1249 dev->stats.rx_packets++;
1251 return; 1250 return;
1252} 1251}
@@ -1398,7 +1397,7 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
1398 if (work_done < budget) { 1397 if (work_done < budget) {
1399 spin_lock_irqsave(&lp->lock, flags); 1398 spin_lock_irqsave(&lp->lock, flags);
1400 1399
1401 __netif_rx_complete(dev, napi); 1400 __netif_rx_complete(napi);
1402 1401
1403 /* clear interrupt masks */ 1402 /* clear interrupt masks */
1404 val = lp->a.read_csr(ioaddr, CSR3); 1403 val = lp->a.read_csr(ioaddr, CSR3);
@@ -1747,8 +1746,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1747 memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); 1746 memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
1748 1747
1749 if (pcnet32_debug & NETIF_MSG_PROBE) { 1748 if (pcnet32_debug & NETIF_MSG_PROBE) {
1750 DECLARE_MAC_BUF(mac); 1749 printk(" %pM", dev->dev_addr);
1751 printk(" %s", print_mac(mac, dev->dev_addr));
1752 1750
1753 /* Version 0x2623 and 0x2624 */ 1751 /* Version 0x2623 and 0x2624 */
1754 if (((chip_version + 1) & 0xfffe) == 0x2624) { 1752 if (((chip_version + 1) & 0xfffe) == 0x2624) {
@@ -2588,14 +2586,14 @@ pcnet32_interrupt(int irq, void *dev_id)
2588 dev->name, csr0); 2586 dev->name, csr0);
2589 /* unlike for the lance, there is no restart needed */ 2587 /* unlike for the lance, there is no restart needed */
2590 } 2588 }
2591 if (netif_rx_schedule_prep(dev, &lp->napi)) { 2589 if (netif_rx_schedule_prep(&lp->napi)) {
2592 u16 val; 2590 u16 val;
2593 /* set interrupt masks */ 2591 /* set interrupt masks */
2594 val = lp->a.read_csr(ioaddr, CSR3); 2592 val = lp->a.read_csr(ioaddr, CSR3);
2595 val |= 0x5f00; 2593 val |= 0x5f00;
2596 lp->a.write_csr(ioaddr, CSR3, val); 2594 lp->a.write_csr(ioaddr, CSR3, val);
2597 mmiowb(); 2595 mmiowb();
2598 __netif_rx_schedule(dev, &lp->napi); 2596 __netif_rx_schedule(&lp->napi);
2599 break; 2597 break;
2600 } 2598 }
2601 csr0 = lp->a.read_csr(ioaddr, CSR0); 2599 csr0 = lp->a.read_csr(ioaddr, CSR0);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d55932acd887..de9cf5136fdc 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -66,6 +66,22 @@ config REALTEK_PHY
66 ---help--- 66 ---help---
67 Supports the Realtek 821x PHY. 67 Supports the Realtek 821x PHY.
68 68
69config NATIONAL_PHY
70 tristate "Drivers for National Semiconductor PHYs"
71 ---help---
72 Currently supports the DP83865 PHY.
73
74config STE10XP
75 depends on PHYLIB
76 tristate "Driver for STMicroelectronics STe10Xp PHYs"
77 ---help---
78 This is the driver for the STe100p and STe101p PHYs.
79
80config LSI_ET1011C_PHY
81 tristate "Driver for LSI ET1011C PHY"
82 ---help---
83 Supports the LSI ET1011C PHY.
84
69config FIXED_PHY 85config FIXED_PHY
70 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" 86 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
71 depends on PHYLIB=y 87 depends on PHYLIB=y
@@ -84,10 +100,13 @@ config MDIO_BITBANG
84 100
85 If in doubt, say N. 101 If in doubt, say N.
86 102
87config MDIO_OF_GPIO 103config MDIO_GPIO
88 tristate "Support for GPIO lib-based bitbanged MDIO buses" 104 tristate "Support for GPIO lib-based bitbanged MDIO buses"
89 depends on MDIO_BITBANG && OF_GPIO 105 depends on MDIO_BITBANG && GENERIC_GPIO
90 ---help--- 106 ---help---
91 Supports GPIO lib-based MDIO busses. 107 Supports GPIO lib-based MDIO busses.
92 108
109 To compile this driver as a module, choose M here: the module
110 will be called mdio-gpio.
111
93endif # PHYLIB 112endif # PHYLIB
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index eee329fa6f53..3a1bfefefbc3 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -13,6 +13,9 @@ obj-$(CONFIG_VITESSE_PHY) += vitesse.o
13obj-$(CONFIG_BROADCOM_PHY) += broadcom.o 13obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
14obj-$(CONFIG_ICPLUS_PHY) += icplus.o 14obj-$(CONFIG_ICPLUS_PHY) += icplus.o
15obj-$(CONFIG_REALTEK_PHY) += realtek.o 15obj-$(CONFIG_REALTEK_PHY) += realtek.o
16obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
16obj-$(CONFIG_FIXED_PHY) += fixed.o 17obj-$(CONFIG_FIXED_PHY) += fixed.o
17obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o 18obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
18obj-$(CONFIG_MDIO_OF_GPIO) += mdio-ofgpio.o 19obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
20obj-$(CONFIG_NATIONAL_PHY) += national.o
21obj-$(CONFIG_STE10XP) += ste10Xp.o
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 4b4dc98ad165..190efc3301c6 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -17,6 +17,8 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/phy.h> 18#include <linux/phy.h>
19 19
20#define PHY_ID_BCM50610 0x0143bd60
21
20#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */ 22#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
21#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */ 23#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
22#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */ 24#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
@@ -54,6 +56,21 @@
54#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0) 56#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
55 57
56/* 58/*
59 * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
60 */
61#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
62#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
63#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
64
65#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
66#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
67#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
68#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
69
70#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
71
72
73/*
57 * Broadcom LED source encodings. These are used in BCM5461, BCM5481, 74 * Broadcom LED source encodings. These are used in BCM5461, BCM5481,
58 * BCM5482, and possibly some others. 75 * BCM5482, and possibly some others.
59 */ 76 */
@@ -88,6 +105,24 @@
88#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ 105#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
89 106
90/* 107/*
108 * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
109 */
110#define MII_BCM54XX_EXP_AADJ1CH0 0x001f
111#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200
112#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100
113#define MII_BCM54XX_EXP_AADJ1CH3 0x601f
114#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002
115#define MII_BCM54XX_EXP_EXP08 0x0F08
116#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
117#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
118#define MII_BCM54XX_EXP_EXP75 0x0f75
119#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
120#define MII_BCM54XX_EXP_EXP96 0x0f96
121#define MII_BCM54XX_EXP_EXP96_MYST 0x0010
122#define MII_BCM54XX_EXP_EXP97 0x0f97
123#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
124
125/*
91 * BCM5482: Secondary SerDes registers 126 * BCM5482: Secondary SerDes registers
92 */ 127 */
93#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */ 128#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */
@@ -128,40 +163,93 @@ static int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow, u16 val)
128 MII_BCM54XX_SHD_DATA(val)); 163 MII_BCM54XX_SHD_DATA(val));
129} 164}
130 165
131/* 166/* Indirect register access functions for the Expansion Registers */
132 * Indirect register access functions for the Expansion Registers 167static int bcm54xx_exp_read(struct phy_device *phydev, u8 regnum)
133 * and Secondary SerDes registers (when sec_serdes=1).
134 */
135static int bcm54xx_exp_read(struct phy_device *phydev,
136 int sec_serdes, u8 regnum)
137{ 168{
138 int val; 169 int val;
139 170
140 phy_write(phydev, MII_BCM54XX_EXP_SEL, 171 val = phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum);
141 (sec_serdes ? MII_BCM54XX_EXP_SEL_SSD : 172 if (val < 0)
142 MII_BCM54XX_EXP_SEL_ER) | 173 return val;
143 regnum); 174
144 val = phy_read(phydev, MII_BCM54XX_EXP_DATA); 175 val = phy_read(phydev, MII_BCM54XX_EXP_DATA);
145 phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum); 176
177 /* Restore default value. It's O.K. if this write fails. */
178 phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);
146 179
147 return val; 180 return val;
148} 181}
149 182
150static int bcm54xx_exp_write(struct phy_device *phydev, 183static int bcm54xx_exp_write(struct phy_device *phydev, u16 regnum, u16 val)
151 int sec_serdes, u8 regnum, u16 val)
152{ 184{
153 int ret; 185 int ret;
154 186
155 phy_write(phydev, MII_BCM54XX_EXP_SEL, 187 ret = phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum);
156 (sec_serdes ? MII_BCM54XX_EXP_SEL_SSD : 188 if (ret < 0)
157 MII_BCM54XX_EXP_SEL_ER) | 189 return ret;
158 regnum); 190
159 ret = phy_write(phydev, MII_BCM54XX_EXP_DATA, val); 191 ret = phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
160 phy_write(phydev, MII_BCM54XX_EXP_SEL, regnum); 192
193 /* Restore default value. It's O.K. if this write fails. */
194 phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);
161 195
162 return ret; 196 return ret;
163} 197}
164 198
199static int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val)
200{
201 return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val);
202}
203
204static int bcm50610_a0_workaround(struct phy_device *phydev)
205{
206 int err;
207
208 err = bcm54xx_auxctl_write(phydev,
209 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
210 MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
211 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
212 if (err < 0)
213 return err;
214
215 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
216 MII_BCM54XX_EXP_EXP08_RJCT_2MHZ |
217 MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE);
218 if (err < 0)
219 goto error;
220
221 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0,
222 MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN |
223 MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF);
224 if (err < 0)
225 goto error;
226
227 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3,
228 MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ);
229 if (err < 0)
230 goto error;
231
232 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75,
233 MII_BCM54XX_EXP_EXP75_VDACCTRL);
234 if (err < 0)
235 goto error;
236
237 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96,
238 MII_BCM54XX_EXP_EXP96_MYST);
239 if (err < 0)
240 goto error;
241
242 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97,
243 MII_BCM54XX_EXP_EXP97_MYST);
244
245error:
246 bcm54xx_auxctl_write(phydev,
247 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
248 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
249
250 return err;
251}
252
165static int bcm54xx_config_init(struct phy_device *phydev) 253static int bcm54xx_config_init(struct phy_device *phydev)
166{ 254{
167 int reg, err; 255 int reg, err;
@@ -183,6 +271,13 @@ static int bcm54xx_config_init(struct phy_device *phydev)
183 err = phy_write(phydev, MII_BCM54XX_IMR, reg); 271 err = phy_write(phydev, MII_BCM54XX_IMR, reg);
184 if (err < 0) 272 if (err < 0)
185 return err; 273 return err;
274
275 if (phydev->drv->phy_id == PHY_ID_BCM50610) {
276 err = bcm50610_a0_workaround(phydev);
277 if (err < 0)
278 return err;
279 }
280
186 return 0; 281 return 0;
187} 282}
188 283
@@ -205,18 +300,27 @@ static int bcm5482_config_init(struct phy_device *phydev)
205 /* 300 /*
206 * Enable SGMII slave mode and auto-detection 301 * Enable SGMII slave mode and auto-detection
207 */ 302 */
208 reg = bcm54xx_exp_read(phydev, 1, BCM5482_SSD_SGMII_SLAVE); 303 reg = BCM5482_SSD_SGMII_SLAVE | MII_BCM54XX_EXP_SEL_SSD;
209 bcm54xx_exp_write(phydev, 1, BCM5482_SSD_SGMII_SLAVE, 304 err = bcm54xx_exp_read(phydev, reg);
210 reg | 305 if (err < 0)
211 BCM5482_SSD_SGMII_SLAVE_EN | 306 return err;
212 BCM5482_SSD_SGMII_SLAVE_AD); 307 err = bcm54xx_exp_write(phydev, reg, err |
308 BCM5482_SSD_SGMII_SLAVE_EN |
309 BCM5482_SSD_SGMII_SLAVE_AD);
310 if (err < 0)
311 return err;
213 312
214 /* 313 /*
215 * Disable secondary SerDes powerdown 314 * Disable secondary SerDes powerdown
216 */ 315 */
217 reg = bcm54xx_exp_read(phydev, 1, BCM5482_SSD_1000BX_CTL); 316 reg = BCM5482_SSD_1000BX_CTL | MII_BCM54XX_EXP_SEL_SSD;
218 bcm54xx_exp_write(phydev, 1, BCM5482_SSD_1000BX_CTL, 317 err = bcm54xx_exp_read(phydev, reg);
219 reg & ~BCM5482_SSD_1000BX_CTL_PWRDOWN); 318 if (err < 0)
319 return err;
320 err = bcm54xx_exp_write(phydev, reg,
321 err & ~BCM5482_SSD_1000BX_CTL_PWRDOWN);
322 if (err < 0)
323 return err;
220 324
221 /* 325 /*
222 * Select 1000BASE-X register set (primary SerDes) 326 * Select 1000BASE-X register set (primary SerDes)
@@ -335,7 +439,8 @@ static struct phy_driver bcm5411_driver = {
335 .phy_id = 0x00206070, 439 .phy_id = 0x00206070,
336 .phy_id_mask = 0xfffffff0, 440 .phy_id_mask = 0xfffffff0,
337 .name = "Broadcom BCM5411", 441 .name = "Broadcom BCM5411",
338 .features = PHY_GBIT_FEATURES, 442 .features = PHY_GBIT_FEATURES |
443 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
339 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 444 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
340 .config_init = bcm54xx_config_init, 445 .config_init = bcm54xx_config_init,
341 .config_aneg = genphy_config_aneg, 446 .config_aneg = genphy_config_aneg,
@@ -349,7 +454,8 @@ static struct phy_driver bcm5421_driver = {
349 .phy_id = 0x002060e0, 454 .phy_id = 0x002060e0,
350 .phy_id_mask = 0xfffffff0, 455 .phy_id_mask = 0xfffffff0,
351 .name = "Broadcom BCM5421", 456 .name = "Broadcom BCM5421",
352 .features = PHY_GBIT_FEATURES, 457 .features = PHY_GBIT_FEATURES |
458 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
353 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 459 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
354 .config_init = bcm54xx_config_init, 460 .config_init = bcm54xx_config_init,
355 .config_aneg = genphy_config_aneg, 461 .config_aneg = genphy_config_aneg,
@@ -363,7 +469,8 @@ static struct phy_driver bcm5461_driver = {
363 .phy_id = 0x002060c0, 469 .phy_id = 0x002060c0,
364 .phy_id_mask = 0xfffffff0, 470 .phy_id_mask = 0xfffffff0,
365 .name = "Broadcom BCM5461", 471 .name = "Broadcom BCM5461",
366 .features = PHY_GBIT_FEATURES, 472 .features = PHY_GBIT_FEATURES |
473 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
367 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 474 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
368 .config_init = bcm54xx_config_init, 475 .config_init = bcm54xx_config_init,
369 .config_aneg = genphy_config_aneg, 476 .config_aneg = genphy_config_aneg,
@@ -377,7 +484,8 @@ static struct phy_driver bcm5464_driver = {
377 .phy_id = 0x002060b0, 484 .phy_id = 0x002060b0,
378 .phy_id_mask = 0xfffffff0, 485 .phy_id_mask = 0xfffffff0,
379 .name = "Broadcom BCM5464", 486 .name = "Broadcom BCM5464",
380 .features = PHY_GBIT_FEATURES, 487 .features = PHY_GBIT_FEATURES |
488 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
381 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 489 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
382 .config_init = bcm54xx_config_init, 490 .config_init = bcm54xx_config_init,
383 .config_aneg = genphy_config_aneg, 491 .config_aneg = genphy_config_aneg,
@@ -391,7 +499,8 @@ static struct phy_driver bcm5481_driver = {
391 .phy_id = 0x0143bca0, 499 .phy_id = 0x0143bca0,
392 .phy_id_mask = 0xfffffff0, 500 .phy_id_mask = 0xfffffff0,
393 .name = "Broadcom BCM5481", 501 .name = "Broadcom BCM5481",
394 .features = PHY_GBIT_FEATURES, 502 .features = PHY_GBIT_FEATURES |
503 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
395 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 504 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
396 .config_init = bcm54xx_config_init, 505 .config_init = bcm54xx_config_init,
397 .config_aneg = bcm5481_config_aneg, 506 .config_aneg = bcm5481_config_aneg,
@@ -405,7 +514,8 @@ static struct phy_driver bcm5482_driver = {
405 .phy_id = 0x0143bcb0, 514 .phy_id = 0x0143bcb0,
406 .phy_id_mask = 0xfffffff0, 515 .phy_id_mask = 0xfffffff0,
407 .name = "Broadcom BCM5482", 516 .name = "Broadcom BCM5482",
408 .features = PHY_GBIT_FEATURES, 517 .features = PHY_GBIT_FEATURES |
518 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
409 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 519 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
410 .config_init = bcm5482_config_init, 520 .config_init = bcm5482_config_init,
411 .config_aneg = genphy_config_aneg, 521 .config_aneg = genphy_config_aneg,
@@ -415,6 +525,36 @@ static struct phy_driver bcm5482_driver = {
415 .driver = { .owner = THIS_MODULE }, 525 .driver = { .owner = THIS_MODULE },
416}; 526};
417 527
528static struct phy_driver bcm50610_driver = {
529 .phy_id = PHY_ID_BCM50610,
530 .phy_id_mask = 0xfffffff0,
531 .name = "Broadcom BCM50610",
532 .features = PHY_GBIT_FEATURES |
533 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
534 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
535 .config_init = bcm54xx_config_init,
536 .config_aneg = genphy_config_aneg,
537 .read_status = genphy_read_status,
538 .ack_interrupt = bcm54xx_ack_interrupt,
539 .config_intr = bcm54xx_config_intr,
540 .driver = { .owner = THIS_MODULE },
541};
542
543static struct phy_driver bcm57780_driver = {
544 .phy_id = 0x03625d90,
545 .phy_id_mask = 0xfffffff0,
546 .name = "Broadcom BCM57780",
547 .features = PHY_GBIT_FEATURES |
548 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
549 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
550 .config_init = bcm54xx_config_init,
551 .config_aneg = genphy_config_aneg,
552 .read_status = genphy_read_status,
553 .ack_interrupt = bcm54xx_ack_interrupt,
554 .config_intr = bcm54xx_config_intr,
555 .driver = { .owner = THIS_MODULE },
556};
557
418static int __init broadcom_init(void) 558static int __init broadcom_init(void)
419{ 559{
420 int ret; 560 int ret;
@@ -437,8 +577,18 @@ static int __init broadcom_init(void)
437 ret = phy_driver_register(&bcm5482_driver); 577 ret = phy_driver_register(&bcm5482_driver);
438 if (ret) 578 if (ret)
439 goto out_5482; 579 goto out_5482;
580 ret = phy_driver_register(&bcm50610_driver);
581 if (ret)
582 goto out_50610;
583 ret = phy_driver_register(&bcm57780_driver);
584 if (ret)
585 goto out_57780;
440 return ret; 586 return ret;
441 587
588out_57780:
589 phy_driver_unregister(&bcm50610_driver);
590out_50610:
591 phy_driver_unregister(&bcm5482_driver);
442out_5482: 592out_5482:
443 phy_driver_unregister(&bcm5481_driver); 593 phy_driver_unregister(&bcm5481_driver);
444out_5481: 594out_5481:
@@ -455,6 +605,8 @@ out_5411:
455 605
456static void __exit broadcom_exit(void) 606static void __exit broadcom_exit(void)
457{ 607{
608 phy_driver_unregister(&bcm57780_driver);
609 phy_driver_unregister(&bcm50610_driver);
458 phy_driver_unregister(&bcm5482_driver); 610 phy_driver_unregister(&bcm5482_driver);
459 phy_driver_unregister(&bcm5481_driver); 611 phy_driver_unregister(&bcm5481_driver);
460 phy_driver_unregister(&bcm5464_driver); 612 phy_driver_unregister(&bcm5464_driver);
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
new file mode 100644
index 000000000000..b031fa21f1aa
--- /dev/null
+++ b/drivers/net/phy/et1011c.c
@@ -0,0 +1,113 @@
1/*
2 * drivers/net/phy/et1011c.c
3 *
4 * Driver for LSI ET1011C PHYs
5 *
6 * Author: Chaithrika U S
7 *
8 * Copyright (c) 2008 Texas Instruments
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/kernel.h>
17#include <linux/string.h>
18#include <linux/errno.h>
19#include <linux/unistd.h>
20#include <linux/slab.h>
21#include <linux/interrupt.h>
22#include <linux/init.h>
23#include <linux/delay.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/skbuff.h>
27#include <linux/spinlock.h>
28#include <linux/mm.h>
29#include <linux/module.h>
30#include <linux/mii.h>
31#include <linux/ethtool.h>
32#include <linux/phy.h>
33#include <linux/io.h>
34#include <linux/uaccess.h>
35#include <asm/irq.h>
36
37#define ET1011C_STATUS_REG (0x1A)
38#define ET1011C_CONFIG_REG (0x16)
39#define ET1011C_SPEED_MASK (0x0300)
40#define ET1011C_GIGABIT_SPEED (0x0200)
41#define ET1011C_TX_FIFO_MASK (0x3000)
42#define ET1011C_TX_FIFO_DEPTH_8 (0x0000)
43#define ET1011C_TX_FIFO_DEPTH_16 (0x1000)
44#define ET1011C_INTERFACE_MASK (0x0007)
45#define ET1011C_GMII_INTERFACE (0x0002)
46#define ET1011C_SYS_CLK_EN (0x01 << 4)
47
48
49MODULE_DESCRIPTION("LSI ET1011C PHY driver");
50MODULE_AUTHOR("Chaithrika U S");
51MODULE_LICENSE("GPL");
52
53static int et1011c_config_aneg(struct phy_device *phydev)
54{
55 int ctl = 0;
56 ctl = phy_read(phydev, MII_BMCR);
57 if (ctl < 0)
58 return ctl;
59 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 |
60 BMCR_ANENABLE);
61 /* First clear the PHY */
62 phy_write(phydev, MII_BMCR, ctl | BMCR_RESET);
63
64 return genphy_config_aneg(phydev);
65}
66
67static int et1011c_read_status(struct phy_device *phydev)
68{
69 int ret;
70 u32 val;
71 static int speed;
72 ret = genphy_read_status(phydev);
73
74 if (speed != phydev->speed) {
75 speed = phydev->speed;
76 val = phy_read(phydev, ET1011C_STATUS_REG);
77 if ((val & ET1011C_SPEED_MASK) ==
78 ET1011C_GIGABIT_SPEED) {
79 val = phy_read(phydev, ET1011C_CONFIG_REG);
80 val &= ~ET1011C_TX_FIFO_MASK;
81 phy_write(phydev, ET1011C_CONFIG_REG, val\
82 | ET1011C_GMII_INTERFACE\
83 | ET1011C_SYS_CLK_EN\
84 | ET1011C_TX_FIFO_DEPTH_16);
85
86 }
87 }
88 return ret;
89}
90
91static struct phy_driver et1011c_driver = {
92 .phy_id = 0x0282f014,
93 .name = "ET1011C",
94 .phy_id_mask = 0xfffffff0,
95 .features = (PHY_BASIC_FEATURES | SUPPORTED_1000baseT_Full),
96 .flags = PHY_POLL,
97 .config_aneg = et1011c_config_aneg,
98 .read_status = et1011c_read_status,
99 .driver = { .owner = THIS_MODULE,},
100};
101
102static int __init et1011c_init(void)
103{
104 return phy_driver_register(&et1011c_driver);
105}
106
107static void __exit et1011c_exit(void)
108{
109 phy_driver_unregister(&et1011c_driver);
110}
111
112module_init(et1011c_init);
113module_exit(et1011c_exit);
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
new file mode 100644
index 000000000000..a439ebeb4319
--- /dev/null
+++ b/drivers/net/phy/mdio-gpio.c
@@ -0,0 +1,296 @@
1/*
2 * GPIO based MDIO bitbang driver.
3 * Supports OpenFirmware.
4 *
5 * Copyright (c) 2008 CSE Semaphore Belgium.
6 * by Laurent Pinchart <laurentp@cse-semaphore.com>
7 *
8 * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
9 *
10 * Based on earlier work by
11 *
12 * Copyright (c) 2003 Intracom S.A.
13 * by Pantelis Antoniou <panto@intracom.gr>
14 *
15 * 2005 (c) MontaVista Software, Inc.
16 * Vitaly Bordug <vbordug@ru.mvista.com>
17 *
18 * This file is licensed under the terms of the GNU General Public License
19 * version 2. This program is licensed "as is" without any warranty of any
20 * kind, whether express or implied.
21 */
22
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/init.h>
26#include <linux/interrupt.h>
27#include <linux/platform_device.h>
28#include <linux/gpio.h>
29#include <linux/mdio-gpio.h>
30
31#ifdef CONFIG_OF_GPIO
32#include <linux/of_gpio.h>
33#include <linux/of_platform.h>
34#endif
35
36struct mdio_gpio_info {
37 struct mdiobb_ctrl ctrl;
38 int mdc, mdio;
39};
40
41static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
42{
43 struct mdio_gpio_info *bitbang =
44 container_of(ctrl, struct mdio_gpio_info, ctrl);
45
46 if (dir)
47 gpio_direction_output(bitbang->mdio, 1);
48 else
49 gpio_direction_input(bitbang->mdio);
50}
51
52static int mdio_get(struct mdiobb_ctrl *ctrl)
53{
54 struct mdio_gpio_info *bitbang =
55 container_of(ctrl, struct mdio_gpio_info, ctrl);
56
57 return gpio_get_value(bitbang->mdio);
58}
59
60static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
61{
62 struct mdio_gpio_info *bitbang =
63 container_of(ctrl, struct mdio_gpio_info, ctrl);
64
65 gpio_set_value(bitbang->mdio, what);
66}
67
68static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
69{
70 struct mdio_gpio_info *bitbang =
71 container_of(ctrl, struct mdio_gpio_info, ctrl);
72
73 gpio_set_value(bitbang->mdc, what);
74}
75
76static struct mdiobb_ops mdio_gpio_ops = {
77 .owner = THIS_MODULE,
78 .set_mdc = mdc_set,
79 .set_mdio_dir = mdio_dir,
80 .set_mdio_data = mdio_set,
81 .get_mdio_data = mdio_get,
82};
83
84static int __devinit mdio_gpio_bus_init(struct device *dev,
85 struct mdio_gpio_platform_data *pdata,
86 int bus_id)
87{
88 struct mii_bus *new_bus;
89 struct mdio_gpio_info *bitbang;
90 int ret = -ENOMEM;
91 int i;
92
93 bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL);
94 if (!bitbang)
95 goto out;
96
97 bitbang->ctrl.ops = &mdio_gpio_ops;
98 bitbang->mdc = pdata->mdc;
99 bitbang->mdio = pdata->mdio;
100
101 new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
102 if (!new_bus)
103 goto out_free_bitbang;
104
105 new_bus->name = "GPIO Bitbanged MDIO",
106
107 ret = -ENODEV;
108
109 new_bus->phy_mask = pdata->phy_mask;
110 new_bus->irq = pdata->irqs;
111 new_bus->parent = dev;
112
113 if (new_bus->phy_mask == ~0)
114 goto out_free_bus;
115
116 for (i = 0; i < PHY_MAX_ADDR; i++)
117 if (!new_bus->irq[i])
118 new_bus->irq[i] = PHY_POLL;
119
120 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", bus_id);
121
122 if (gpio_request(bitbang->mdc, "mdc"))
123 goto out_free_bus;
124
125 if (gpio_request(bitbang->mdio, "mdio"))
126 goto out_free_mdc;
127
128 dev_set_drvdata(dev, new_bus);
129
130 ret = mdiobus_register(new_bus);
131 if (ret)
132 goto out_free_all;
133
134 return 0;
135
136out_free_all:
137 dev_set_drvdata(dev, NULL);
138 gpio_free(bitbang->mdio);
139out_free_mdc:
140 gpio_free(bitbang->mdc);
141out_free_bus:
142 free_mdio_bitbang(new_bus);
143out_free_bitbang:
144 kfree(bitbang);
145out:
146 return ret;
147}
148
149static void __devexit mdio_gpio_bus_destroy(struct device *dev)
150{
151 struct mii_bus *bus = dev_get_drvdata(dev);
152 struct mdio_gpio_info *bitbang = bus->priv;
153
154 mdiobus_unregister(bus);
155 free_mdio_bitbang(bus);
156 dev_set_drvdata(dev, NULL);
157 gpio_free(bitbang->mdc);
158 gpio_free(bitbang->mdio);
159 kfree(bitbang);
160}
161
162static int __devinit mdio_gpio_probe(struct platform_device *pdev)
163{
164 struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data;
165
166 if (!pdata)
167 return -ENODEV;
168
169 return mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id);
170}
171
172static int __devexit mdio_gpio_remove(struct platform_device *pdev)
173{
174 mdio_gpio_bus_destroy(&pdev->dev);
175
176 return 0;
177}
178
179#ifdef CONFIG_OF_GPIO
180static void __devinit add_phy(struct mdio_gpio_platform_data *pdata,
181 struct device_node *np)
182{
183 const u32 *data;
184 int len, id, irq;
185
186 data = of_get_property(np, "reg", &len);
187 if (!data || len != 4)
188 return;
189
190 id = *data;
191 pdata->phy_mask &= ~(1 << id);
192
193 irq = of_irq_to_resource(np, 0, NULL);
194 if (irq)
195 pdata->irqs[id] = irq;
196}
197
198static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
199 const struct of_device_id *match)
200{
201 struct device_node *np = NULL;
202 struct mdio_gpio_platform_data *pdata;
203
204 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
205 if (!pdata)
206 return -ENOMEM;
207
208 pdata->mdc = of_get_gpio(ofdev->node, 0);
209 pdata->mdio = of_get_gpio(ofdev->node, 1);
210
211 if (pdata->mdc < 0 || pdata->mdio < 0)
212 goto out_free;
213
214 while ((np = of_get_next_child(ofdev->node, np)))
215 if (!strcmp(np->type, "ethernet-phy"))
216 add_phy(pdata, np);
217
218 return mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc);
219
220out_free:
221 kfree(pdata);
222 return -ENODEV;
223}
224
225static int __devexit mdio_ofgpio_remove(struct of_device *ofdev)
226{
227 mdio_gpio_bus_destroy(&ofdev->dev);
228 kfree(ofdev->dev.platform_data);
229
230 return 0;
231}
232
233static struct of_device_id mdio_ofgpio_match[] = {
234 {
235 .compatible = "virtual,mdio-gpio",
236 },
237 {},
238};
239
240static struct of_platform_driver mdio_ofgpio_driver = {
241 .name = "mdio-gpio",
242 .match_table = mdio_ofgpio_match,
243 .probe = mdio_ofgpio_probe,
244 .remove = __devexit_p(mdio_ofgpio_remove),
245};
246
247static inline int __init mdio_ofgpio_init(void)
248{
249 return of_register_platform_driver(&mdio_ofgpio_driver);
250}
251
252static inline void __exit mdio_ofgpio_exit(void)
253{
254 of_unregister_platform_driver(&mdio_ofgpio_driver);
255}
256#else
257static inline int __init mdio_ofgpio_init(void) { return 0; }
258static inline void __exit mdio_ofgpio_exit(void) { }
259#endif /* CONFIG_OF_GPIO */
260
261static struct platform_driver mdio_gpio_driver = {
262 .probe = mdio_gpio_probe,
263 .remove = __devexit_p(mdio_gpio_remove),
264 .driver = {
265 .name = "mdio-gpio",
266 .owner = THIS_MODULE,
267 },
268};
269
270static int __init mdio_gpio_init(void)
271{
272 int ret;
273
274 ret = mdio_ofgpio_init();
275 if (ret)
276 return ret;
277
278 ret = platform_driver_register(&mdio_gpio_driver);
279 if (ret)
280 mdio_ofgpio_exit();
281
282 return ret;
283}
284module_init(mdio_gpio_init);
285
286static void __exit mdio_gpio_exit(void)
287{
288 platform_driver_unregister(&mdio_gpio_driver);
289 mdio_ofgpio_exit();
290}
291module_exit(mdio_gpio_exit);
292
293MODULE_ALIAS("platform:mdio-gpio");
294MODULE_AUTHOR("Laurent Pinchart, Paulius Zaleckas");
295MODULE_LICENSE("GPL");
296MODULE_DESCRIPTION("Generic driver for MDIO bus emulation using GPIO");
diff --git a/drivers/net/phy/mdio-ofgpio.c b/drivers/net/phy/mdio-ofgpio.c
deleted file mode 100644
index 2ff97754e574..000000000000
--- a/drivers/net/phy/mdio-ofgpio.c
+++ /dev/null
@@ -1,204 +0,0 @@
1/*
2 * OpenFirmware GPIO based MDIO bitbang driver.
3 *
4 * Copyright (c) 2008 CSE Semaphore Belgium.
5 * by Laurent Pinchart <laurentp@cse-semaphore.com>
6 *
7 * Based on earlier work by
8 *
9 * Copyright (c) 2003 Intracom S.A.
10 * by Pantelis Antoniou <panto@intracom.gr>
11 *
12 * 2005 (c) MontaVista Software, Inc.
13 * Vitaly Bordug <vbordug@ru.mvista.com>
14 *
15 * This file is licensed under the terms of the GNU General Public License
16 * version 2. This program is licensed "as is" without any warranty of any
17 * kind, whether express or implied.
18 */
19
20#include <linux/module.h>
21#include <linux/slab.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/mdio-bitbang.h>
25#include <linux/of_gpio.h>
26#include <linux/of_platform.h>
27
28struct mdio_gpio_info {
29 struct mdiobb_ctrl ctrl;
30 int mdc, mdio;
31};
32
33static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
34{
35 struct mdio_gpio_info *bitbang =
36 container_of(ctrl, struct mdio_gpio_info, ctrl);
37
38 if (dir)
39 gpio_direction_output(bitbang->mdio, 1);
40 else
41 gpio_direction_input(bitbang->mdio);
42}
43
44static int mdio_read(struct mdiobb_ctrl *ctrl)
45{
46 struct mdio_gpio_info *bitbang =
47 container_of(ctrl, struct mdio_gpio_info, ctrl);
48
49 return gpio_get_value(bitbang->mdio);
50}
51
52static void mdio(struct mdiobb_ctrl *ctrl, int what)
53{
54 struct mdio_gpio_info *bitbang =
55 container_of(ctrl, struct mdio_gpio_info, ctrl);
56
57 gpio_set_value(bitbang->mdio, what);
58}
59
60static void mdc(struct mdiobb_ctrl *ctrl, int what)
61{
62 struct mdio_gpio_info *bitbang =
63 container_of(ctrl, struct mdio_gpio_info, ctrl);
64
65 gpio_set_value(bitbang->mdc, what);
66}
67
68static struct mdiobb_ops mdio_gpio_ops = {
69 .owner = THIS_MODULE,
70 .set_mdc = mdc,
71 .set_mdio_dir = mdio_dir,
72 .set_mdio_data = mdio,
73 .get_mdio_data = mdio_read,
74};
75
76static int __devinit mdio_ofgpio_bitbang_init(struct mii_bus *bus,
77 struct device_node *np)
78{
79 struct mdio_gpio_info *bitbang = bus->priv;
80
81 bitbang->mdc = of_get_gpio(np, 0);
82 bitbang->mdio = of_get_gpio(np, 1);
83
84 if (bitbang->mdc < 0 || bitbang->mdio < 0)
85 return -ENODEV;
86
87 snprintf(bus->id, MII_BUS_ID_SIZE, "%x", bitbang->mdc);
88 return 0;
89}
90
91static void __devinit add_phy(struct mii_bus *bus, struct device_node *np)
92{
93 const u32 *data;
94 int len, id, irq;
95
96 data = of_get_property(np, "reg", &len);
97 if (!data || len != 4)
98 return;
99
100 id = *data;
101 bus->phy_mask &= ~(1 << id);
102
103 irq = of_irq_to_resource(np, 0, NULL);
104 if (irq != NO_IRQ)
105 bus->irq[id] = irq;
106}
107
108static int __devinit mdio_ofgpio_probe(struct of_device *ofdev,
109 const struct of_device_id *match)
110{
111 struct device_node *np = NULL;
112 struct mii_bus *new_bus;
113 struct mdio_gpio_info *bitbang;
114 int ret = -ENOMEM;
115 int i;
116
117 bitbang = kzalloc(sizeof(struct mdio_gpio_info), GFP_KERNEL);
118 if (!bitbang)
119 goto out;
120
121 bitbang->ctrl.ops = &mdio_gpio_ops;
122
123 new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
124 if (!new_bus)
125 goto out_free_bitbang;
126
127 new_bus->name = "GPIO Bitbanged MII",
128
129 ret = mdio_ofgpio_bitbang_init(new_bus, ofdev->node);
130 if (ret)
131 goto out_free_bus;
132
133 new_bus->phy_mask = ~0;
134 new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
135 if (!new_bus->irq)
136 goto out_free_bus;
137
138 for (i = 0; i < PHY_MAX_ADDR; i++)
139 new_bus->irq[i] = -1;
140
141 while ((np = of_get_next_child(ofdev->node, np)))
142 if (!strcmp(np->type, "ethernet-phy"))
143 add_phy(new_bus, np);
144
145 new_bus->parent = &ofdev->dev;
146 dev_set_drvdata(&ofdev->dev, new_bus);
147
148 ret = mdiobus_register(new_bus);
149 if (ret)
150 goto out_free_irqs;
151
152 return 0;
153
154out_free_irqs:
155 dev_set_drvdata(&ofdev->dev, NULL);
156 kfree(new_bus->irq);
157out_free_bus:
158 free_mdio_bitbang(new_bus);
159out_free_bitbang:
160 kfree(bitbang);
161out:
162 return ret;
163}
164
165static int mdio_ofgpio_remove(struct of_device *ofdev)
166{
167 struct mii_bus *bus = dev_get_drvdata(&ofdev->dev);
168 struct mdio_gpio_info *bitbang = bus->priv;
169
170 mdiobus_unregister(bus);
171 kfree(bus->irq);
172 free_mdio_bitbang(bus);
173 dev_set_drvdata(&ofdev->dev, NULL);
174 kfree(bitbang);
175
176 return 0;
177}
178
179static struct of_device_id mdio_ofgpio_match[] = {
180 {
181 .compatible = "virtual,mdio-gpio",
182 },
183 {},
184};
185
186static struct of_platform_driver mdio_ofgpio_driver = {
187 .name = "mdio-gpio",
188 .match_table = mdio_ofgpio_match,
189 .probe = mdio_ofgpio_probe,
190 .remove = mdio_ofgpio_remove,
191};
192
193static int mdio_ofgpio_init(void)
194{
195 return of_register_platform_driver(&mdio_ofgpio_driver);
196}
197
198static void mdio_ofgpio_exit(void)
199{
200 of_unregister_platform_driver(&mdio_ofgpio_driver);
201}
202
203module_init(mdio_ofgpio_init);
204module_exit(mdio_ofgpio_exit);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 289fc267edf3..11adf6ed4628 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -63,7 +63,9 @@ EXPORT_SYMBOL(mdiobus_alloc);
63static void mdiobus_release(struct device *d) 63static void mdiobus_release(struct device *d)
64{ 64{
65 struct mii_bus *bus = to_mii_bus(d); 65 struct mii_bus *bus = to_mii_bus(d);
66 BUG_ON(bus->state != MDIOBUS_RELEASED); 66 BUG_ON(bus->state != MDIOBUS_RELEASED &&
67 /* for compatibility with error handling in drivers */
68 bus->state != MDIOBUS_ALLOCATED);
67 kfree(bus); 69 kfree(bus);
68} 70}
69 71
@@ -83,8 +85,7 @@ static struct class mdio_bus_class = {
83 */ 85 */
84int mdiobus_register(struct mii_bus *bus) 86int mdiobus_register(struct mii_bus *bus)
85{ 87{
86 int i; 88 int i, err;
87 int err = 0;
88 89
89 if (NULL == bus || NULL == bus->name || 90 if (NULL == bus || NULL == bus->name ||
90 NULL == bus->read || 91 NULL == bus->read ||
@@ -97,7 +98,7 @@ int mdiobus_register(struct mii_bus *bus)
97 bus->dev.parent = bus->parent; 98 bus->dev.parent = bus->parent;
98 bus->dev.class = &mdio_bus_class; 99 bus->dev.class = &mdio_bus_class;
99 bus->dev.groups = NULL; 100 bus->dev.groups = NULL;
100 memcpy(bus->dev.bus_id, bus->id, MII_BUS_ID_SIZE); 101 dev_set_name(&bus->dev, bus->id);
101 102
102 err = device_register(&bus->dev); 103 err = device_register(&bus->dev);
103 if (err) { 104 if (err) {
@@ -116,16 +117,23 @@ int mdiobus_register(struct mii_bus *bus)
116 struct phy_device *phydev; 117 struct phy_device *phydev;
117 118
118 phydev = mdiobus_scan(bus, i); 119 phydev = mdiobus_scan(bus, i);
119 if (IS_ERR(phydev)) 120 if (IS_ERR(phydev)) {
120 err = PTR_ERR(phydev); 121 err = PTR_ERR(phydev);
122 goto error;
123 }
121 } 124 }
122 } 125 }
123 126
124 if (!err) 127 bus->state = MDIOBUS_REGISTERED;
125 bus->state = MDIOBUS_REGISTERED;
126
127 pr_info("%s: probed\n", bus->name); 128 pr_info("%s: probed\n", bus->name);
129 return 0;
128 130
131error:
132 while (--i >= 0) {
133 if (bus->phy_map[i])
134 device_unregister(&bus->phy_map[i]->dev);
135 }
136 device_del(&bus->dev);
129 return err; 137 return err;
130} 138}
131EXPORT_SYMBOL(mdiobus_register); 139EXPORT_SYMBOL(mdiobus_register);
@@ -192,7 +200,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
192 200
193 phydev->dev.parent = bus->parent; 201 phydev->dev.parent = bus->parent;
194 phydev->dev.bus = &mdio_bus_type; 202 phydev->dev.bus = &mdio_bus_type;
195 snprintf(phydev->dev.bus_id, BUS_ID_SIZE, PHY_ID_FMT, bus->id, addr); 203 dev_set_name(&phydev->dev, PHY_ID_FMT, bus->id, addr);
196 204
197 phydev->bus = bus; 205 phydev->bus = bus;
198 206
@@ -285,9 +293,12 @@ static int mdio_bus_suspend(struct device * dev, pm_message_t state)
285{ 293{
286 int ret = 0; 294 int ret = 0;
287 struct device_driver *drv = dev->driver; 295 struct device_driver *drv = dev->driver;
296 struct phy_driver *phydrv = to_phy_driver(drv);
297 struct phy_device *phydev = to_phy_device(dev);
288 298
289 if (drv && drv->suspend) 299 if ((!device_may_wakeup(phydev->dev.parent)) &&
290 ret = drv->suspend(dev, state); 300 (phydrv && phydrv->suspend))
301 ret = phydrv->suspend(phydev);
291 302
292 return ret; 303 return ret;
293} 304}
@@ -296,9 +307,12 @@ static int mdio_bus_resume(struct device * dev)
296{ 307{
297 int ret = 0; 308 int ret = 0;
298 struct device_driver *drv = dev->driver; 309 struct device_driver *drv = dev->driver;
310 struct phy_driver *phydrv = to_phy_driver(drv);
311 struct phy_device *phydev = to_phy_device(dev);
299 312
300 if (drv && drv->resume) 313 if ((!device_may_wakeup(phydev->dev.parent)) &&
301 ret = drv->resume(dev); 314 (phydrv && phydrv->resume))
315 ret = phydrv->resume(phydev);
302 316
303 return ret; 317 return ret;
304} 318}
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
new file mode 100644
index 000000000000..6c636eb72089
--- /dev/null
+++ b/drivers/net/phy/national.c
@@ -0,0 +1,155 @@
1/*
2 * drivers/net/phy/national.c
3 *
4 * Driver for National Semiconductor PHYs
5 *
6 * Author: Stuart Menefy <stuart.menefy@st.com>
7 * Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
8 *
9 * Copyright (c) 2008 STMicroelectronics Limited
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/mii.h>
21#include <linux/ethtool.h>
22#include <linux/phy.h>
23#include <linux/netdevice.h>
24
25/* DP83865 phy identifier values */
26#define DP83865_PHY_ID 0x20005c7a
27
28#define DP83865_INT_MASK_REG 0x15
29#define DP83865_INT_MASK_STATUS 0x14
30
31#define DP83865_INT_REMOTE_FAULT 0x0008
32#define DP83865_INT_ANE_COMPLETED 0x0010
33#define DP83865_INT_LINK_CHANGE 0xe000
34#define DP83865_INT_MASK_DEFAULT (DP83865_INT_REMOTE_FAULT | \
35 DP83865_INT_ANE_COMPLETED | \
36 DP83865_INT_LINK_CHANGE)
37
38/* Advanced proprietary configuration */
39#define NS_EXP_MEM_CTL 0x16
40#define NS_EXP_MEM_DATA 0x1d
41#define NS_EXP_MEM_ADD 0x1e
42
43#define LED_CTRL_REG 0x13
44#define AN_FALLBACK_AN 0x0001
45#define AN_FALLBACK_CRC 0x0002
46#define AN_FALLBACK_IE 0x0004
47#define ALL_FALLBACK_ON (AN_FALLBACK_AN | AN_FALLBACK_CRC | AN_FALLBACK_IE)
48
49enum hdx_loopback {
50 hdx_loopback_on = 0,
51 hdx_loopback_off = 1,
52};
53
54static u8 ns_exp_read(struct phy_device *phydev, u16 reg)
55{
56 phy_write(phydev, NS_EXP_MEM_ADD, reg);
57 return phy_read(phydev, NS_EXP_MEM_DATA);
58}
59
60static void ns_exp_write(struct phy_device *phydev, u16 reg, u8 data)
61{
62 phy_write(phydev, NS_EXP_MEM_ADD, reg);
63 phy_write(phydev, NS_EXP_MEM_DATA, data);
64}
65
66static int ns_config_intr(struct phy_device *phydev)
67{
68 int err;
69
70 if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
71 err = phy_write(phydev, DP83865_INT_MASK_REG,
72 DP83865_INT_MASK_DEFAULT);
73 else
74 err = phy_write(phydev, DP83865_INT_MASK_REG, 0);
75
76 return err;
77}
78
79static int ns_ack_interrupt(struct phy_device *phydev)
80{
81 int ret = phy_read(phydev, DP83865_INT_MASK_STATUS);
82 if (ret < 0)
83 return ret;
84
85 return 0;
86}
87
88static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
89{
90 int bmcr = phy_read(phydev, MII_BMCR);
91
92 phy_write(phydev, MII_BMCR, (bmcr | BMCR_PDOWN));
93
94 /* Enable 8 bit expended memory read/write (no auto increment) */
95 phy_write(phydev, NS_EXP_MEM_CTL, 0);
96 phy_write(phydev, NS_EXP_MEM_ADD, 0x1C0);
97 phy_write(phydev, NS_EXP_MEM_DATA, 0x0008);
98 phy_write(phydev, MII_BMCR, (bmcr & ~BMCR_PDOWN));
99 phy_write(phydev, LED_CTRL_REG, mode);
100 return;
101}
102
103static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
104{
105 if (disable)
106 ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
107 else
108 ns_exp_write(phydev, 0x1c0,
109 ns_exp_read(phydev, 0x1c0) & 0xfffe);
110
111 printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n",
112 (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
113
114 return;
115}
116
117static int ns_config_init(struct phy_device *phydev)
118{
119 ns_giga_speed_fallback(phydev, ALL_FALLBACK_ON);
120 /* In the latest MAC or switches design, the 10 Mbps loopback
121 is desired to be turned off. */
122 ns_10_base_t_hdx_loopack(phydev, hdx_loopback_off);
123 return ns_ack_interrupt(phydev);
124}
125
126static struct phy_driver dp83865_driver = {
127 .phy_id = DP83865_PHY_ID,
128 .phy_id_mask = 0xfffffff0,
129 .name = "NatSemi DP83865",
130 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause,
131 .flags = PHY_HAS_INTERRUPT,
132 .config_init = ns_config_init,
133 .config_aneg = genphy_config_aneg,
134 .read_status = genphy_read_status,
135 .ack_interrupt = ns_ack_interrupt,
136 .config_intr = ns_config_intr,
137 .driver = {.owner = THIS_MODULE,}
138};
139
140static int __init ns_init(void)
141{
142 return phy_driver_register(&dp83865_driver);
143}
144
145static void __exit ns_exit(void)
146{
147 phy_driver_unregister(&dp83865_driver);
148}
149
150MODULE_DESCRIPTION("NatSemi PHY driver");
151MODULE_AUTHOR("Stuart Menefy");
152MODULE_LICENSE("GPL");
153
154module_init(ns_init);
155module_exit(ns_exit);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index df4e6257d4a7..e4ede6080c9d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -45,7 +45,7 @@
45 */ 45 */
46void phy_print_status(struct phy_device *phydev) 46void phy_print_status(struct phy_device *phydev)
47{ 47{
48 pr_info("PHY: %s - Link is %s", phydev->dev.bus_id, 48 pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
49 phydev->link ? "Up" : "Down"); 49 phydev->link ? "Up" : "Down");
50 if (phydev->link) 50 if (phydev->link)
51 printk(" - %d/%s", phydev->speed, 51 printk(" - %d/%s", phydev->speed,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 25acbbde4a60..e35460165bf7 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -74,7 +74,7 @@ int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
74 if (!fixup) 74 if (!fixup)
75 return -ENOMEM; 75 return -ENOMEM;
76 76
77 strncpy(fixup->bus_id, bus_id, BUS_ID_SIZE); 77 strlcpy(fixup->bus_id, bus_id, sizeof(fixup->bus_id));
78 fixup->phy_uid = phy_uid; 78 fixup->phy_uid = phy_uid;
79 fixup->phy_uid_mask = phy_uid_mask; 79 fixup->phy_uid_mask = phy_uid_mask;
80 fixup->run = run; 80 fixup->run = run;
@@ -109,7 +109,7 @@ EXPORT_SYMBOL(phy_register_fixup_for_id);
109 */ 109 */
110static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup) 110static int phy_needs_fixup(struct phy_device *phydev, struct phy_fixup *fixup)
111{ 111{
112 if (strcmp(fixup->bus_id, phydev->dev.bus_id) != 0) 112 if (strcmp(fixup->bus_id, dev_name(&phydev->dev)) != 0)
113 if (strcmp(fixup->bus_id, PHY_ANY_ID) != 0) 113 if (strcmp(fixup->bus_id, PHY_ANY_ID) != 0)
114 return 0; 114 return 0;
115 115
@@ -232,7 +232,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
232 return NULL; 232 return NULL;
233 233
234 /* 234 /*
235 * Broken hardware is sometimes missing the pull down resistor on the 235 * Broken hardware is sometimes missing the pull-up resistor on the
236 * MDIO line, which results in reads to non-existent devices returning 236 * MDIO line, which results in reads to non-existent devices returning
237 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent 237 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
238 * device as well. 238 * device as well.
@@ -517,23 +517,6 @@ int genphy_setup_forced(struct phy_device *phydev)
517 517
518 err = phy_write(phydev, MII_BMCR, ctl); 518 err = phy_write(phydev, MII_BMCR, ctl);
519 519
520 if (err < 0)
521 return err;
522
523 /*
524 * Run the fixups on this PHY, just in case the
525 * board code needs to change something after a reset
526 */
527 err = phy_scan_fixups(phydev);
528
529 if (err < 0)
530 return err;
531
532 /* We just reset the device, so we'd better configure any
533 * settings the PHY requires to operate */
534 if (phydev->drv->config_init)
535 err = phydev->drv->config_init(phydev);
536
537 return err; 520 return err;
538} 521}
539 522
@@ -779,7 +762,35 @@ static int genphy_config_init(struct phy_device *phydev)
779 762
780 return 0; 763 return 0;
781} 764}
765int genphy_suspend(struct phy_device *phydev)
766{
767 int value;
782 768
769 mutex_lock(&phydev->lock);
770
771 value = phy_read(phydev, MII_BMCR);
772 phy_write(phydev, MII_BMCR, (value | BMCR_PDOWN));
773
774 mutex_unlock(&phydev->lock);
775
776 return 0;
777}
778EXPORT_SYMBOL(genphy_suspend);
779
780int genphy_resume(struct phy_device *phydev)
781{
782 int value;
783
784 mutex_lock(&phydev->lock);
785
786 value = phy_read(phydev, MII_BMCR);
787 phy_write(phydev, MII_BMCR, (value & ~BMCR_PDOWN));
788
789 mutex_unlock(&phydev->lock);
790
791 return 0;
792}
793EXPORT_SYMBOL(genphy_resume);
783 794
784/** 795/**
785 * phy_probe - probe and init a PHY device 796 * phy_probe - probe and init a PHY device
@@ -855,7 +866,6 @@ int phy_driver_register(struct phy_driver *new_driver)
855{ 866{
856 int retval; 867 int retval;
857 868
858 memset(&new_driver->driver, 0, sizeof(new_driver->driver));
859 new_driver->driver.name = new_driver->name; 869 new_driver->driver.name = new_driver->name;
860 new_driver->driver.bus = &mdio_bus_type; 870 new_driver->driver.bus = &mdio_bus_type;
861 new_driver->driver.probe = phy_probe; 871 new_driver->driver.probe = phy_probe;
@@ -890,6 +900,8 @@ static struct phy_driver genphy_driver = {
890 .features = 0, 900 .features = 0,
891 .config_aneg = genphy_config_aneg, 901 .config_aneg = genphy_config_aneg,
892 .read_status = genphy_read_status, 902 .read_status = genphy_read_status,
903 .suspend = genphy_suspend,
904 .resume = genphy_resume,
893 .driver = {.owner= THIS_MODULE, }, 905 .driver = {.owner= THIS_MODULE, },
894}; 906};
895 907
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 73baa7a3bb0e..c05d38d46350 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -126,6 +126,27 @@ static struct phy_driver lan8700_driver = {
126 .driver = { .owner = THIS_MODULE, } 126 .driver = { .owner = THIS_MODULE, }
127}; 127};
128 128
129static struct phy_driver lan911x_int_driver = {
130 .phy_id = 0x0007c0d0, /* OUI=0x00800f, Model#=0x0d */
131 .phy_id_mask = 0xfffffff0,
132 .name = "SMSC LAN911x Internal PHY",
133
134 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
135 | SUPPORTED_Asym_Pause),
136 .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
137
138 /* basic functions */
139 .config_aneg = genphy_config_aneg,
140 .read_status = genphy_read_status,
141 .config_init = smsc_phy_config_init,
142
143 /* IRQ related */
144 .ack_interrupt = smsc_phy_ack_interrupt,
145 .config_intr = smsc_phy_config_intr,
146
147 .driver = { .owner = THIS_MODULE, }
148};
149
129static int __init smsc_init(void) 150static int __init smsc_init(void)
130{ 151{
131 int ret; 152 int ret;
@@ -142,8 +163,14 @@ static int __init smsc_init(void)
142 if (ret) 163 if (ret)
143 goto err3; 164 goto err3;
144 165
166 ret = phy_driver_register (&lan911x_int_driver);
167 if (ret)
168 goto err4;
169
145 return 0; 170 return 0;
146 171
172err4:
173 phy_driver_unregister (&lan8700_driver);
147err3: 174err3:
148 phy_driver_unregister (&lan8187_driver); 175 phy_driver_unregister (&lan8187_driver);
149err2: 176err2:
@@ -154,6 +181,7 @@ err1:
154 181
155static void __exit smsc_exit(void) 182static void __exit smsc_exit(void)
156{ 183{
184 phy_driver_unregister (&lan911x_int_driver);
157 phy_driver_unregister (&lan8700_driver); 185 phy_driver_unregister (&lan8700_driver);
158 phy_driver_unregister (&lan8187_driver); 186 phy_driver_unregister (&lan8187_driver);
159 phy_driver_unregister (&lan83c185_driver); 187 phy_driver_unregister (&lan83c185_driver);
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
new file mode 100644
index 000000000000..6bdb0d53aaf9
--- /dev/null
+++ b/drivers/net/phy/ste10Xp.c
@@ -0,0 +1,137 @@
1/*
2 * drivers/net/phy/ste10Xp.c
3 *
4 * Driver for STMicroelectronics STe10Xp PHYs
5 *
6 * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
7 *
8 * Copyright (c) 2008 STMicroelectronics Limited
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/sched.h>
20#include <linux/kernel.h>
21#include <linux/moduleparam.h>
22#include <linux/interrupt.h>
23#include <linux/netdevice.h>
24#include <linux/ethtool.h>
25#include <linux/mii.h>
26#include <linux/phy.h>
27
28#define MII_XCIIS 0x11 /* Configuration Info IRQ & Status Reg */
29#define MII_XIE 0x12 /* Interrupt Enable Register */
30#define MII_XIE_DEFAULT_MASK 0x0070 /* ANE complete, Remote Fault, Link Down */
31
32#define STE101P_PHY_ID 0x00061c50
33#define STE100P_PHY_ID 0x1c040011
34
35static int ste10Xp_config_init(struct phy_device *phydev)
36{
37 int value, err;
38
39 /* Software Reset PHY */
40 value = phy_read(phydev, MII_BMCR);
41 if (value < 0)
42 return value;
43
44 value |= BMCR_RESET;
45 err = phy_write(phydev, MII_BMCR, value);
46 if (err < 0)
47 return err;
48
49 do {
50 value = phy_read(phydev, MII_BMCR);
51 } while (value & BMCR_RESET);
52
53 return 0;
54}
55
56static int ste10Xp_config_intr(struct phy_device *phydev)
57{
58 int err, value;
59
60 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
61 /* Enable all STe101P interrupts (PR12) */
62 err = phy_write(phydev, MII_XIE, MII_XIE_DEFAULT_MASK);
63 /* clear any pending interrupts */
64 if (err == 0) {
65 value = phy_read(phydev, MII_XCIIS);
66 if (value < 0)
67 err = value;
68 }
69 } else
70 err = phy_write(phydev, MII_XIE, 0);
71
72 return err;
73}
74
75static int ste10Xp_ack_interrupt(struct phy_device *phydev)
76{
77 int err = phy_read(phydev, MII_XCIIS);
78 if (err < 0)
79 return err;
80
81 return 0;
82}
83
84static struct phy_driver ste101p_pdriver = {
85 .phy_id = STE101P_PHY_ID,
86 .phy_id_mask = 0xfffffff0,
87 .name = "STe101p",
88 .features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
89 .flags = PHY_HAS_INTERRUPT,
90 .config_init = ste10Xp_config_init,
91 .config_aneg = genphy_config_aneg,
92 .read_status = genphy_read_status,
93 .ack_interrupt = ste10Xp_ack_interrupt,
94 .config_intr = ste10Xp_config_intr,
95 .suspend = genphy_suspend,
96 .resume = genphy_resume,
97 .driver = {.owner = THIS_MODULE,}
98};
99
100static struct phy_driver ste100p_pdriver = {
101 .phy_id = STE100P_PHY_ID,
102 .phy_id_mask = 0xffffffff,
103 .name = "STe100p",
104 .features = PHY_BASIC_FEATURES | SUPPORTED_Pause,
105 .flags = PHY_HAS_INTERRUPT,
106 .config_init = ste10Xp_config_init,
107 .config_aneg = genphy_config_aneg,
108 .read_status = genphy_read_status,
109 .ack_interrupt = ste10Xp_ack_interrupt,
110 .config_intr = ste10Xp_config_intr,
111 .suspend = genphy_suspend,
112 .resume = genphy_resume,
113 .driver = {.owner = THIS_MODULE,}
114};
115
116static int __init ste10Xp_init(void)
117{
118 int retval;
119
120 retval = phy_driver_register(&ste100p_pdriver);
121 if (retval < 0)
122 return retval;
123 return phy_driver_register(&ste101p_pdriver);
124}
125
126static void __exit ste10Xp_exit(void)
127{
128 phy_driver_unregister(&ste100p_pdriver);
129 phy_driver_unregister(&ste101p_pdriver);
130}
131
132module_init(ste10Xp_init);
133module_exit(ste10Xp_exit);
134
135MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver");
136MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
137MODULE_LICENSE("GPL");
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 1e965427b0e9..0c46d603b8fe 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -229,7 +229,7 @@ static inline void enable_parport_interrupts (struct net_device *dev)
229 if (dev->irq != -1) 229 if (dev->irq != -1)
230 { 230 {
231 struct parport *port = 231 struct parport *port =
232 ((struct net_local *)dev->priv)->pardev->port; 232 ((struct net_local *)netdev_priv(dev))->pardev->port;
233 port->ops->enable_irq (port); 233 port->ops->enable_irq (port);
234 } 234 }
235} 235}
@@ -239,7 +239,7 @@ static inline void disable_parport_interrupts (struct net_device *dev)
239 if (dev->irq != -1) 239 if (dev->irq != -1)
240 { 240 {
241 struct parport *port = 241 struct parport *port =
242 ((struct net_local *)dev->priv)->pardev->port; 242 ((struct net_local *)netdev_priv(dev))->pardev->port;
243 port->ops->disable_irq (port); 243 port->ops->disable_irq (port);
244 } 244 }
245} 245}
@@ -247,7 +247,7 @@ static inline void disable_parport_interrupts (struct net_device *dev)
247static inline void write_data (struct net_device *dev, unsigned char data) 247static inline void write_data (struct net_device *dev, unsigned char data)
248{ 248{
249 struct parport *port = 249 struct parport *port =
250 ((struct net_local *)dev->priv)->pardev->port; 250 ((struct net_local *)netdev_priv(dev))->pardev->port;
251 251
252 port->ops->write_data (port, data); 252 port->ops->write_data (port, data);
253} 253}
@@ -255,7 +255,7 @@ static inline void write_data (struct net_device *dev, unsigned char data)
255static inline unsigned char read_status (struct net_device *dev) 255static inline unsigned char read_status (struct net_device *dev)
256{ 256{
257 struct parport *port = 257 struct parport *port =
258 ((struct net_local *)dev->priv)->pardev->port; 258 ((struct net_local *)netdev_priv(dev))->pardev->port;
259 259
260 return port->ops->read_status (port); 260 return port->ops->read_status (port);
261} 261}
@@ -638,14 +638,14 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
638 638
639 case PLIP_PK_DATA: 639 case PLIP_PK_DATA:
640 lbuf = rcv->skb->data; 640 lbuf = rcv->skb->data;
641 do 641 do {
642 if (plip_receive(nibble_timeout, dev, 642 if (plip_receive(nibble_timeout, dev,
643 &rcv->nibble, &lbuf[rcv->byte])) 643 &rcv->nibble, &lbuf[rcv->byte]))
644 return TIMEOUT; 644 return TIMEOUT;
645 while (++rcv->byte < rcv->length.h); 645 } while (++rcv->byte < rcv->length.h);
646 do 646 do {
647 rcv->checksum += lbuf[--rcv->byte]; 647 rcv->checksum += lbuf[--rcv->byte];
648 while (rcv->byte); 648 } while (rcv->byte);
649 rcv->state = PLIP_PK_CHECKSUM; 649 rcv->state = PLIP_PK_CHECKSUM;
650 650
651 case PLIP_PK_CHECKSUM: 651 case PLIP_PK_CHECKSUM:
@@ -664,7 +664,6 @@ plip_receive_packet(struct net_device *dev, struct net_local *nl,
664 /* Inform the upper layer for the arrival of a packet. */ 664 /* Inform the upper layer for the arrival of a packet. */
665 rcv->skb->protocol=plip_type_trans(rcv->skb, dev); 665 rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
666 netif_rx_ni(rcv->skb); 666 netif_rx_ni(rcv->skb);
667 dev->last_rx = jiffies;
668 dev->stats.rx_bytes += rcv->length.h; 667 dev->stats.rx_bytes += rcv->length.h;
669 dev->stats.rx_packets++; 668 dev->stats.rx_packets++;
670 rcv->skb = NULL; 669 rcv->skb = NULL;
@@ -817,14 +816,14 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
817 snd->checksum = 0; 816 snd->checksum = 0;
818 817
819 case PLIP_PK_DATA: 818 case PLIP_PK_DATA:
820 do 819 do {
821 if (plip_send(nibble_timeout, dev, 820 if (plip_send(nibble_timeout, dev,
822 &snd->nibble, lbuf[snd->byte])) 821 &snd->nibble, lbuf[snd->byte]))
823 return TIMEOUT; 822 return TIMEOUT;
824 while (++snd->byte < snd->length.h); 823 } while (++snd->byte < snd->length.h);
825 do 824 do {
826 snd->checksum += lbuf[--snd->byte]; 825 snd->checksum += lbuf[--snd->byte];
827 while (snd->byte); 826 } while (snd->byte);
828 snd->state = PLIP_PK_CHECKSUM; 827 snd->state = PLIP_PK_CHECKSUM;
829 828
830 case PLIP_PK_CHECKSUM: 829 case PLIP_PK_CHECKSUM:
@@ -1018,8 +1017,8 @@ plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1018 return ret; 1017 return ret;
1019} 1018}
1020 1019
1021int plip_hard_header_cache(const struct neighbour *neigh, 1020static int plip_hard_header_cache(const struct neighbour *neigh,
1022 struct hh_cache *hh) 1021 struct hh_cache *hh)
1023{ 1022{
1024 int ret; 1023 int ret;
1025 1024
@@ -1397,9 +1396,3 @@ static int __init plip_init (void)
1397module_init(plip_init); 1396module_init(plip_init);
1398module_exit(plip_cleanup_module); 1397module_exit(plip_cleanup_module);
1399MODULE_LICENSE("GPL"); 1398MODULE_LICENSE("GPL");
1400
1401/*
1402 * Local variables:
1403 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
1404 * End:
1405 */
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 451bdb57d6fc..6567fabd2e13 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -293,9 +293,6 @@ ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
293 err = -EFAULT; 293 err = -EFAULT;
294 switch (cmd) { 294 switch (cmd) {
295 case PPPIOCGCHAN: 295 case PPPIOCGCHAN:
296 err = -ENXIO;
297 if (!ap)
298 break;
299 err = -EFAULT; 296 err = -EFAULT;
300 if (put_user(ppp_channel_index(&ap->chan), p)) 297 if (put_user(ppp_channel_index(&ap->chan), p))
301 break; 298 break;
@@ -303,9 +300,6 @@ ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
303 break; 300 break;
304 301
305 case PPPIOCGUNIT: 302 case PPPIOCGUNIT:
306 err = -ENXIO;
307 if (!ap)
308 break;
309 err = -EFAULT; 303 err = -EFAULT;
310 if (put_user(ppp_unit_number(&ap->chan), p)) 304 if (put_user(ppp_unit_number(&ap->chan), p))
311 break; 305 break;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 714a23035de1..06b448285eb5 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -27,6 +27,7 @@
27#include <linux/kmod.h> 27#include <linux/kmod.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/list.h> 29#include <linux/list.h>
30#include <linux/idr.h>
30#include <linux/netdevice.h> 31#include <linux/netdevice.h>
31#include <linux/poll.h> 32#include <linux/poll.h>
32#include <linux/ppp_defs.h> 33#include <linux/ppp_defs.h>
@@ -173,35 +174,13 @@ struct channel {
173 */ 174 */
174 175
175/* 176/*
176 * A cardmap represents a mapping from unsigned integers to pointers,
177 * and provides a fast "find lowest unused number" operation.
178 * It uses a broad (32-way) tree with a bitmap at each level.
179 * It is designed to be space-efficient for small numbers of entries
180 * and time-efficient for large numbers of entries.
181 */
182#define CARDMAP_ORDER 5
183#define CARDMAP_WIDTH (1U << CARDMAP_ORDER)
184#define CARDMAP_MASK (CARDMAP_WIDTH - 1)
185
186struct cardmap {
187 int shift;
188 unsigned long inuse;
189 struct cardmap *parent;
190 void *ptr[CARDMAP_WIDTH];
191};
192static void *cardmap_get(struct cardmap *map, unsigned int nr);
193static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
194static unsigned int cardmap_find_first_free(struct cardmap *map);
195static void cardmap_destroy(struct cardmap **map);
196
197/*
198 * all_ppp_mutex protects the all_ppp_units mapping. 177 * all_ppp_mutex protects the all_ppp_units mapping.
199 * It also ensures that finding a ppp unit in the all_ppp_units map 178 * It also ensures that finding a ppp unit in the all_ppp_units map
200 * and updating its file.refcnt field is atomic. 179 * and updating its file.refcnt field is atomic.
201 */ 180 */
202static DEFINE_MUTEX(all_ppp_mutex); 181static DEFINE_MUTEX(all_ppp_mutex);
203static struct cardmap *all_ppp_units;
204static atomic_t ppp_unit_count = ATOMIC_INIT(0); 182static atomic_t ppp_unit_count = ATOMIC_INIT(0);
183static DEFINE_IDR(ppp_units_idr);
205 184
206/* 185/*
207 * all_channels_lock protects all_channels and last_channel_index, 186 * all_channels_lock protects all_channels and last_channel_index,
@@ -270,6 +249,9 @@ static struct channel *ppp_find_channel(int unit);
270static int ppp_connect_channel(struct channel *pch, int unit); 249static int ppp_connect_channel(struct channel *pch, int unit);
271static int ppp_disconnect_channel(struct channel *pch); 250static int ppp_disconnect_channel(struct channel *pch);
272static void ppp_destroy_channel(struct channel *pch); 251static void ppp_destroy_channel(struct channel *pch);
252static int unit_get(struct idr *p, void *ptr);
253static void unit_put(struct idr *p, int n);
254static void *unit_find(struct idr *p, int n);
273 255
274static struct class *ppp_class; 256static struct class *ppp_class;
275 257
@@ -887,7 +869,7 @@ out_chrdev:
887static int 869static int
888ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) 870ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
889{ 871{
890 struct ppp *ppp = (struct ppp *) dev->priv; 872 struct ppp *ppp = netdev_priv(dev);
891 int npi, proto; 873 int npi, proto;
892 unsigned char *pp; 874 unsigned char *pp;
893 875
@@ -932,7 +914,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
932static int 914static int
933ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 915ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
934{ 916{
935 struct ppp *ppp = dev->priv; 917 struct ppp *ppp = netdev_priv(dev);
936 int err = -EFAULT; 918 int err = -EFAULT;
937 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; 919 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
938 struct ppp_stats stats; 920 struct ppp_stats stats;
@@ -972,8 +954,14 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
972 return err; 954 return err;
973} 955}
974 956
957static const struct net_device_ops ppp_netdev_ops = {
958 .ndo_start_xmit = ppp_start_xmit,
959 .ndo_do_ioctl = ppp_net_ioctl,
960};
961
975static void ppp_setup(struct net_device *dev) 962static void ppp_setup(struct net_device *dev)
976{ 963{
964 dev->netdev_ops = &ppp_netdev_ops;
977 dev->hard_header_len = PPP_HDRLEN; 965 dev->hard_header_len = PPP_HDRLEN;
978 dev->mtu = PPP_MTU; 966 dev->mtu = PPP_MTU;
979 dev->addr_len = 0; 967 dev->addr_len = 0;
@@ -1684,7 +1672,6 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1684 skb->protocol = htons(npindex_to_ethertype[npi]); 1672 skb->protocol = htons(npindex_to_ethertype[npi]);
1685 skb_reset_mac_header(skb); 1673 skb_reset_mac_header(skb);
1686 netif_rx(skb); 1674 netif_rx(skb);
1687 ppp->dev->last_rx = jiffies;
1688 } 1675 }
1689 } 1676 }
1690 return; 1677 return;
@@ -2414,13 +2401,12 @@ ppp_create_interface(int unit, int *retp)
2414 int ret = -ENOMEM; 2401 int ret = -ENOMEM;
2415 int i; 2402 int i;
2416 2403
2417 ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL); 2404 dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
2418 if (!ppp)
2419 goto out;
2420 dev = alloc_netdev(0, "", ppp_setup);
2421 if (!dev) 2405 if (!dev)
2422 goto out1; 2406 goto out1;
2423 2407
2408 ppp = netdev_priv(dev);
2409 ppp->dev = dev;
2424 ppp->mru = PPP_MRU; 2410 ppp->mru = PPP_MRU;
2425 init_ppp_file(&ppp->file, INTERFACE); 2411 init_ppp_file(&ppp->file, INTERFACE);
2426 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */ 2412 ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
@@ -2433,18 +2419,25 @@ ppp_create_interface(int unit, int *retp)
2433 ppp->minseq = -1; 2419 ppp->minseq = -1;
2434 skb_queue_head_init(&ppp->mrq); 2420 skb_queue_head_init(&ppp->mrq);
2435#endif /* CONFIG_PPP_MULTILINK */ 2421#endif /* CONFIG_PPP_MULTILINK */
2436 ppp->dev = dev;
2437 dev->priv = ppp;
2438
2439 dev->hard_start_xmit = ppp_start_xmit;
2440 dev->do_ioctl = ppp_net_ioctl;
2441 2422
2442 ret = -EEXIST; 2423 ret = -EEXIST;
2443 mutex_lock(&all_ppp_mutex); 2424 mutex_lock(&all_ppp_mutex);
2444 if (unit < 0) 2425
2445 unit = cardmap_find_first_free(all_ppp_units); 2426 if (unit < 0) {
2446 else if (cardmap_get(all_ppp_units, unit) != NULL) 2427 unit = unit_get(&ppp_units_idr, ppp);
2447 goto out2; /* unit already exists */ 2428 if (unit < 0) {
2429 *retp = unit;
2430 goto out2;
2431 }
2432 } else {
2433 if (unit_find(&ppp_units_idr, unit))
2434 goto out2; /* unit already exists */
2435 else {
2436 /* darn, someone is cheating us? */
2437 *retp = -EINVAL;
2438 goto out2;
2439 }
2440 }
2448 2441
2449 /* Initialize the new ppp unit */ 2442 /* Initialize the new ppp unit */
2450 ppp->file.index = unit; 2443 ppp->file.index = unit;
@@ -2452,29 +2445,22 @@ ppp_create_interface(int unit, int *retp)
2452 2445
2453 ret = register_netdev(dev); 2446 ret = register_netdev(dev);
2454 if (ret != 0) { 2447 if (ret != 0) {
2448 unit_put(&ppp_units_idr, unit);
2455 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", 2449 printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
2456 dev->name, ret); 2450 dev->name, ret);
2457 goto out2; 2451 goto out2;
2458 } 2452 }
2459 2453
2460 atomic_inc(&ppp_unit_count); 2454 atomic_inc(&ppp_unit_count);
2461 ret = cardmap_set(&all_ppp_units, unit, ppp);
2462 if (ret != 0)
2463 goto out3;
2464
2465 mutex_unlock(&all_ppp_mutex); 2455 mutex_unlock(&all_ppp_mutex);
2456
2466 *retp = 0; 2457 *retp = 0;
2467 return ppp; 2458 return ppp;
2468 2459
2469out3:
2470 atomic_dec(&ppp_unit_count);
2471 unregister_netdev(dev);
2472out2: 2460out2:
2473 mutex_unlock(&all_ppp_mutex); 2461 mutex_unlock(&all_ppp_mutex);
2474 free_netdev(dev); 2462 free_netdev(dev);
2475out1: 2463out1:
2476 kfree(ppp);
2477out:
2478 *retp = ret; 2464 *retp = ret;
2479 return NULL; 2465 return NULL;
2480} 2466}
@@ -2508,7 +2494,7 @@ static void ppp_shutdown_interface(struct ppp *ppp)
2508 } else 2494 } else
2509 ppp_unlock(ppp); 2495 ppp_unlock(ppp);
2510 2496
2511 cardmap_set(&all_ppp_units, ppp->file.index, NULL); 2497 unit_put(&ppp_units_idr, ppp->file.index);
2512 ppp->file.dead = 1; 2498 ppp->file.dead = 1;
2513 ppp->owner = NULL; 2499 ppp->owner = NULL;
2514 wake_up_interruptible(&ppp->file.rwait); 2500 wake_up_interruptible(&ppp->file.rwait);
@@ -2562,7 +2548,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
2562static struct ppp * 2548static struct ppp *
2563ppp_find_unit(int unit) 2549ppp_find_unit(int unit)
2564{ 2550{
2565 return cardmap_get(all_ppp_units, unit); 2551 return unit_find(&ppp_units_idr, unit);
2566} 2552}
2567 2553
2568/* 2554/*
@@ -2680,123 +2666,45 @@ static void __exit ppp_cleanup(void)
2680 /* should never happen */ 2666 /* should never happen */
2681 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) 2667 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
2682 printk(KERN_ERR "PPP: removing module but units remain!\n"); 2668 printk(KERN_ERR "PPP: removing module but units remain!\n");
2683 cardmap_destroy(&all_ppp_units);
2684 unregister_chrdev(PPP_MAJOR, "ppp"); 2669 unregister_chrdev(PPP_MAJOR, "ppp");
2685 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); 2670 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
2686 class_destroy(ppp_class); 2671 class_destroy(ppp_class);
2672 idr_destroy(&ppp_units_idr);
2687} 2673}
2688 2674
2689/* 2675/*
2690 * Cardmap implementation. 2676 * Units handling. Caller must protect concurrent access
2677 * by holding all_ppp_mutex
2691 */ 2678 */
2692static void *cardmap_get(struct cardmap *map, unsigned int nr) 2679
2680/* get new free unit number and associate pointer with it */
2681static int unit_get(struct idr *p, void *ptr)
2693{ 2682{
2694 struct cardmap *p; 2683 int unit, err;
2695 int i;
2696 2684
2697 for (p = map; p != NULL; ) { 2685again:
2698 if ((i = nr >> p->shift) >= CARDMAP_WIDTH) 2686 if (idr_pre_get(p, GFP_KERNEL) == 0) {
2699 return NULL; 2687 printk(KERN_ERR "Out of memory expanding drawable idr\n");
2700 if (p->shift == 0) 2688 return -ENOMEM;
2701 return p->ptr[i];
2702 nr &= ~(CARDMAP_MASK << p->shift);
2703 p = p->ptr[i];
2704 } 2689 }
2705 return NULL;
2706}
2707 2690
2708static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) 2691 err = idr_get_new_above(p, ptr, 0, &unit);
2709{ 2692 if (err == -EAGAIN)
2710 struct cardmap *p; 2693 goto again;
2711 int i;
2712 2694
2713 p = *pmap; 2695 return unit;
2714 if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
2715 do {
2716 /* need a new top level */
2717 struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
2718 if (!np)
2719 goto enomem;
2720 np->ptr[0] = p;
2721 if (p != NULL) {
2722 np->shift = p->shift + CARDMAP_ORDER;
2723 p->parent = np;
2724 } else
2725 np->shift = 0;
2726 p = np;
2727 } while ((nr >> p->shift) >= CARDMAP_WIDTH);
2728 *pmap = p;
2729 }
2730 while (p->shift > 0) {
2731 i = (nr >> p->shift) & CARDMAP_MASK;
2732 if (p->ptr[i] == NULL) {
2733 struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
2734 if (!np)
2735 goto enomem;
2736 np->shift = p->shift - CARDMAP_ORDER;
2737 np->parent = p;
2738 p->ptr[i] = np;
2739 }
2740 if (ptr == NULL)
2741 clear_bit(i, &p->inuse);
2742 p = p->ptr[i];
2743 }
2744 i = nr & CARDMAP_MASK;
2745 p->ptr[i] = ptr;
2746 if (ptr != NULL)
2747 set_bit(i, &p->inuse);
2748 else
2749 clear_bit(i, &p->inuse);
2750 return 0;
2751 enomem:
2752 return -ENOMEM;
2753} 2696}
2754 2697
2755static unsigned int cardmap_find_first_free(struct cardmap *map) 2698/* put unit number back to a pool */
2699static void unit_put(struct idr *p, int n)
2756{ 2700{
2757 struct cardmap *p; 2701 idr_remove(p, n);
2758 unsigned int nr = 0;
2759 int i;
2760
2761 if ((p = map) == NULL)
2762 return 0;
2763 for (;;) {
2764 i = find_first_zero_bit(&p->inuse, CARDMAP_WIDTH);
2765 if (i >= CARDMAP_WIDTH) {
2766 if (p->parent == NULL)
2767 return CARDMAP_WIDTH << p->shift;
2768 p = p->parent;
2769 i = (nr >> p->shift) & CARDMAP_MASK;
2770 set_bit(i, &p->inuse);
2771 continue;
2772 }
2773 nr = (nr & (~CARDMAP_MASK << p->shift)) | (i << p->shift);
2774 if (p->shift == 0 || p->ptr[i] == NULL)
2775 return nr;
2776 p = p->ptr[i];
2777 }
2778} 2702}
2779 2703
2780static void cardmap_destroy(struct cardmap **pmap) 2704/* get pointer associated with the number */
2705static void *unit_find(struct idr *p, int n)
2781{ 2706{
2782 struct cardmap *p, *np; 2707 return idr_find(p, n);
2783 int i;
2784
2785 for (p = *pmap; p != NULL; p = np) {
2786 if (p->shift != 0) {
2787 for (i = 0; i < CARDMAP_WIDTH; ++i)
2788 if (p->ptr[i] != NULL)
2789 break;
2790 if (i < CARDMAP_WIDTH) {
2791 np = p->ptr[i];
2792 p->ptr[i] = NULL;
2793 continue;
2794 }
2795 }
2796 np = p->parent;
2797 kfree(p);
2798 }
2799 *pmap = NULL;
2800} 2708}
2801 2709
2802/* Module/initialization stuff */ 2710/* Module/initialization stuff */
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 801d8f99d471..1e892b7b1f8c 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -333,9 +333,6 @@ ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
333 err = -EFAULT; 333 err = -EFAULT;
334 switch (cmd) { 334 switch (cmd) {
335 case PPPIOCGCHAN: 335 case PPPIOCGCHAN:
336 err = -ENXIO;
337 if (!ap)
338 break;
339 err = -EFAULT; 336 err = -EFAULT;
340 if (put_user(ppp_channel_index(&ap->chan), p)) 337 if (put_user(ppp_channel_index(&ap->chan), p))
341 break; 338 break;
@@ -343,9 +340,6 @@ ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
343 break; 340 break;
344 341
345 case PPPIOCGUNIT: 342 case PPPIOCGUNIT:
346 err = -ENXIO;
347 if (!ap)
348 break;
349 err = -EFAULT; 343 err = -EFAULT;
350 if (put_user(ppp_unit_number(&ap->chan), p)) 344 if (put_user(ppp_unit_number(&ap->chan), p))
351 break; 345 break;
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index b646e92134dc..c22b30533a14 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -958,7 +958,6 @@ static int pppoe_seq_show(struct seq_file *seq, void *v)
958{ 958{
959 struct pppox_sock *po; 959 struct pppox_sock *po;
960 char *dev_name; 960 char *dev_name;
961 DECLARE_MAC_BUF(mac);
962 961
963 if (v == SEQ_START_TOKEN) { 962 if (v == SEQ_START_TOKEN) {
964 seq_puts(seq, "Id Address Device\n"); 963 seq_puts(seq, "Id Address Device\n");
@@ -968,8 +967,8 @@ static int pppoe_seq_show(struct seq_file *seq, void *v)
968 po = v; 967 po = v;
969 dev_name = po->pppoe_pa.dev; 968 dev_name = po->pppoe_pa.dev;
970 969
971 seq_printf(seq, "%08X %s %8s\n", 970 seq_printf(seq, "%08X %pM %8s\n",
972 po->pppoe_pa.sid, print_mac(mac, po->pppoe_pa.remote), dev_name); 971 po->pppoe_pa.sid, po->pppoe_pa.remote, dev_name);
973out: 972out:
974 return 0; 973 return 0;
975} 974}
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index e98d9773158d..f1a946785c6a 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -489,6 +489,30 @@ out:
489 spin_unlock_bh(&session->reorder_q.lock); 489 spin_unlock_bh(&session->reorder_q.lock);
490} 490}
491 491
492static inline int pppol2tp_verify_udp_checksum(struct sock *sk,
493 struct sk_buff *skb)
494{
495 struct udphdr *uh = udp_hdr(skb);
496 u16 ulen = ntohs(uh->len);
497 struct inet_sock *inet;
498 __wsum psum;
499
500 if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
501 return 0;
502
503 inet = inet_sk(sk);
504 psum = csum_tcpudp_nofold(inet->saddr, inet->daddr, ulen,
505 IPPROTO_UDP, 0);
506
507 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
508 !csum_fold(csum_add(psum, skb->csum)))
509 return 0;
510
511 skb->csum = psum;
512
513 return __skb_checksum_complete(skb);
514}
515
492/* Internal receive frame. Do the real work of receiving an L2TP data frame 516/* Internal receive frame. Do the real work of receiving an L2TP data frame
493 * here. The skb is not on a list when we get here. 517 * here. The skb is not on a list when we get here.
494 * Returns 0 if the packet was a data packet and was successfully passed on. 518 * Returns 0 if the packet was a data packet and was successfully passed on.
@@ -509,6 +533,9 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
509 if (tunnel == NULL) 533 if (tunnel == NULL)
510 goto no_tunnel; 534 goto no_tunnel;
511 535
536 if (tunnel->sock && pppol2tp_verify_udp_checksum(tunnel->sock, skb))
537 goto discard_bad_csum;
538
512 /* UDP always verifies the packet length. */ 539 /* UDP always verifies the packet length. */
513 __skb_pull(skb, sizeof(struct udphdr)); 540 __skb_pull(skb, sizeof(struct udphdr));
514 541
@@ -725,6 +752,14 @@ discard:
725 752
726 return 0; 753 return 0;
727 754
755discard_bad_csum:
756 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
757 UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
758 tunnel->stats.rx_errors++;
759 kfree_skb(skb);
760
761 return 0;
762
728error: 763error:
729 /* Put UDP header back */ 764 /* Put UDP header back */
730 __skb_push(skb, sizeof(struct udphdr)); 765 __skb_push(skb, sizeof(struct udphdr));
@@ -851,7 +886,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
851 static const unsigned char ppph[2] = { 0xff, 0x03 }; 886 static const unsigned char ppph[2] = { 0xff, 0x03 };
852 struct sock *sk = sock->sk; 887 struct sock *sk = sock->sk;
853 struct inet_sock *inet; 888 struct inet_sock *inet;
854 __wsum csum = 0; 889 __wsum csum;
855 struct sk_buff *skb; 890 struct sk_buff *skb;
856 int error; 891 int error;
857 int hdr_len; 892 int hdr_len;
@@ -859,6 +894,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
859 struct pppol2tp_tunnel *tunnel; 894 struct pppol2tp_tunnel *tunnel;
860 struct udphdr *uh; 895 struct udphdr *uh;
861 unsigned int len; 896 unsigned int len;
897 struct sock *sk_tun;
898 u16 udp_len;
862 899
863 error = -ENOTCONN; 900 error = -ENOTCONN;
864 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) 901 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
@@ -870,7 +907,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
870 if (session == NULL) 907 if (session == NULL)
871 goto error; 908 goto error;
872 909
873 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); 910 sk_tun = session->tunnel_sock;
911 tunnel = pppol2tp_sock_to_tunnel(sk_tun);
874 if (tunnel == NULL) 912 if (tunnel == NULL)
875 goto error_put_sess; 913 goto error_put_sess;
876 914
@@ -893,11 +931,12 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
893 skb_reset_transport_header(skb); 931 skb_reset_transport_header(skb);
894 932
895 /* Build UDP header */ 933 /* Build UDP header */
896 inet = inet_sk(session->tunnel_sock); 934 inet = inet_sk(sk_tun);
935 udp_len = hdr_len + sizeof(ppph) + total_len;
897 uh = (struct udphdr *) skb->data; 936 uh = (struct udphdr *) skb->data;
898 uh->source = inet->sport; 937 uh->source = inet->sport;
899 uh->dest = inet->dport; 938 uh->dest = inet->dport;
900 uh->len = htons(hdr_len + sizeof(ppph) + total_len); 939 uh->len = htons(udp_len);
901 uh->check = 0; 940 uh->check = 0;
902 skb_put(skb, sizeof(struct udphdr)); 941 skb_put(skb, sizeof(struct udphdr));
903 942
@@ -919,8 +958,22 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
919 skb_put(skb, total_len); 958 skb_put(skb, total_len);
920 959
921 /* Calculate UDP checksum if configured to do so */ 960 /* Calculate UDP checksum if configured to do so */
922 if (session->tunnel_sock->sk_no_check != UDP_CSUM_NOXMIT) 961 if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
923 csum = udp_csum_outgoing(sk, skb); 962 skb->ip_summed = CHECKSUM_NONE;
963 else if (!(skb->dst->dev->features & NETIF_F_V4_CSUM)) {
964 skb->ip_summed = CHECKSUM_COMPLETE;
965 csum = skb_checksum(skb, 0, udp_len, 0);
966 uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr,
967 udp_len, IPPROTO_UDP, csum);
968 if (uh->check == 0)
969 uh->check = CSUM_MANGLED_0;
970 } else {
971 skb->ip_summed = CHECKSUM_PARTIAL;
972 skb->csum_start = skb_transport_header(skb) - skb->head;
973 skb->csum_offset = offsetof(struct udphdr, check);
974 uh->check = ~csum_tcpudp_magic(inet->saddr, inet->daddr,
975 udp_len, IPPROTO_UDP, 0);
976 }
924 977
925 /* Debug */ 978 /* Debug */
926 if (session->send_seq) 979 if (session->send_seq)
@@ -1008,13 +1061,14 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1008 struct sock *sk = (struct sock *) chan->private; 1061 struct sock *sk = (struct sock *) chan->private;
1009 struct sock *sk_tun; 1062 struct sock *sk_tun;
1010 int hdr_len; 1063 int hdr_len;
1064 u16 udp_len;
1011 struct pppol2tp_session *session; 1065 struct pppol2tp_session *session;
1012 struct pppol2tp_tunnel *tunnel; 1066 struct pppol2tp_tunnel *tunnel;
1013 int rc; 1067 int rc;
1014 int headroom; 1068 int headroom;
1015 int data_len = skb->len; 1069 int data_len = skb->len;
1016 struct inet_sock *inet; 1070 struct inet_sock *inet;
1017 __wsum csum = 0; 1071 __wsum csum;
1018 struct udphdr *uh; 1072 struct udphdr *uh;
1019 unsigned int len; 1073 unsigned int len;
1020 int old_headroom; 1074 int old_headroom;
@@ -1060,6 +1114,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1060 /* Setup L2TP header */ 1114 /* Setup L2TP header */
1061 pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len)); 1115 pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len));
1062 1116
1117 udp_len = sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len;
1118
1063 /* Setup UDP header */ 1119 /* Setup UDP header */
1064 inet = inet_sk(sk_tun); 1120 inet = inet_sk(sk_tun);
1065 __skb_push(skb, sizeof(*uh)); 1121 __skb_push(skb, sizeof(*uh));
@@ -1067,13 +1123,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1067 uh = udp_hdr(skb); 1123 uh = udp_hdr(skb);
1068 uh->source = inet->sport; 1124 uh->source = inet->sport;
1069 uh->dest = inet->dport; 1125 uh->dest = inet->dport;
1070 uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len); 1126 uh->len = htons(udp_len);
1071 uh->check = 0; 1127 uh->check = 0;
1072 1128
1073 /* *BROKEN* Calculate UDP checksum if configured to do so */
1074 if (sk_tun->sk_no_check != UDP_CSUM_NOXMIT)
1075 csum = udp_csum_outgoing(sk_tun, skb);
1076
1077 /* Debug */ 1129 /* Debug */
1078 if (session->send_seq) 1130 if (session->send_seq)
1079 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG, 1131 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
@@ -1108,6 +1160,24 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1108 skb->dst = dst_clone(__sk_dst_get(sk_tun)); 1160 skb->dst = dst_clone(__sk_dst_get(sk_tun));
1109 pppol2tp_skb_set_owner_w(skb, sk_tun); 1161 pppol2tp_skb_set_owner_w(skb, sk_tun);
1110 1162
1163 /* Calculate UDP checksum if configured to do so */
1164 if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
1165 skb->ip_summed = CHECKSUM_NONE;
1166 else if (!(skb->dst->dev->features & NETIF_F_V4_CSUM)) {
1167 skb->ip_summed = CHECKSUM_COMPLETE;
1168 csum = skb_checksum(skb, 0, udp_len, 0);
1169 uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr,
1170 udp_len, IPPROTO_UDP, csum);
1171 if (uh->check == 0)
1172 uh->check = CSUM_MANGLED_0;
1173 } else {
1174 skb->ip_summed = CHECKSUM_PARTIAL;
1175 skb->csum_start = skb_transport_header(skb) - skb->head;
1176 skb->csum_offset = offsetof(struct udphdr, check);
1177 uh->check = ~csum_tcpudp_magic(inet->saddr, inet->daddr,
1178 udp_len, IPPROTO_UDP, 0);
1179 }
1180
1111 /* Queue the packet to IP for output */ 1181 /* Queue the packet to IP for output */
1112 len = skb->len; 1182 len = skb->len;
1113 rc = ip_queue_xmit(skb, 1); 1183 rc = ip_queue_xmit(skb, 1);
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 2eb54fd7bed5..4b564eda5bd9 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -1443,7 +1443,6 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
1443{ 1443{
1444 int status; 1444 int status;
1445 u64 v1, v2; 1445 u64 v1, v2;
1446 DECLARE_MAC_BUF(mac);
1447 1446
1448 netdev->features = NETIF_F_IP_CSUM; 1447 netdev->features = NETIF_F_IP_CSUM;
1449 1448
@@ -1474,9 +1473,8 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
1474 __func__, netdev->name, status); 1473 __func__, netdev->name, status);
1475 return status; 1474 return status;
1476 } 1475 }
1477 dev_info(ctodev(card), "%s: MAC addr %s\n", 1476 dev_info(ctodev(card), "%s: MAC addr %pM\n",
1478 netdev->name, 1477 netdev->name, netdev->dev_addr);
1479 print_mac(mac, netdev->dev_addr));
1480 1478
1481 return 0; 1479 return 0;
1482} 1480}
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index a834b52a6a2c..ec2314246682 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -30,10 +30,11 @@
30#include <linux/ip.h> 30#include <linux/ip.h>
31#include <linux/tcp.h> 31#include <linux/tcp.h>
32#include <linux/wireless.h> 32#include <linux/wireless.h>
33#include <linux/ieee80211.h>
34#include <linux/if_arp.h>
33#include <linux/ctype.h> 35#include <linux/ctype.h>
34#include <linux/string.h> 36#include <linux/string.h>
35#include <net/iw_handler.h> 37#include <net/iw_handler.h>
36#include <net/ieee80211.h>
37 38
38#include <linux/dma-mapping.h> 39#include <linux/dma-mapping.h>
39#include <net/checksum.h> 40#include <net/checksum.h>
@@ -449,9 +450,9 @@ static size_t gelic_wl_synthesize_ie(u8 *buf,
449 450
450 /* element id */ 451 /* element id */
451 if (rsn) 452 if (rsn)
452 *buf++ = MFIE_TYPE_RSN; 453 *buf++ = WLAN_EID_RSN;
453 else 454 else
454 *buf++ = MFIE_TYPE_GENERIC; 455 *buf++ = WLAN_EID_GENERIC;
455 456
456 /* length filed; set later */ 457 /* length filed; set later */
457 buf++; 458 buf++;
@@ -539,7 +540,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
539 break; 540 break;
540 541
541 switch (item_id) { 542 switch (item_id) {
542 case MFIE_TYPE_GENERIC: 543 case WLAN_EID_GENERIC:
543 if ((OUI_LEN + 1 <= item_len) && 544 if ((OUI_LEN + 1 <= item_len) &&
544 !memcmp(pos, wpa_oui, OUI_LEN) && 545 !memcmp(pos, wpa_oui, OUI_LEN) &&
545 pos[OUI_LEN] == 0x01) { 546 pos[OUI_LEN] == 0x01) {
@@ -547,7 +548,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
547 ie_info->wpa.len = item_len + 2; 548 ie_info->wpa.len = item_len + 2;
548 } 549 }
549 break; 550 break;
550 case MFIE_TYPE_RSN: 551 case WLAN_EID_RSN:
551 ie_info->rsn.data = pos - 2; 552 ie_info->rsn.data = pos - 2;
552 /* length includes the header */ 553 /* length includes the header */
553 ie_info->rsn.len = item_len + 2; 554 ie_info->rsn.len = item_len + 2;
@@ -581,7 +582,7 @@ static char *gelic_wl_translate_scan(struct net_device *netdev,
581 char *tmp; 582 char *tmp;
582 u8 rate; 583 u8 rate;
583 unsigned int i, j, len; 584 unsigned int i, j, len;
584 u8 buf[MAX_WPA_IE_LEN]; 585 u8 buf[64]; /* arbitrary size large enough */
585 586
586 pr_debug("%s: <-\n", __func__); 587 pr_debug("%s: <-\n", __func__);
587 588
@@ -763,7 +764,6 @@ static void scan_list_dump(struct gelic_wl_info *wl)
763{ 764{
764 struct gelic_wl_scan_info *scan_info; 765 struct gelic_wl_scan_info *scan_info;
765 int i; 766 int i;
766 DECLARE_MAC_BUF(mac);
767 767
768 i = 0; 768 i = 0;
769 list_for_each_entry(scan_info, &wl->network_list, list) { 769 list_for_each_entry(scan_info, &wl->network_list, list) {
@@ -775,8 +775,7 @@ static void scan_list_dump(struct gelic_wl_info *wl)
775 scan_info->rate_len, scan_info->rate_ext_len, 775 scan_info->rate_len, scan_info->rate_ext_len,
776 scan_info->essid_len); 776 scan_info->essid_len);
777 /* -- */ 777 /* -- */
778 pr_debug("bssid=%s\n", 778 pr_debug("bssid=%pM\n", &scan_info->hwinfo->bssid[2]);
779 print_mac(mac, &scan_info->hwinfo->bssid[2]));
780 pr_debug("essid=%s\n", scan_info->hwinfo->essid); 779 pr_debug("essid=%s\n", scan_info->hwinfo->essid);
781 } 780 }
782} 781}
@@ -1167,11 +1166,7 @@ static int gelic_wl_set_ap(struct net_device *netdev,
1167 ETH_ALEN); 1166 ETH_ALEN);
1168 set_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat); 1167 set_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
1169 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); 1168 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
1170 pr_debug("%s: bss=%02x:%02x:%02x:%02x:%02x:%02x\n", 1169 pr_debug("%s: bss=%pM\n", __func__, wl->bssid);
1171 __func__,
1172 wl->bssid[0], wl->bssid[1],
1173 wl->bssid[2], wl->bssid[3],
1174 wl->bssid[4], wl->bssid[5]);
1175 } else { 1170 } else {
1176 pr_debug("%s: clear bssid\n", __func__); 1171 pr_debug("%s: clear bssid\n", __func__);
1177 clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat); 1172 clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat);
@@ -1632,7 +1627,6 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1632 unsigned long this_time = jiffies; 1627 unsigned long this_time = jiffies;
1633 unsigned int data_len, i, found, r; 1628 unsigned int data_len, i, found, r;
1634 void *buf; 1629 void *buf;
1635 DECLARE_MAC_BUF(mac);
1636 1630
1637 pr_debug("%s:start\n", __func__); 1631 pr_debug("%s:start\n", __func__);
1638 mutex_lock(&wl->scan_lock); 1632 mutex_lock(&wl->scan_lock);
@@ -1684,9 +1678,9 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1684 scan_info_size < data_len; 1678 scan_info_size < data_len;
1685 i++, scan_info_size += be16_to_cpu(scan_info->size), 1679 i++, scan_info_size += be16_to_cpu(scan_info->size),
1686 scan_info = (void *)scan_info + be16_to_cpu(scan_info->size)) { 1680 scan_info = (void *)scan_info + be16_to_cpu(scan_info->size)) {
1687 pr_debug("%s:size=%d bssid=%s scan_info=%p\n", __func__, 1681 pr_debug("%s:size=%d bssid=%pM scan_info=%p\n", __func__,
1688 be16_to_cpu(scan_info->size), 1682 be16_to_cpu(scan_info->size),
1689 print_mac(mac, &scan_info->bssid[2]), scan_info); 1683 &scan_info->bssid[2], scan_info);
1690 1684
1691 /* 1685 /*
1692 * The wireless firmware may return invalid channel 0 and/or 1686 * The wireless firmware may return invalid channel 0 and/or
@@ -1741,14 +1735,14 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
1741 target->essid_len = strnlen(scan_info->essid, 1735 target->essid_len = strnlen(scan_info->essid,
1742 sizeof(scan_info->essid)); 1736 sizeof(scan_info->essid));
1743 target->rate_len = 0; 1737 target->rate_len = 0;
1744 for (r = 0; r < MAX_RATES_LENGTH; r++) 1738 for (r = 0; r < 12; r++)
1745 if (scan_info->rate[r]) 1739 if (scan_info->rate[r])
1746 target->rate_len++; 1740 target->rate_len++;
1747 if (8 < target->rate_len) 1741 if (8 < target->rate_len)
1748 pr_info("%s: AP returns %d rates\n", __func__, 1742 pr_info("%s: AP returns %d rates\n", __func__,
1749 target->rate_len); 1743 target->rate_len);
1750 target->rate_ext_len = 0; 1744 target->rate_ext_len = 0;
1751 for (r = 0; r < MAX_RATES_EX_LENGTH; r++) 1745 for (r = 0; r < 16; r++)
1752 if (scan_info->ext_rate[r]) 1746 if (scan_info->ext_rate[r])
1753 target->rate_ext_len++; 1747 target->rate_ext_len++;
1754 list_move_tail(&target->list, &wl->network_list); 1748 list_move_tail(&target->list, &wl->network_list);
@@ -1787,7 +1781,6 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
1787 struct gelic_wl_scan_info *best_bss; 1781 struct gelic_wl_scan_info *best_bss;
1788 int weight, best_weight; 1782 int weight, best_weight;
1789 u16 security; 1783 u16 security;
1790 DECLARE_MAC_BUF(mac);
1791 1784
1792 pr_debug("%s: <-\n", __func__); 1785 pr_debug("%s: <-\n", __func__);
1793 1786
@@ -1857,8 +1850,8 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl)
1857#ifdef DEBUG 1850#ifdef DEBUG
1858 pr_debug("%s: -> bss=%p\n", __func__, best_bss); 1851 pr_debug("%s: -> bss=%p\n", __func__, best_bss);
1859 if (best_bss) { 1852 if (best_bss) {
1860 pr_debug("%s:addr=%s\n", __func__, 1853 pr_debug("%s:addr=%pM\n", __func__,
1861 print_mac(mac, &best_bss->hwinfo->bssid[2])); 1854 &best_bss->hwinfo->bssid[2]);
1862 } 1855 }
1863#endif 1856#endif
1864 return best_bss; 1857 return best_bss;
diff --git a/drivers/net/ps3_gelic_wireless.h b/drivers/net/ps3_gelic_wireless.h
index 5339e0078d18..5b631c6c9775 100644
--- a/drivers/net/ps3_gelic_wireless.h
+++ b/drivers/net/ps3_gelic_wireless.h
@@ -164,8 +164,8 @@ struct gelic_eurus_scan_info {
164 __be16 security; 164 __be16 security;
165 u8 bssid[8]; /* last ETH_ALEN are valid. bssid[0],[1] are unused */ 165 u8 bssid[8]; /* last ETH_ALEN are valid. bssid[0],[1] are unused */
166 u8 essid[32]; /* IW_ESSID_MAX_SIZE */ 166 u8 essid[32]; /* IW_ESSID_MAX_SIZE */
167 u8 rate[16]; /* first MAX_RATES_LENGTH(12) are valid */ 167 u8 rate[16]; /* first 12 are valid */
168 u8 ext_rate[16]; /* first MAX_RATES_EX_LENGTH(16) are valid */ 168 u8 ext_rate[16]; /* first 16 are valid */
169 __be32 reserved1; 169 __be32 reserved1;
170 __be32 reserved2; 170 __be32 reserved2;
171 __be32 reserved3; 171 __be32 reserved3;
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 508452c02151..189ec29ac7a4 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2127,7 +2127,6 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2127 skb->protocol = eth_type_trans(skb, qdev->ndev); 2127 skb->protocol = eth_type_trans(skb, qdev->ndev);
2128 2128
2129 netif_receive_skb(skb); 2129 netif_receive_skb(skb);
2130 qdev->ndev->last_rx = jiffies;
2131 lrg_buf_cb2->skb = NULL; 2130 lrg_buf_cb2->skb = NULL;
2132 2131
2133 if (qdev->device_id == QL3022_DEVICE_ID) 2132 if (qdev->device_id == QL3022_DEVICE_ID)
@@ -2201,7 +2200,6 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2201 netif_receive_skb(skb2); 2200 netif_receive_skb(skb2);
2202 ndev->stats.rx_packets++; 2201 ndev->stats.rx_packets++;
2203 ndev->stats.rx_bytes += length; 2202 ndev->stats.rx_bytes += length;
2204 ndev->last_rx = jiffies;
2205 lrg_buf_cb2->skb = NULL; 2203 lrg_buf_cb2->skb = NULL;
2206 2204
2207 if (qdev->device_id == QL3022_DEVICE_ID) 2205 if (qdev->device_id == QL3022_DEVICE_ID)
@@ -2286,7 +2284,6 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2286static int ql_poll(struct napi_struct *napi, int budget) 2284static int ql_poll(struct napi_struct *napi, int budget)
2287{ 2285{
2288 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); 2286 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2289 struct net_device *ndev = qdev->ndev;
2290 int rx_cleaned = 0, tx_cleaned = 0; 2287 int rx_cleaned = 0, tx_cleaned = 0;
2291 unsigned long hw_flags; 2288 unsigned long hw_flags;
2292 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; 2289 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
@@ -2295,7 +2292,7 @@ static int ql_poll(struct napi_struct *napi, int budget)
2295 2292
2296 if (tx_cleaned + rx_cleaned != budget) { 2293 if (tx_cleaned + rx_cleaned != budget) {
2297 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 2294 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2298 __netif_rx_complete(ndev, napi); 2295 __netif_rx_complete(napi);
2299 ql_update_small_bufq_prod_index(qdev); 2296 ql_update_small_bufq_prod_index(qdev);
2300 ql_update_lrg_bufq_prod_index(qdev); 2297 ql_update_lrg_bufq_prod_index(qdev);
2301 writel(qdev->rsp_consumer_index, 2298 writel(qdev->rsp_consumer_index,
@@ -2354,8 +2351,8 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2354 spin_unlock(&qdev->adapter_lock); 2351 spin_unlock(&qdev->adapter_lock);
2355 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2352 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2356 ql_disable_interrupts(qdev); 2353 ql_disable_interrupts(qdev);
2357 if (likely(netif_rx_schedule_prep(ndev, &qdev->napi))) { 2354 if (likely(netif_rx_schedule_prep(&qdev->napi))) {
2358 __netif_rx_schedule(ndev, &qdev->napi); 2355 __netif_rx_schedule(&qdev->napi);
2359 } 2356 }
2360 } else { 2357 } else {
2361 return IRQ_NONE; 2358 return IRQ_NONE;
@@ -3520,7 +3517,6 @@ static void ql_display_dev_info(struct net_device *ndev)
3520{ 3517{
3521 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3518 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3522 struct pci_dev *pdev = qdev->pdev; 3519 struct pci_dev *pdev = qdev->pdev;
3523 DECLARE_MAC_BUF(mac);
3524 3520
3525 printk(KERN_INFO PFX 3521 printk(KERN_INFO PFX
3526 "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n", 3522 "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
@@ -3546,8 +3542,8 @@ static void ql_display_dev_info(struct net_device *ndev)
3546 3542
3547 if (netif_msg_probe(qdev)) 3543 if (netif_msg_probe(qdev))
3548 printk(KERN_INFO PFX 3544 printk(KERN_INFO PFX
3549 "%s: MAC address %s\n", 3545 "%s: MAC address %pM\n",
3550 ndev->name, print_mac(mac, ndev->dev_addr)); 3546 ndev->name, ndev->dev_addr);
3551} 3547}
3552 3548
3553static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) 3549static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
@@ -3903,13 +3899,24 @@ static void ql3xxx_timer(unsigned long ptr)
3903 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); 3899 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
3904} 3900}
3905 3901
3902static const struct net_device_ops ql3xxx_netdev_ops = {
3903 .ndo_open = ql3xxx_open,
3904 .ndo_start_xmit = ql3xxx_send,
3905 .ndo_stop = ql3xxx_close,
3906 .ndo_set_multicast_list = NULL, /* not allowed on NIC side */
3907 .ndo_change_mtu = eth_change_mtu,
3908 .ndo_validate_addr = eth_validate_addr,
3909 .ndo_set_mac_address = ql3xxx_set_mac_address,
3910 .ndo_tx_timeout = ql3xxx_tx_timeout,
3911};
3912
3906static int __devinit ql3xxx_probe(struct pci_dev *pdev, 3913static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3907 const struct pci_device_id *pci_entry) 3914 const struct pci_device_id *pci_entry)
3908{ 3915{
3909 struct net_device *ndev = NULL; 3916 struct net_device *ndev = NULL;
3910 struct ql3_adapter *qdev = NULL; 3917 struct ql3_adapter *qdev = NULL;
3911 static int cards_found = 0; 3918 static int cards_found = 0;
3912 int pci_using_dac, err; 3919 int uninitialized_var(pci_using_dac), err;
3913 3920
3914 err = pci_enable_device(pdev); 3921 err = pci_enable_device(pdev);
3915 if (err) { 3922 if (err) {
@@ -3969,9 +3976,7 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3969 if (qdev->device_id == QL3032_DEVICE_ID) 3976 if (qdev->device_id == QL3032_DEVICE_ID)
3970 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3977 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
3971 3978
3972 qdev->mem_map_registers = 3979 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
3973 ioremap_nocache(pci_resource_start(pdev, 1),
3974 pci_resource_len(qdev->pdev, 1));
3975 if (!qdev->mem_map_registers) { 3980 if (!qdev->mem_map_registers) {
3976 printk(KERN_ERR PFX "%s: cannot map device registers\n", 3981 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3977 pci_name(pdev)); 3982 pci_name(pdev));
@@ -3983,17 +3988,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3983 spin_lock_init(&qdev->hw_lock); 3988 spin_lock_init(&qdev->hw_lock);
3984 3989
3985 /* Set driver entry points */ 3990 /* Set driver entry points */
3986 ndev->open = ql3xxx_open; 3991 ndev->netdev_ops = &ql3xxx_netdev_ops;
3987 ndev->hard_start_xmit = ql3xxx_send;
3988 ndev->stop = ql3xxx_close;
3989 /* ndev->set_multicast_list
3990 * This device is one side of a two-function adapter
3991 * (NIC and iSCSI). Promiscuous mode setting/clearing is
3992 * not allowed from the NIC side.
3993 */
3994 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 3992 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3995 ndev->set_mac_address = ql3xxx_set_mac_address;
3996 ndev->tx_timeout = ql3xxx_tx_timeout;
3997 ndev->watchdog_timeo = 5 * HZ; 3993 ndev->watchdog_timeo = 5 * HZ;
3998 3994
3999 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3995 netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index b62fbd4bf00f..eefb81b13758 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -97,7 +97,7 @@ exit:
97 return status; 97 return status;
98} 98}
99 99
100void ql_update_stats(struct ql_adapter *qdev) 100static void ql_update_stats(struct ql_adapter *qdev)
101{ 101{
102 u32 i; 102 u32 i;
103 u64 data; 103 u64 data;
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index b83a9c9b6a97..718a7bd0cd1a 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -336,12 +336,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
336 (addr[5]); 336 (addr[5]);
337 337
338 QPRINTK(qdev, IFUP, INFO, 338 QPRINTK(qdev, IFUP, INFO,
339 "Adding %s address %02x:%02x:%02x:%02x:%02x:%02x" 339 "Adding %s address %pM"
340 " at index %d in the CAM.\n", 340 " at index %d in the CAM.\n",
341 ((type == 341 ((type ==
342 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" : 342 MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
343 "UNICAST"), addr[0], addr[1], addr[2], addr[3], 343 "UNICAST"), addr, index);
344 addr[4], addr[5], index);
345 344
346 status = 345 status =
347 ql_wait_reg_rdy(qdev, 346 ql_wait_reg_rdy(qdev,
@@ -643,7 +642,7 @@ static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
643 642
644} 643}
645 644
646int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data) 645static int ql_read_flash_word(struct ql_adapter *qdev, int offset, u32 *data)
647{ 646{
648 int status = 0; 647 int status = 0;
649 /* wait for reg to come ready */ 648 /* wait for reg to come ready */
@@ -833,7 +832,7 @@ end:
833} 832}
834 833
835/* Get the next large buffer. */ 834/* Get the next large buffer. */
836struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) 835static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
837{ 836{
838 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; 837 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
839 rx_ring->lbq_curr_idx++; 838 rx_ring->lbq_curr_idx++;
@@ -844,7 +843,7 @@ struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
844} 843}
845 844
846/* Get the next small buffer. */ 845/* Get the next small buffer. */
847struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) 846static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
848{ 847{
849 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx]; 848 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
850 rx_ring->sbq_curr_idx++; 849 rx_ring->sbq_curr_idx++;
@@ -1167,7 +1166,7 @@ map_error:
1167 return NETDEV_TX_BUSY; 1166 return NETDEV_TX_BUSY;
1168} 1167}
1169 1168
1170void ql_realign_skb(struct sk_buff *skb, int len) 1169static void ql_realign_skb(struct sk_buff *skb, int len)
1171{ 1170{
1172 void *temp_addr = skb->data; 1171 void *temp_addr = skb->data;
1173 1172
@@ -1452,7 +1451,6 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1452 "Passing a normal packet upstream.\n"); 1451 "Passing a normal packet upstream.\n");
1453 netif_rx(skb); 1452 netif_rx(skb);
1454 } 1453 }
1455 ndev->last_rx = jiffies;
1456} 1454}
1457 1455
1458/* Process an outbound completion from an rx ring. */ 1456/* Process an outbound completion from an rx ring. */
@@ -1649,7 +1647,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1649 rx_ring->cq_id); 1647 rx_ring->cq_id);
1650 1648
1651 if (work_done < budget) { 1649 if (work_done < budget) {
1652 __netif_rx_complete(qdev->ndev, napi); 1650 __netif_rx_complete(napi);
1653 ql_enable_completion_interrupt(qdev, rx_ring->irq); 1651 ql_enable_completion_interrupt(qdev, rx_ring->irq);
1654 } 1652 }
1655 return work_done; 1653 return work_done;
@@ -1734,8 +1732,7 @@ static irqreturn_t qlge_msix_tx_isr(int irq, void *dev_id)
1734static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) 1732static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
1735{ 1733{
1736 struct rx_ring *rx_ring = dev_id; 1734 struct rx_ring *rx_ring = dev_id;
1737 struct ql_adapter *qdev = rx_ring->qdev; 1735 netif_rx_schedule(&rx_ring->napi);
1738 netif_rx_schedule(qdev->ndev, &rx_ring->napi);
1739 return IRQ_HANDLED; 1736 return IRQ_HANDLED;
1740} 1737}
1741 1738
@@ -1821,8 +1818,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
1821 &rx_ring->rx_work, 1818 &rx_ring->rx_work,
1822 0); 1819 0);
1823 else 1820 else
1824 netif_rx_schedule(qdev->ndev, 1821 netif_rx_schedule(&rx_ring->napi);
1825 &rx_ring->napi);
1826 work_done++; 1822 work_done++;
1827 } 1823 }
1828 } 1824 }
@@ -2071,7 +2067,7 @@ err:
2071 return -ENOMEM; 2067 return -ENOMEM;
2072} 2068}
2073 2069
2074void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) 2070static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2075{ 2071{
2076 int i; 2072 int i;
2077 struct bq_desc *lbq_desc; 2073 struct bq_desc *lbq_desc;
@@ -2134,7 +2130,7 @@ mem_error:
2134 return -ENOMEM; 2130 return -ENOMEM;
2135} 2131}
2136 2132
2137void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) 2133static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2138{ 2134{
2139 int i; 2135 int i;
2140 struct bq_desc *sbq_desc; 2136 struct bq_desc *sbq_desc;
@@ -2469,7 +2465,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2469 rx_ring->sbq_base_indirect_dma = shadow_reg_dma; 2465 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
2470 2466
2471 /* PCI doorbell mem area + 0x00 for consumer index register */ 2467 /* PCI doorbell mem area + 0x00 for consumer index register */
2472 rx_ring->cnsmr_idx_db_reg = (u32 *) doorbell_area; 2468 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
2473 rx_ring->cnsmr_idx = 0; 2469 rx_ring->cnsmr_idx = 0;
2474 rx_ring->curr_entry = rx_ring->cq_base; 2470 rx_ring->curr_entry = rx_ring->cq_base;
2475 2471
@@ -2477,10 +2473,10 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2477 rx_ring->valid_db_reg = doorbell_area + 0x04; 2473 rx_ring->valid_db_reg = doorbell_area + 0x04;
2478 2474
2479 /* PCI doorbell mem area + 0x18 for large buffer consumer */ 2475 /* PCI doorbell mem area + 0x18 for large buffer consumer */
2480 rx_ring->lbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x18); 2476 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
2481 2477
2482 /* PCI doorbell mem area + 0x1c */ 2478 /* PCI doorbell mem area + 0x1c */
2483 rx_ring->sbq_prod_idx_db_reg = (u32 *) (doorbell_area + 0x1c); 2479 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
2484 2480
2485 memset((void *)cqicb, 0, sizeof(struct cqicb)); 2481 memset((void *)cqicb, 0, sizeof(struct cqicb));
2486 cqicb->msix_vect = rx_ring->irq; 2482 cqicb->msix_vect = rx_ring->irq;
@@ -2611,7 +2607,7 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2611 * Assign doorbell registers for this tx_ring. 2607 * Assign doorbell registers for this tx_ring.
2612 */ 2608 */
2613 /* TX PCI doorbell mem area for tx producer index */ 2609 /* TX PCI doorbell mem area for tx producer index */
2614 tx_ring->prod_idx_db_reg = (u32 *) doorbell_area; 2610 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
2615 tx_ring->prod_idx = 0; 2611 tx_ring->prod_idx = 0;
2616 /* TX PCI doorbell mem area + 0x04 */ 2612 /* TX PCI doorbell mem area + 0x04 */
2617 tx_ring->valid_db_reg = doorbell_area + 0x04; 2613 tx_ring->valid_db_reg = doorbell_area + 0x04;
@@ -3127,11 +3123,7 @@ static void ql_display_dev_info(struct net_device *ndev)
3127 qdev->chip_rev_id >> 4 & 0x0000000f, 3123 qdev->chip_rev_id >> 4 & 0x0000000f,
3128 qdev->chip_rev_id >> 8 & 0x0000000f, 3124 qdev->chip_rev_id >> 8 & 0x0000000f,
3129 qdev->chip_rev_id >> 12 & 0x0000000f); 3125 qdev->chip_rev_id >> 12 & 0x0000000f);
3130 QPRINTK(qdev, PROBE, INFO, 3126 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
3131 "MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
3132 ndev->dev_addr[0], ndev->dev_addr[1],
3133 ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
3134 ndev->dev_addr[5]);
3135} 3127}
3136 3128
3137static int ql_adapter_down(struct ql_adapter *qdev) 3129static int ql_adapter_down(struct ql_adapter *qdev)
@@ -3156,7 +3148,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
3156 * a workqueue only if it's a single interrupt 3148 * a workqueue only if it's a single interrupt
3157 * environment (MSI/Legacy). 3149 * environment (MSI/Legacy).
3158 */ 3150 */
3159 for (i = 1; i > qdev->rx_ring_count; i++) { 3151 for (i = 1; i < qdev->rx_ring_count; i++) {
3160 rx_ring = &qdev->rx_ring[i]; 3152 rx_ring = &qdev->rx_ring[i];
3161 /* Only the RSS rings use NAPI on multi irq 3153 /* Only the RSS rings use NAPI on multi irq
3162 * environment. Outbound completion processing 3154 * environment. Outbound completion processing
@@ -3526,6 +3518,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
3526{ 3518{
3527 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 3519 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3528 struct sockaddr *addr = p; 3520 struct sockaddr *addr = p;
3521 int ret = 0;
3529 3522
3530 if (netif_running(ndev)) 3523 if (netif_running(ndev))
3531 return -EBUSY; 3524 return -EBUSY;
@@ -3538,11 +3531,11 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
3538 if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, 3531 if (ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
3539 MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */ 3532 MAC_ADDR_TYPE_CAM_MAC, qdev->func)) {/* Unicast */
3540 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); 3533 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
3541 return -1; 3534 ret = -1;
3542 } 3535 }
3543 spin_unlock(&qdev->hw_lock); 3536 spin_unlock(&qdev->hw_lock);
3544 3537
3545 return 0; 3538 return ret;
3546} 3539}
3547 3540
3548static void qlge_tx_timeout(struct net_device *ndev) 3541static void qlge_tx_timeout(struct net_device *ndev)
@@ -3592,7 +3585,7 @@ static void ql_release_all(struct pci_dev *pdev)
3592 qdev->q_workqueue = NULL; 3585 qdev->q_workqueue = NULL;
3593 } 3586 }
3594 if (qdev->reg_base) 3587 if (qdev->reg_base)
3595 iounmap((void *)qdev->reg_base); 3588 iounmap(qdev->reg_base);
3596 if (qdev->doorbell_area) 3589 if (qdev->doorbell_area)
3597 iounmap(qdev->doorbell_area); 3590 iounmap(qdev->doorbell_area);
3598 pci_release_regions(pdev); 3591 pci_release_regions(pdev);
@@ -3721,6 +3714,22 @@ err_out:
3721 return err; 3714 return err;
3722} 3715}
3723 3716
3717
3718static const struct net_device_ops qlge_netdev_ops = {
3719 .ndo_open = qlge_open,
3720 .ndo_stop = qlge_close,
3721 .ndo_start_xmit = qlge_send,
3722 .ndo_change_mtu = qlge_change_mtu,
3723 .ndo_get_stats = qlge_get_stats,
3724 .ndo_set_multicast_list = qlge_set_multicast_list,
3725 .ndo_set_mac_address = qlge_set_mac_address,
3726 .ndo_validate_addr = eth_validate_addr,
3727 .ndo_tx_timeout = qlge_tx_timeout,
3728 .ndo_vlan_rx_register = ql_vlan_rx_register,
3729 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid,
3730 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid,
3731};
3732
3724static int __devinit qlge_probe(struct pci_dev *pdev, 3733static int __devinit qlge_probe(struct pci_dev *pdev,
3725 const struct pci_device_id *pci_entry) 3734 const struct pci_device_id *pci_entry)
3726{ 3735{
@@ -3758,19 +3767,11 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
3758 */ 3767 */
3759 ndev->tx_queue_len = qdev->tx_ring_size; 3768 ndev->tx_queue_len = qdev->tx_ring_size;
3760 ndev->irq = pdev->irq; 3769 ndev->irq = pdev->irq;
3761 ndev->open = qlge_open; 3770
3762 ndev->stop = qlge_close; 3771 ndev->netdev_ops = &qlge_netdev_ops;
3763 ndev->hard_start_xmit = qlge_send;
3764 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); 3772 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
3765 ndev->change_mtu = qlge_change_mtu;
3766 ndev->get_stats = qlge_get_stats;
3767 ndev->set_multicast_list = qlge_set_multicast_list;
3768 ndev->set_mac_address = qlge_set_mac_address;
3769 ndev->tx_timeout = qlge_tx_timeout;
3770 ndev->watchdog_timeo = 10 * HZ; 3773 ndev->watchdog_timeo = 10 * HZ;
3771 ndev->vlan_rx_register = ql_vlan_rx_register; 3774
3772 ndev->vlan_rx_add_vid = ql_vlan_rx_add_vid;
3773 ndev->vlan_rx_kill_vid = ql_vlan_rx_kill_vid;
3774 err = register_netdev(ndev); 3775 err = register_netdev(ndev);
3775 if (err) { 3776 if (err) {
3776 dev_err(&pdev->dev, "net device registration failed.\n"); 3777 dev_err(&pdev->dev, "net device registration failed.\n");
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 24fe344bcf1f..fa31891b6e62 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -19,7 +19,7 @@ exit:
19 return status; 19 return status;
20} 20}
21 21
22int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) 22static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
23{ 23{
24 int i, status; 24 int i, status;
25 25
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 34fe7ef8e5ed..53bbddfc8c95 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
49#include <asm/processor.h> 49#include <asm/processor.h>
50 50
51#define DRV_NAME "r6040" 51#define DRV_NAME "r6040"
52#define DRV_VERSION "0.18" 52#define DRV_VERSION "0.19"
53#define DRV_RELDATE "13Jul2008" 53#define DRV_RELDATE "18Dec2008"
54 54
55/* PHY CHIP Address */ 55/* PHY CHIP Address */
56#define PHY1_ADDR 1 /* For MAC1 */ 56#define PHY1_ADDR 1 /* For MAC1 */
@@ -214,7 +214,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
214 /* Wait for the read bit to be cleared */ 214 /* Wait for the read bit to be cleared */
215 while (limit--) { 215 while (limit--) {
216 cmd = ioread16(ioaddr + MMDIO); 216 cmd = ioread16(ioaddr + MMDIO);
217 if (cmd & MDIO_READ) 217 if (!(cmd & MDIO_READ))
218 break; 218 break;
219 } 219 }
220 220
@@ -233,7 +233,7 @@ static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val
233 /* Wait for the write bit to be cleared */ 233 /* Wait for the write bit to be cleared */
234 while (limit--) { 234 while (limit--) {
235 cmd = ioread16(ioaddr + MMDIO); 235 cmd = ioread16(ioaddr + MMDIO);
236 if (cmd & MDIO_WRITE) 236 if (!(cmd & MDIO_WRITE))
237 break; 237 break;
238 } 238 }
239} 239}
@@ -598,7 +598,6 @@ static int r6040_rx(struct net_device *dev, int limit)
598 598
599 /* Send to upper layer */ 599 /* Send to upper layer */
600 netif_receive_skb(skb_ptr); 600 netif_receive_skb(skb_ptr);
601 dev->last_rx = jiffies;
602 dev->stats.rx_packets++; 601 dev->stats.rx_packets++;
603 dev->stats.rx_bytes += descptr->len - 4; 602 dev->stats.rx_bytes += descptr->len - 4;
604 603
@@ -668,7 +667,7 @@ static int r6040_poll(struct napi_struct *napi, int budget)
668 work_done = r6040_rx(dev, budget); 667 work_done = r6040_rx(dev, budget);
669 668
670 if (work_done < budget) { 669 if (work_done < budget) {
671 netif_rx_complete(dev, napi); 670 netif_rx_complete(napi);
672 /* Enable RX interrupt */ 671 /* Enable RX interrupt */
673 iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER); 672 iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
674 } 673 }
@@ -681,8 +680,10 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
681 struct net_device *dev = dev_id; 680 struct net_device *dev = dev_id;
682 struct r6040_private *lp = netdev_priv(dev); 681 struct r6040_private *lp = netdev_priv(dev);
683 void __iomem *ioaddr = lp->base; 682 void __iomem *ioaddr = lp->base;
684 u16 status; 683 u16 misr, status;
685 684
685 /* Save MIER */
686 misr = ioread16(ioaddr + MIER);
686 /* Mask off RDC MAC interrupt */ 687 /* Mask off RDC MAC interrupt */
687 iowrite16(MSK_INT, ioaddr + MIER); 688 iowrite16(MSK_INT, ioaddr + MIER);
688 /* Read MISR status and clear */ 689 /* Read MISR status and clear */
@@ -702,14 +703,17 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
702 dev->stats.rx_fifo_errors++; 703 dev->stats.rx_fifo_errors++;
703 704
704 /* Mask off RX interrupt */ 705 /* Mask off RX interrupt */
705 iowrite16(ioread16(ioaddr + MIER) & ~RX_INTS, ioaddr + MIER); 706 misr &= ~RX_INTS;
706 netif_rx_schedule(dev, &lp->napi); 707 netif_rx_schedule(&lp->napi);
707 } 708 }
708 709
709 /* TX interrupt request */ 710 /* TX interrupt request */
710 if (status & TX_INTS) 711 if (status & TX_INTS)
711 r6040_tx(dev); 712 r6040_tx(dev);
712 713
714 /* Restore RDC MAC interrupt */
715 iowrite16(misr, ioaddr + MIER);
716
713 return IRQ_HANDLED; 717 return IRQ_HANDLED;
714} 718}
715 719
@@ -1030,13 +1034,28 @@ static u32 netdev_get_link(struct net_device *dev)
1030 return mii_link_ok(&rp->mii_if); 1034 return mii_link_ok(&rp->mii_if);
1031} 1035}
1032 1036
1033static struct ethtool_ops netdev_ethtool_ops = { 1037static const struct ethtool_ops netdev_ethtool_ops = {
1034 .get_drvinfo = netdev_get_drvinfo, 1038 .get_drvinfo = netdev_get_drvinfo,
1035 .get_settings = netdev_get_settings, 1039 .get_settings = netdev_get_settings,
1036 .set_settings = netdev_set_settings, 1040 .set_settings = netdev_set_settings,
1037 .get_link = netdev_get_link, 1041 .get_link = netdev_get_link,
1038}; 1042};
1039 1043
1044static const struct net_device_ops r6040_netdev_ops = {
1045 .ndo_open = r6040_open,
1046 .ndo_stop = r6040_close,
1047 .ndo_start_xmit = r6040_start_xmit,
1048 .ndo_get_stats = r6040_get_stats,
1049 .ndo_set_multicast_list = r6040_multicast_list,
1050 .ndo_change_mtu = eth_change_mtu,
1051 .ndo_validate_addr = eth_validate_addr,
1052 .ndo_do_ioctl = r6040_ioctl,
1053 .ndo_tx_timeout = r6040_tx_timeout,
1054#ifdef CONFIG_NET_POLL_CONTROLLER
1055 .ndo_poll_controller = r6040_poll_controller,
1056#endif
1057};
1058
1040static int __devinit r6040_init_one(struct pci_dev *pdev, 1059static int __devinit r6040_init_one(struct pci_dev *pdev,
1041 const struct pci_device_id *ent) 1060 const struct pci_device_id *ent)
1042{ 1061{
@@ -1128,18 +1147,10 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1128 lp->switch_sig = 0; 1147 lp->switch_sig = 0;
1129 1148
1130 /* The RDC-specific entries in the device structure. */ 1149 /* The RDC-specific entries in the device structure. */
1131 dev->open = &r6040_open; 1150 dev->netdev_ops = &r6040_netdev_ops;
1132 dev->hard_start_xmit = &r6040_start_xmit;
1133 dev->stop = &r6040_close;
1134 dev->get_stats = r6040_get_stats;
1135 dev->set_multicast_list = &r6040_multicast_list;
1136 dev->do_ioctl = &r6040_ioctl;
1137 dev->ethtool_ops = &netdev_ethtool_ops; 1151 dev->ethtool_ops = &netdev_ethtool_ops;
1138 dev->tx_timeout = &r6040_tx_timeout;
1139 dev->watchdog_timeo = TX_TIMEOUT; 1152 dev->watchdog_timeo = TX_TIMEOUT;
1140#ifdef CONFIG_NET_POLL_CONTROLLER 1153
1141 dev->poll_controller = r6040_poll_controller;
1142#endif
1143 netif_napi_add(dev, &lp->napi, r6040_poll, 64); 1154 netif_napi_add(dev, &lp->napi, r6040_poll, 64);
1144 lp->mii_if.dev = dev; 1155 lp->mii_if.dev = dev;
1145 lp->mii_if.mdio_read = r6040_mdio_read; 1156 lp->mii_if.mdio_read = r6040_mdio_read;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 4b7cb389dc49..2c73ca606b35 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -474,6 +474,7 @@ struct rtl8169_private {
474 void (*hw_start)(struct net_device *); 474 void (*hw_start)(struct net_device *);
475 unsigned int (*phy_reset_pending)(void __iomem *); 475 unsigned int (*phy_reset_pending)(void __iomem *);
476 unsigned int (*link_ok)(void __iomem *); 476 unsigned int (*link_ok)(void __iomem *);
477 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
477 int pcie_cap; 478 int pcie_cap;
478 struct delayed_work task; 479 struct delayed_work task;
479 unsigned features; 480 unsigned features;
@@ -1829,9 +1830,11 @@ static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1829 struct rtl8169_private *tp = netdev_priv(dev); 1830 struct rtl8169_private *tp = netdev_priv(dev);
1830 struct mii_ioctl_data *data = if_mii(ifr); 1831 struct mii_ioctl_data *data = if_mii(ifr);
1831 1832
1832 if (!netif_running(dev)) 1833 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
1833 return -ENODEV; 1834}
1834 1835
1836static int rtl_xmii_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
1837{
1835 switch (cmd) { 1838 switch (cmd) {
1836 case SIOCGMIIPHY: 1839 case SIOCGMIIPHY:
1837 data->phy_id = 32; /* Internal PHY */ 1840 data->phy_id = 32; /* Internal PHY */
@@ -1850,6 +1853,11 @@ static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1850 return -EOPNOTSUPP; 1853 return -EOPNOTSUPP;
1851} 1854}
1852 1855
1856static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
1857{
1858 return -EOPNOTSUPP;
1859}
1860
1853static const struct rtl_cfg_info { 1861static const struct rtl_cfg_info {
1854 void (*hw_start)(struct net_device *); 1862 void (*hw_start)(struct net_device *);
1855 unsigned int region; 1863 unsigned int region;
@@ -1915,6 +1923,26 @@ static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
1915 } 1923 }
1916} 1924}
1917 1925
1926static const struct net_device_ops rtl8169_netdev_ops = {
1927 .ndo_open = rtl8169_open,
1928 .ndo_stop = rtl8169_close,
1929 .ndo_get_stats = rtl8169_get_stats,
1930 .ndo_start_xmit = rtl8169_start_xmit,
1931 .ndo_tx_timeout = rtl8169_tx_timeout,
1932 .ndo_validate_addr = eth_validate_addr,
1933 .ndo_change_mtu = rtl8169_change_mtu,
1934 .ndo_set_mac_address = rtl_set_mac_address,
1935 .ndo_do_ioctl = rtl8169_ioctl,
1936 .ndo_set_multicast_list = rtl_set_rx_mode,
1937#ifdef CONFIG_R8169_VLAN
1938 .ndo_vlan_rx_register = rtl8169_vlan_rx_register,
1939#endif
1940#ifdef CONFIG_NET_POLL_CONTROLLER
1941 .ndo_poll_controller = rtl8169_netpoll,
1942#endif
1943
1944};
1945
1918static int __devinit 1946static int __devinit
1919rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1947rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1920{ 1948{
@@ -1941,6 +1969,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1941 } 1969 }
1942 1970
1943 SET_NETDEV_DEV(dev, &pdev->dev); 1971 SET_NETDEV_DEV(dev, &pdev->dev);
1972 dev->netdev_ops = &rtl8169_netdev_ops;
1944 tp = netdev_priv(dev); 1973 tp = netdev_priv(dev);
1945 tp->dev = dev; 1974 tp->dev = dev;
1946 tp->pci_dev = pdev; 1975 tp->pci_dev = pdev;
@@ -2076,6 +2105,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2076 tp->phy_reset_enable = rtl8169_tbi_reset_enable; 2105 tp->phy_reset_enable = rtl8169_tbi_reset_enable;
2077 tp->phy_reset_pending = rtl8169_tbi_reset_pending; 2106 tp->phy_reset_pending = rtl8169_tbi_reset_pending;
2078 tp->link_ok = rtl8169_tbi_link_ok; 2107 tp->link_ok = rtl8169_tbi_link_ok;
2108 tp->do_ioctl = rtl_tbi_ioctl;
2079 2109
2080 tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */ 2110 tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
2081 } else { 2111 } else {
@@ -2084,8 +2114,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2084 tp->phy_reset_enable = rtl8169_xmii_reset_enable; 2114 tp->phy_reset_enable = rtl8169_xmii_reset_enable;
2085 tp->phy_reset_pending = rtl8169_xmii_reset_pending; 2115 tp->phy_reset_pending = rtl8169_xmii_reset_pending;
2086 tp->link_ok = rtl8169_xmii_link_ok; 2116 tp->link_ok = rtl8169_xmii_link_ok;
2087 2117 tp->do_ioctl = rtl_xmii_ioctl;
2088 dev->do_ioctl = rtl8169_ioctl;
2089 } 2118 }
2090 2119
2091 spin_lock_init(&tp->lock); 2120 spin_lock_init(&tp->lock);
@@ -2097,28 +2126,15 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2097 dev->dev_addr[i] = RTL_R8(MAC0 + i); 2126 dev->dev_addr[i] = RTL_R8(MAC0 + i);
2098 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 2127 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
2099 2128
2100 dev->open = rtl8169_open;
2101 dev->hard_start_xmit = rtl8169_start_xmit;
2102 dev->get_stats = rtl8169_get_stats;
2103 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); 2129 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
2104 dev->stop = rtl8169_close;
2105 dev->tx_timeout = rtl8169_tx_timeout;
2106 dev->set_multicast_list = rtl_set_rx_mode;
2107 dev->watchdog_timeo = RTL8169_TX_TIMEOUT; 2130 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
2108 dev->irq = pdev->irq; 2131 dev->irq = pdev->irq;
2109 dev->base_addr = (unsigned long) ioaddr; 2132 dev->base_addr = (unsigned long) ioaddr;
2110 dev->change_mtu = rtl8169_change_mtu;
2111 dev->set_mac_address = rtl_set_mac_address;
2112 2133
2113 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); 2134 netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
2114 2135
2115#ifdef CONFIG_R8169_VLAN 2136#ifdef CONFIG_R8169_VLAN
2116 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 2137 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2117 dev->vlan_rx_register = rtl8169_vlan_rx_register;
2118#endif
2119
2120#ifdef CONFIG_NET_POLL_CONTROLLER
2121 dev->poll_controller = rtl8169_netpoll;
2122#endif 2138#endif
2123 2139
2124 tp->intr_mask = 0xffff; 2140 tp->intr_mask = 0xffff;
@@ -3484,7 +3500,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
3484 if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0) 3500 if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0)
3485 netif_receive_skb(skb); 3501 netif_receive_skb(skb);
3486 3502
3487 dev->last_rx = jiffies;
3488 dev->stats.rx_bytes += pkt_size; 3503 dev->stats.rx_bytes += pkt_size;
3489 dev->stats.rx_packets++; 3504 dev->stats.rx_packets++;
3490 } 3505 }
@@ -3566,8 +3581,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
3566 RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); 3581 RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
3567 tp->intr_mask = ~tp->napi_event; 3582 tp->intr_mask = ~tp->napi_event;
3568 3583
3569 if (likely(netif_rx_schedule_prep(dev, &tp->napi))) 3584 if (likely(netif_rx_schedule_prep(&tp->napi)))
3570 __netif_rx_schedule(dev, &tp->napi); 3585 __netif_rx_schedule(&tp->napi);
3571 else if (netif_msg_intr(tp)) { 3586 else if (netif_msg_intr(tp)) {
3572 printk(KERN_INFO "%s: interrupt %04x in poll\n", 3587 printk(KERN_INFO "%s: interrupt %04x in poll\n",
3573 dev->name, status); 3588 dev->name, status);
@@ -3588,7 +3603,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
3588 rtl8169_tx_interrupt(dev, tp, ioaddr); 3603 rtl8169_tx_interrupt(dev, tp, ioaddr);
3589 3604
3590 if (work_done < budget) { 3605 if (work_done < budget) {
3591 netif_rx_complete(dev, napi); 3606 netif_rx_complete(napi);
3592 tp->intr_mask = 0xffff; 3607 tp->intr_mask = 0xffff;
3593 /* 3608 /*
3594 * 20040426: the barrier is not strictly required but the 3609 * 20040426: the barrier is not strictly required but the
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 2b8fd68bc516..a6fd27a2cc3d 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -94,7 +94,7 @@ static int rionet_rx_clean(struct net_device *ndev)
94{ 94{
95 int i; 95 int i;
96 int error = 0; 96 int error = 0;
97 struct rionet_private *rnet = ndev->priv; 97 struct rionet_private *rnet = netdev_priv(ndev);
98 void *data; 98 void *data;
99 99
100 i = rnet->rx_slot; 100 i = rnet->rx_slot;
@@ -132,7 +132,7 @@ static int rionet_rx_clean(struct net_device *ndev)
132static void rionet_rx_fill(struct net_device *ndev, int end) 132static void rionet_rx_fill(struct net_device *ndev, int end)
133{ 133{
134 int i; 134 int i;
135 struct rionet_private *rnet = ndev->priv; 135 struct rionet_private *rnet = netdev_priv(ndev);
136 136
137 i = rnet->rx_slot; 137 i = rnet->rx_slot;
138 do { 138 do {
@@ -151,7 +151,7 @@ static void rionet_rx_fill(struct net_device *ndev, int end)
151static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev, 151static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
152 struct rio_dev *rdev) 152 struct rio_dev *rdev)
153{ 153{
154 struct rionet_private *rnet = ndev->priv; 154 struct rionet_private *rnet = netdev_priv(ndev);
155 155
156 rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len); 156 rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
157 rnet->tx_skb[rnet->tx_slot] = skb; 157 rnet->tx_skb[rnet->tx_slot] = skb;
@@ -175,7 +175,7 @@ static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
175static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 175static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
176{ 176{
177 int i; 177 int i;
178 struct rionet_private *rnet = ndev->priv; 178 struct rionet_private *rnet = netdev_priv(ndev);
179 struct ethhdr *eth = (struct ethhdr *)skb->data; 179 struct ethhdr *eth = (struct ethhdr *)skb->data;
180 u16 destid; 180 u16 destid;
181 unsigned long flags; 181 unsigned long flags;
@@ -215,7 +215,7 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
215 u16 info) 215 u16 info)
216{ 216{
217 struct net_device *ndev = dev_id; 217 struct net_device *ndev = dev_id;
218 struct rionet_private *rnet = ndev->priv; 218 struct rionet_private *rnet = netdev_priv(ndev);
219 struct rionet_peer *peer; 219 struct rionet_peer *peer;
220 220
221 if (netif_msg_intr(rnet)) 221 if (netif_msg_intr(rnet))
@@ -243,7 +243,7 @@ static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox
243{ 243{
244 int n; 244 int n;
245 struct net_device *ndev = dev_id; 245 struct net_device *ndev = dev_id;
246 struct rionet_private *rnet = (struct rionet_private *)ndev->priv; 246 struct rionet_private *rnet = netdev_priv(ndev);
247 247
248 if (netif_msg_intr(rnet)) 248 if (netif_msg_intr(rnet))
249 printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n", 249 printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
@@ -258,7 +258,7 @@ static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox
258static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot) 258static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
259{ 259{
260 struct net_device *ndev = dev_id; 260 struct net_device *ndev = dev_id;
261 struct rionet_private *rnet = ndev->priv; 261 struct rionet_private *rnet = netdev_priv(ndev);
262 262
263 spin_lock(&rnet->lock); 263 spin_lock(&rnet->lock);
264 264
@@ -287,7 +287,7 @@ static int rionet_open(struct net_device *ndev)
287 int i, rc = 0; 287 int i, rc = 0;
288 struct rionet_peer *peer, *tmp; 288 struct rionet_peer *peer, *tmp;
289 u32 pwdcsr; 289 u32 pwdcsr;
290 struct rionet_private *rnet = ndev->priv; 290 struct rionet_private *rnet = netdev_priv(ndev);
291 291
292 if (netif_msg_ifup(rnet)) 292 if (netif_msg_ifup(rnet))
293 printk(KERN_INFO "%s: open\n", DRV_NAME); 293 printk(KERN_INFO "%s: open\n", DRV_NAME);
@@ -351,7 +351,7 @@ static int rionet_open(struct net_device *ndev)
351 351
352static int rionet_close(struct net_device *ndev) 352static int rionet_close(struct net_device *ndev)
353{ 353{
354 struct rionet_private *rnet = (struct rionet_private *)ndev->priv; 354 struct rionet_private *rnet = netdev_priv(ndev);
355 struct rionet_peer *peer, *tmp; 355 struct rionet_peer *peer, *tmp;
356 int i; 356 int i;
357 357
@@ -400,7 +400,7 @@ static void rionet_remove(struct rio_dev *rdev)
400static void rionet_get_drvinfo(struct net_device *ndev, 400static void rionet_get_drvinfo(struct net_device *ndev,
401 struct ethtool_drvinfo *info) 401 struct ethtool_drvinfo *info)
402{ 402{
403 struct rionet_private *rnet = ndev->priv; 403 struct rionet_private *rnet = netdev_priv(ndev);
404 404
405 strcpy(info->driver, DRV_NAME); 405 strcpy(info->driver, DRV_NAME);
406 strcpy(info->version, DRV_VERSION); 406 strcpy(info->version, DRV_VERSION);
@@ -410,14 +410,14 @@ static void rionet_get_drvinfo(struct net_device *ndev,
410 410
411static u32 rionet_get_msglevel(struct net_device *ndev) 411static u32 rionet_get_msglevel(struct net_device *ndev)
412{ 412{
413 struct rionet_private *rnet = ndev->priv; 413 struct rionet_private *rnet = netdev_priv(ndev);
414 414
415 return rnet->msg_enable; 415 return rnet->msg_enable;
416} 416}
417 417
418static void rionet_set_msglevel(struct net_device *ndev, u32 value) 418static void rionet_set_msglevel(struct net_device *ndev, u32 value)
419{ 419{
420 struct rionet_private *rnet = ndev->priv; 420 struct rionet_private *rnet = netdev_priv(ndev);
421 421
422 rnet->msg_enable = value; 422 rnet->msg_enable = value;
423} 423}
@@ -435,7 +435,6 @@ static int rionet_setup_netdev(struct rio_mport *mport)
435 struct net_device *ndev = NULL; 435 struct net_device *ndev = NULL;
436 struct rionet_private *rnet; 436 struct rionet_private *rnet;
437 u16 device_id; 437 u16 device_id;
438 DECLARE_MAC_BUF(mac);
439 438
440 /* Allocate our net_device structure */ 439 /* Allocate our net_device structure */
441 ndev = alloc_etherdev(sizeof(struct rionet_private)); 440 ndev = alloc_etherdev(sizeof(struct rionet_private));
@@ -456,7 +455,7 @@ static int rionet_setup_netdev(struct rio_mport *mport)
456 RIO_MAX_ROUTE_ENTRIES(mport->sys_size)); 455 RIO_MAX_ROUTE_ENTRIES(mport->sys_size));
457 456
458 /* Set up private area */ 457 /* Set up private area */
459 rnet = (struct rionet_private *)ndev->priv; 458 rnet = netdev_priv(ndev);
460 rnet->mport = mport; 459 rnet->mport = mport;
461 460
462 /* Set the default MAC address */ 461 /* Set the default MAC address */
@@ -485,12 +484,12 @@ static int rionet_setup_netdev(struct rio_mport *mport)
485 if (rc != 0) 484 if (rc != 0)
486 goto out; 485 goto out;
487 486
488 printk("%s: %s %s Version %s, MAC %s\n", 487 printk("%s: %s %s Version %s, MAC %pM\n",
489 ndev->name, 488 ndev->name,
490 DRV_NAME, 489 DRV_NAME,
491 DRV_DESC, 490 DRV_DESC,
492 DRV_VERSION, 491 DRV_VERSION,
493 print_mac(mac, ndev->dev_addr)); 492 ndev->dev_addr);
494 493
495 out: 494 out:
496 return rc; 495 return rc;
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 3dd8f1342f70..d890829a9acc 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -63,6 +63,16 @@ MODULE_LICENSE("GPL");
63 63
64static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n"; 64static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
65 65
66
67static const struct net_device_ops rr_netdev_ops = {
68 .ndo_open = rr_open,
69 .ndo_stop = rr_close,
70 .ndo_do_ioctl = rr_ioctl,
71 .ndo_start_xmit = rr_start_xmit,
72 .ndo_change_mtu = hippi_change_mtu,
73 .ndo_set_mac_address = hippi_mac_addr,
74};
75
66/* 76/*
67 * Implementation notes: 77 * Implementation notes:
68 * 78 *
@@ -115,10 +125,7 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
115 spin_lock_init(&rrpriv->lock); 125 spin_lock_init(&rrpriv->lock);
116 126
117 dev->irq = pdev->irq; 127 dev->irq = pdev->irq;
118 dev->open = &rr_open; 128 dev->netdev_ops = &rr_netdev_ops;
119 dev->hard_start_xmit = &rr_start_xmit;
120 dev->stop = &rr_close;
121 dev->do_ioctl = &rr_ioctl;
122 129
123 dev->base_addr = pci_resource_start(pdev, 0); 130 dev->base_addr = pci_resource_start(pdev, 0);
124 131
@@ -511,7 +518,6 @@ static int __devinit rr_init(struct net_device *dev)
511 struct rr_private *rrpriv; 518 struct rr_private *rrpriv;
512 struct rr_regs __iomem *regs; 519 struct rr_regs __iomem *regs;
513 u32 sram_size, rev; 520 u32 sram_size, rev;
514 DECLARE_MAC_BUF(mac);
515 521
516 rrpriv = netdev_priv(dev); 522 rrpriv = netdev_priv(dev);
517 regs = rrpriv->regs; 523 regs = rrpriv->regs;
@@ -549,7 +555,7 @@ static int __devinit rr_init(struct net_device *dev)
549 *(__be32 *)(dev->dev_addr+2) = 555 *(__be32 *)(dev->dev_addr+2) =
550 htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4]))); 556 htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));
551 557
552 printk(" MAC: %s\n", print_mac(mac, dev->dev_addr)); 558 printk(" MAC: %pM\n", dev->dev_addr);
553 559
554 sram_size = rr_read_eeprom_word(rrpriv, 8); 560 sram_size = rr_read_eeprom_word(rrpriv, 8);
555 printk(" SRAM size 0x%06x\n", sram_size); 561 printk(" SRAM size 0x%06x\n", sram_size);
@@ -1006,7 +1012,6 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
1006 1012
1007 netif_rx(skb); /* send it up */ 1013 netif_rx(skb); /* send it up */
1008 1014
1009 dev->last_rx = jiffies;
1010 dev->stats.rx_packets++; 1015 dev->stats.rx_packets++;
1011 dev->stats.rx_bytes += pkt_len; 1016 dev->stats.rx_bytes += pkt_len;
1012 } 1017 }
@@ -1708,9 +1713,3 @@ static void __exit rr_cleanup_module(void)
1708 1713
1709module_init(rr_init_module); 1714module_init(rr_init_module);
1710module_exit(rr_cleanup_module); 1715module_exit(rr_cleanup_module);
1711
1712/*
1713 * Local variables:
1714 * compile-command: "gcc -D__KERNEL__ -I../../include -Wall -Wstrict-prototypes -O2 -pipe -fomit-frame-pointer -fno-strength-reduce -m486 -malign-loops=2 -malign-jumps=2 -malign-functions=2 -DMODULE -DMODVERSIONS -include ../../include/linux/modversions.h -c rrunner.c"
1715 * End:
1716 */
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 6a1375f9cbb8..f5c57c059bca 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -352,12 +352,13 @@ static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
352 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32); 352 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40); 353 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
354} 354}
355
355/* Add the vlan */ 356/* Add the vlan */
356static void s2io_vlan_rx_register(struct net_device *dev, 357static void s2io_vlan_rx_register(struct net_device *dev,
357 struct vlan_group *grp) 358 struct vlan_group *grp)
358{ 359{
359 int i; 360 int i;
360 struct s2io_nic *nic = dev->priv; 361 struct s2io_nic *nic = netdev_priv(dev);
361 unsigned long flags[MAX_TX_FIFOS]; 362 unsigned long flags[MAX_TX_FIFOS];
362 struct mac_info *mac_control = &nic->mac_control; 363 struct mac_info *mac_control = &nic->mac_control;
363 struct config_param *config = &nic->config; 364 struct config_param *config = &nic->config;
@@ -372,10 +373,10 @@ static void s2io_vlan_rx_register(struct net_device *dev,
372} 373}
373 374
374/* Unregister the vlan */ 375/* Unregister the vlan */
375static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) 376static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
376{ 377{
377 int i; 378 int i;
378 struct s2io_nic *nic = dev->priv; 379 struct s2io_nic *nic = netdev_priv(dev);
379 unsigned long flags[MAX_TX_FIFOS]; 380 unsigned long flags[MAX_TX_FIFOS];
380 struct mac_info *mac_control = &nic->mac_control; 381 struct mac_info *mac_control = &nic->mac_control;
381 struct config_param *config = &nic->config; 382 struct config_param *config = &nic->config;
@@ -2837,7 +2838,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
2837 int pkts_processed = 0; 2838 int pkts_processed = 0;
2838 u8 __iomem *addr = NULL; 2839 u8 __iomem *addr = NULL;
2839 u8 val8 = 0; 2840 u8 val8 = 0;
2840 struct s2io_nic *nic = dev->priv; 2841 struct s2io_nic *nic = netdev_priv(dev);
2841 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2842 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2842 int budget_org = budget; 2843 int budget_org = budget;
2843 2844
@@ -2851,7 +2852,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
2851 s2io_chk_rx_buffers(nic, ring); 2852 s2io_chk_rx_buffers(nic, ring);
2852 2853
2853 if (pkts_processed < budget_org) { 2854 if (pkts_processed < budget_org) {
2854 netif_rx_complete(dev, napi); 2855 netif_rx_complete(napi);
2855 /*Re Enable MSI-Rx Vector*/ 2856 /*Re Enable MSI-Rx Vector*/
2856 addr = (u8 __iomem *)&bar0->xmsi_mask_reg; 2857 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
2857 addr += 7 - ring->ring_no; 2858 addr += 7 - ring->ring_no;
@@ -2865,7 +2866,6 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
2865{ 2866{
2866 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); 2867 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2867 struct ring_info *ring; 2868 struct ring_info *ring;
2868 struct net_device *dev = nic->dev;
2869 struct config_param *config; 2869 struct config_param *config;
2870 struct mac_info *mac_control; 2870 struct mac_info *mac_control;
2871 int pkts_processed = 0; 2871 int pkts_processed = 0;
@@ -2889,7 +2889,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
2889 break; 2889 break;
2890 } 2890 }
2891 if (pkts_processed < budget_org) { 2891 if (pkts_processed < budget_org) {
2892 netif_rx_complete(dev, napi); 2892 netif_rx_complete(napi);
2893 /* Re enable the Rx interrupts for the ring */ 2893 /* Re enable the Rx interrupts for the ring */
2894 writeq(0, &bar0->rx_traffic_mask); 2894 writeq(0, &bar0->rx_traffic_mask);
2895 readl(&bar0->rx_traffic_mask); 2895 readl(&bar0->rx_traffic_mask);
@@ -2909,7 +2909,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
2909 */ 2909 */
2910static void s2io_netpoll(struct net_device *dev) 2910static void s2io_netpoll(struct net_device *dev)
2911{ 2911{
2912 struct s2io_nic *nic = dev->priv; 2912 struct s2io_nic *nic = netdev_priv(dev);
2913 struct mac_info *mac_control; 2913 struct mac_info *mac_control;
2914 struct config_param *config; 2914 struct config_param *config;
2915 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2915 struct XENA_dev_config __iomem *bar0 = nic->bar0;
@@ -3171,7 +3171,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
3171static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev) 3171static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3172{ 3172{
3173 u64 val64 = 0x0; 3173 u64 val64 = 0x0;
3174 struct s2io_nic *sp = dev->priv; 3174 struct s2io_nic *sp = netdev_priv(dev);
3175 struct XENA_dev_config __iomem *bar0 = sp->bar0; 3175 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3176 3176
3177 //address transaction 3177 //address transaction
@@ -3220,7 +3220,7 @@ static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3220{ 3220{
3221 u64 val64 = 0x0; 3221 u64 val64 = 0x0;
3222 u64 rval64 = 0x0; 3222 u64 rval64 = 0x0;
3223 struct s2io_nic *sp = dev->priv; 3223 struct s2io_nic *sp = netdev_priv(dev);
3224 struct XENA_dev_config __iomem *bar0 = sp->bar0; 3224 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3225 3225
3226 /* address transaction */ 3226 /* address transaction */
@@ -3324,7 +3324,7 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
3324 u64 val64 = 0x0; 3324 u64 val64 = 0x0;
3325 u64 addr = 0x0; 3325 u64 addr = 0x0;
3326 3326
3327 struct s2io_nic *sp = dev->priv; 3327 struct s2io_nic *sp = netdev_priv(dev);
3328 struct stat_block *stat_info = sp->mac_control.stats_info; 3328 struct stat_block *stat_info = sp->mac_control.stats_info;
3329 3329
3330 /* Check the communication with the MDIO slave */ 3330 /* Check the communication with the MDIO slave */
@@ -3990,7 +3990,7 @@ static void remove_inta_isr(struct s2io_nic *sp)
3990 3990
3991static int s2io_open(struct net_device *dev) 3991static int s2io_open(struct net_device *dev)
3992{ 3992{
3993 struct s2io_nic *sp = dev->priv; 3993 struct s2io_nic *sp = netdev_priv(dev);
3994 int err = 0; 3994 int err = 0;
3995 3995
3996 /* 3996 /*
@@ -4048,7 +4048,7 @@ hw_init_failed:
4048 4048
4049static int s2io_close(struct net_device *dev) 4049static int s2io_close(struct net_device *dev)
4050{ 4050{
4051 struct s2io_nic *sp = dev->priv; 4051 struct s2io_nic *sp = netdev_priv(dev);
4052 struct config_param *config = &sp->config; 4052 struct config_param *config = &sp->config;
4053 u64 tmp64; 4053 u64 tmp64;
4054 int offset; 4054 int offset;
@@ -4087,7 +4087,7 @@ static int s2io_close(struct net_device *dev)
4087 4087
4088static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) 4088static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4089{ 4089{
4090 struct s2io_nic *sp = dev->priv; 4090 struct s2io_nic *sp = netdev_priv(dev);
4091 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off; 4091 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4092 register u64 val64; 4092 register u64 val64;
4093 struct TxD *txdp; 4093 struct TxD *txdp;
@@ -4329,7 +4329,6 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4329 struct ring_info *ring = (struct ring_info *)dev_id; 4329 struct ring_info *ring = (struct ring_info *)dev_id;
4330 struct s2io_nic *sp = ring->nic; 4330 struct s2io_nic *sp = ring->nic;
4331 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4331 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4332 struct net_device *dev = sp->dev;
4333 4332
4334 if (unlikely(!is_s2io_card_up(sp))) 4333 if (unlikely(!is_s2io_card_up(sp)))
4335 return IRQ_HANDLED; 4334 return IRQ_HANDLED;
@@ -4343,7 +4342,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4343 val8 = (ring->ring_no == 0) ? 0x7f : 0xff; 4342 val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
4344 writeb(val8, addr); 4343 writeb(val8, addr);
4345 val8 = readb(addr); 4344 val8 = readb(addr);
4346 netif_rx_schedule(dev, &ring->napi); 4345 netif_rx_schedule(&ring->napi);
4347 } else { 4346 } else {
4348 rx_intr_handler(ring, 0); 4347 rx_intr_handler(ring, 0);
4349 s2io_chk_rx_buffers(sp, ring); 4348 s2io_chk_rx_buffers(sp, ring);
@@ -4485,7 +4484,7 @@ static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4485static void s2io_handle_errors(void * dev_id) 4484static void s2io_handle_errors(void * dev_id)
4486{ 4485{
4487 struct net_device *dev = (struct net_device *) dev_id; 4486 struct net_device *dev = (struct net_device *) dev_id;
4488 struct s2io_nic *sp = dev->priv; 4487 struct s2io_nic *sp = netdev_priv(dev);
4489 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4488 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4490 u64 temp64 = 0,val64=0; 4489 u64 temp64 = 0,val64=0;
4491 int i = 0; 4490 int i = 0;
@@ -4752,7 +4751,7 @@ reset:
4752static irqreturn_t s2io_isr(int irq, void *dev_id) 4751static irqreturn_t s2io_isr(int irq, void *dev_id)
4753{ 4752{
4754 struct net_device *dev = (struct net_device *) dev_id; 4753 struct net_device *dev = (struct net_device *) dev_id;
4755 struct s2io_nic *sp = dev->priv; 4754 struct s2io_nic *sp = netdev_priv(dev);
4756 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4755 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4757 int i; 4756 int i;
4758 u64 reason = 0; 4757 u64 reason = 0;
@@ -4790,7 +4789,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4790 4789
4791 if (config->napi) { 4790 if (config->napi) {
4792 if (reason & GEN_INTR_RXTRAFFIC) { 4791 if (reason & GEN_INTR_RXTRAFFIC) {
4793 netif_rx_schedule(dev, &sp->napi); 4792 netif_rx_schedule(&sp->napi);
4794 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); 4793 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4795 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 4794 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4796 readl(&bar0->rx_traffic_int); 4795 readl(&bar0->rx_traffic_int);
@@ -4881,7 +4880,7 @@ static void s2io_updt_stats(struct s2io_nic *sp)
4881 4880
4882static struct net_device_stats *s2io_get_stats(struct net_device *dev) 4881static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4883{ 4882{
4884 struct s2io_nic *sp = dev->priv; 4883 struct s2io_nic *sp = netdev_priv(dev);
4885 struct mac_info *mac_control; 4884 struct mac_info *mac_control;
4886 struct config_param *config; 4885 struct config_param *config;
4887 int i; 4886 int i;
@@ -4948,7 +4947,7 @@ static void s2io_set_multicast(struct net_device *dev)
4948{ 4947{
4949 int i, j, prev_cnt; 4948 int i, j, prev_cnt;
4950 struct dev_mc_list *mclist; 4949 struct dev_mc_list *mclist;
4951 struct s2io_nic *sp = dev->priv; 4950 struct s2io_nic *sp = netdev_priv(dev);
4952 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4951 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4953 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask = 4952 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4954 0xfeffffffffffULL; 4953 0xfeffffffffffULL;
@@ -5112,7 +5111,7 @@ static void s2io_set_multicast(struct net_device *dev)
5112/* read from CAM unicast & multicast addresses and store it in 5111/* read from CAM unicast & multicast addresses and store it in
5113 * def_mac_addr structure 5112 * def_mac_addr structure
5114 */ 5113 */
5115void do_s2io_store_unicast_mc(struct s2io_nic *sp) 5114static void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5116{ 5115{
5117 int offset; 5116 int offset;
5118 u64 mac_addr = 0x0; 5117 u64 mac_addr = 0x0;
@@ -5277,7 +5276,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p)
5277 5276
5278static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr) 5277static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5279{ 5278{
5280 struct s2io_nic *sp = dev->priv; 5279 struct s2io_nic *sp = netdev_priv(dev);
5281 register u64 mac_addr = 0, perm_addr = 0; 5280 register u64 mac_addr = 0, perm_addr = 0;
5282 int i; 5281 int i;
5283 u64 tmp64; 5282 u64 tmp64;
@@ -5336,7 +5335,7 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5336static int s2io_ethtool_sset(struct net_device *dev, 5335static int s2io_ethtool_sset(struct net_device *dev,
5337 struct ethtool_cmd *info) 5336 struct ethtool_cmd *info)
5338{ 5337{
5339 struct s2io_nic *sp = dev->priv; 5338 struct s2io_nic *sp = netdev_priv(dev);
5340 if ((info->autoneg == AUTONEG_ENABLE) || 5339 if ((info->autoneg == AUTONEG_ENABLE) ||
5341 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL)) 5340 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5342 return -EINVAL; 5341 return -EINVAL;
@@ -5362,7 +5361,7 @@ static int s2io_ethtool_sset(struct net_device *dev,
5362 5361
5363static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info) 5362static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5364{ 5363{
5365 struct s2io_nic *sp = dev->priv; 5364 struct s2io_nic *sp = netdev_priv(dev);
5366 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 5365 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5367 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 5366 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5368 info->port = PORT_FIBRE; 5367 info->port = PORT_FIBRE;
@@ -5397,7 +5396,7 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5397static void s2io_ethtool_gdrvinfo(struct net_device *dev, 5396static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5398 struct ethtool_drvinfo *info) 5397 struct ethtool_drvinfo *info)
5399{ 5398{
5400 struct s2io_nic *sp = dev->priv; 5399 struct s2io_nic *sp = netdev_priv(dev);
5401 5400
5402 strncpy(info->driver, s2io_driver_name, sizeof(info->driver)); 5401 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5403 strncpy(info->version, s2io_driver_version, sizeof(info->version)); 5402 strncpy(info->version, s2io_driver_version, sizeof(info->version));
@@ -5427,7 +5426,7 @@ static void s2io_ethtool_gregs(struct net_device *dev,
5427 int i; 5426 int i;
5428 u64 reg; 5427 u64 reg;
5429 u8 *reg_space = (u8 *) space; 5428 u8 *reg_space = (u8 *) space;
5430 struct s2io_nic *sp = dev->priv; 5429 struct s2io_nic *sp = netdev_priv(dev);
5431 5430
5432 regs->len = XENA_REG_SPACE; 5431 regs->len = XENA_REG_SPACE;
5433 regs->version = sp->pdev->subsystem_device; 5432 regs->version = sp->pdev->subsystem_device;
@@ -5487,7 +5486,7 @@ static void s2io_phy_id(unsigned long data)
5487static int s2io_ethtool_idnic(struct net_device *dev, u32 data) 5486static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5488{ 5487{
5489 u64 val64 = 0, last_gpio_ctrl_val; 5488 u64 val64 = 0, last_gpio_ctrl_val;
5490 struct s2io_nic *sp = dev->priv; 5489 struct s2io_nic *sp = netdev_priv(dev);
5491 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5490 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5492 u16 subid; 5491 u16 subid;
5493 5492
@@ -5525,7 +5524,7 @@ static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5525static void s2io_ethtool_gringparam(struct net_device *dev, 5524static void s2io_ethtool_gringparam(struct net_device *dev,
5526 struct ethtool_ringparam *ering) 5525 struct ethtool_ringparam *ering)
5527{ 5526{
5528 struct s2io_nic *sp = dev->priv; 5527 struct s2io_nic *sp = netdev_priv(dev);
5529 int i,tx_desc_count=0,rx_desc_count=0; 5528 int i,tx_desc_count=0,rx_desc_count=0;
5530 5529
5531 if (sp->rxd_mode == RXD_MODE_1) 5530 if (sp->rxd_mode == RXD_MODE_1)
@@ -5568,7 +5567,7 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
5568 struct ethtool_pauseparam *ep) 5567 struct ethtool_pauseparam *ep)
5569{ 5568{
5570 u64 val64; 5569 u64 val64;
5571 struct s2io_nic *sp = dev->priv; 5570 struct s2io_nic *sp = netdev_priv(dev);
5572 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5571 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5573 5572
5574 val64 = readq(&bar0->rmac_pause_cfg); 5573 val64 = readq(&bar0->rmac_pause_cfg);
@@ -5595,7 +5594,7 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
5595 struct ethtool_pauseparam *ep) 5594 struct ethtool_pauseparam *ep)
5596{ 5595{
5597 u64 val64; 5596 u64 val64;
5598 struct s2io_nic *sp = dev->priv; 5597 struct s2io_nic *sp = netdev_priv(dev);
5599 struct XENA_dev_config __iomem *bar0 = sp->bar0; 5598 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5600 5599
5601 val64 = readq(&bar0->rmac_pause_cfg); 5600 val64 = readq(&bar0->rmac_pause_cfg);
@@ -5825,7 +5824,7 @@ static int s2io_ethtool_geeprom(struct net_device *dev,
5825{ 5824{
5826 u32 i, valid; 5825 u32 i, valid;
5827 u64 data; 5826 u64 data;
5828 struct s2io_nic *sp = dev->priv; 5827 struct s2io_nic *sp = netdev_priv(dev);
5829 5828
5830 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16); 5829 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5831 5830
@@ -5863,7 +5862,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
5863{ 5862{
5864 int len = eeprom->len, cnt = 0; 5863 int len = eeprom->len, cnt = 0;
5865 u64 valid = 0, data; 5864 u64 valid = 0, data;
5866 struct s2io_nic *sp = dev->priv; 5865 struct s2io_nic *sp = netdev_priv(dev);
5867 5866
5868 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) { 5867 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5869 DBG_PRINT(ERR_DBG, 5868 DBG_PRINT(ERR_DBG,
@@ -6243,7 +6242,7 @@ static void s2io_ethtool_test(struct net_device *dev,
6243 struct ethtool_test *ethtest, 6242 struct ethtool_test *ethtest,
6244 uint64_t * data) 6243 uint64_t * data)
6245{ 6244{
6246 struct s2io_nic *sp = dev->priv; 6245 struct s2io_nic *sp = netdev_priv(dev);
6247 int orig_state = netif_running(sp->dev); 6246 int orig_state = netif_running(sp->dev);
6248 6247
6249 if (ethtest->flags == ETH_TEST_FL_OFFLINE) { 6248 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
@@ -6299,7 +6298,7 @@ static void s2io_get_ethtool_stats(struct net_device *dev,
6299 u64 * tmp_stats) 6298 u64 * tmp_stats)
6300{ 6299{
6301 int i = 0, k; 6300 int i = 0, k;
6302 struct s2io_nic *sp = dev->priv; 6301 struct s2io_nic *sp = netdev_priv(dev);
6303 struct stat_block *stat_info = sp->mac_control.stats_info; 6302 struct stat_block *stat_info = sp->mac_control.stats_info;
6304 6303
6305 s2io_updt_stats(sp); 6304 s2io_updt_stats(sp);
@@ -6578,14 +6577,14 @@ static int s2io_ethtool_get_regs_len(struct net_device *dev)
6578 6577
6579static u32 s2io_ethtool_get_rx_csum(struct net_device * dev) 6578static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6580{ 6579{
6581 struct s2io_nic *sp = dev->priv; 6580 struct s2io_nic *sp = netdev_priv(dev);
6582 6581
6583 return (sp->rx_csum); 6582 return (sp->rx_csum);
6584} 6583}
6585 6584
6586static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data) 6585static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6587{ 6586{
6588 struct s2io_nic *sp = dev->priv; 6587 struct s2io_nic *sp = netdev_priv(dev);
6589 6588
6590 if (data) 6589 if (data)
6591 sp->rx_csum = 1; 6590 sp->rx_csum = 1;
@@ -6602,7 +6601,7 @@ static int s2io_get_eeprom_len(struct net_device *dev)
6602 6601
6603static int s2io_get_sset_count(struct net_device *dev, int sset) 6602static int s2io_get_sset_count(struct net_device *dev, int sset)
6604{ 6603{
6605 struct s2io_nic *sp = dev->priv; 6604 struct s2io_nic *sp = netdev_priv(dev);
6606 6605
6607 switch (sset) { 6606 switch (sset) {
6608 case ETH_SS_TEST: 6607 case ETH_SS_TEST:
@@ -6625,7 +6624,7 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
6625 u32 stringset, u8 * data) 6624 u32 stringset, u8 * data)
6626{ 6625{
6627 int stat_size = 0; 6626 int stat_size = 0;
6628 struct s2io_nic *sp = dev->priv; 6627 struct s2io_nic *sp = netdev_priv(dev);
6629 6628
6630 switch (stringset) { 6629 switch (stringset) {
6631 case ETH_SS_TEST: 6630 case ETH_SS_TEST:
@@ -6727,7 +6726,7 @@ static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6727 6726
6728static int s2io_change_mtu(struct net_device *dev, int new_mtu) 6727static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6729{ 6728{
6730 struct s2io_nic *sp = dev->priv; 6729 struct s2io_nic *sp = netdev_priv(dev);
6731 int ret = 0; 6730 int ret = 0;
6732 6731
6733 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) { 6732 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
@@ -7331,7 +7330,7 @@ out_unlock:
7331 7330
7332static void s2io_tx_watchdog(struct net_device *dev) 7331static void s2io_tx_watchdog(struct net_device *dev)
7333{ 7332{
7334 struct s2io_nic *sp = dev->priv; 7333 struct s2io_nic *sp = netdev_priv(dev);
7335 7334
7336 if (netif_carrier_ok(dev)) { 7335 if (netif_carrier_ok(dev)) {
7337 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++; 7336 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
@@ -7366,7 +7365,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7366 int ring_no = ring_data->ring_no; 7365 int ring_no = ring_data->ring_no;
7367 u16 l3_csum, l4_csum; 7366 u16 l3_csum, l4_csum;
7368 unsigned long long err = rxdp->Control_1 & RXD_T_CODE; 7367 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7369 struct lro *lro; 7368 struct lro *uninitialized_var(lro);
7370 u8 err_mask; 7369 u8 err_mask;
7371 7370
7372 skb->dev = dev; 7371 skb->dev = dev;
@@ -7544,7 +7543,6 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7544 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; 7543 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7545send_up: 7544send_up:
7546 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); 7545 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7547 dev->last_rx = jiffies;
7548aggregate: 7546aggregate:
7549 sp->mac_control.rings[ring_no].rx_bufs_left -= 1; 7547 sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
7550 return SUCCESS; 7548 return SUCCESS;
@@ -7718,6 +7716,24 @@ static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7718 S2IO_BIT_RESET); 7716 S2IO_BIT_RESET);
7719} 7717}
7720 7718
7719static const struct net_device_ops s2io_netdev_ops = {
7720 .ndo_open = s2io_open,
7721 .ndo_stop = s2io_close,
7722 .ndo_get_stats = s2io_get_stats,
7723 .ndo_start_xmit = s2io_xmit,
7724 .ndo_validate_addr = eth_validate_addr,
7725 .ndo_set_multicast_list = s2io_set_multicast,
7726 .ndo_do_ioctl = s2io_ioctl,
7727 .ndo_set_mac_address = s2io_set_mac_addr,
7728 .ndo_change_mtu = s2io_change_mtu,
7729 .ndo_vlan_rx_register = s2io_vlan_rx_register,
7730 .ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid,
7731 .ndo_tx_timeout = s2io_tx_watchdog,
7732#ifdef CONFIG_NET_POLL_CONTROLLER
7733 .ndo_poll_controller = s2io_netpoll,
7734#endif
7735};
7736
7721/** 7737/**
7722 * s2io_init_nic - Initialization of the adapter . 7738 * s2io_init_nic - Initialization of the adapter .
7723 * @pdev : structure containing the PCI related information of the device. 7739 * @pdev : structure containing the PCI related information of the device.
@@ -7748,7 +7764,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7748 int mode; 7764 int mode;
7749 u8 dev_intr_type = intr_type; 7765 u8 dev_intr_type = intr_type;
7750 u8 dev_multiq = 0; 7766 u8 dev_multiq = 0;
7751 DECLARE_MAC_BUF(mac);
7752 7767
7753 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq); 7768 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7754 if (ret) 7769 if (ret)
@@ -7798,7 +7813,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7798 SET_NETDEV_DEV(dev, &pdev->dev); 7813 SET_NETDEV_DEV(dev, &pdev->dev);
7799 7814
7800 /* Private member variable initialized to s2io NIC structure */ 7815 /* Private member variable initialized to s2io NIC structure */
7801 sp = dev->priv; 7816 sp = netdev_priv(dev);
7802 memset(sp, 0, sizeof(struct s2io_nic)); 7817 memset(sp, 0, sizeof(struct s2io_nic));
7803 sp->dev = dev; 7818 sp->dev = dev;
7804 sp->pdev = pdev; 7819 sp->pdev = pdev;
@@ -7918,8 +7933,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7918 goto mem_alloc_failed; 7933 goto mem_alloc_failed;
7919 } 7934 }
7920 7935
7921 sp->bar0 = ioremap(pci_resource_start(pdev, 0), 7936 sp->bar0 = pci_ioremap_bar(pdev, 0);
7922 pci_resource_len(pdev, 0));
7923 if (!sp->bar0) { 7937 if (!sp->bar0) {
7924 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n", 7938 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7925 dev->name); 7939 dev->name);
@@ -7927,8 +7941,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7927 goto bar0_remap_failed; 7941 goto bar0_remap_failed;
7928 } 7942 }
7929 7943
7930 sp->bar1 = ioremap(pci_resource_start(pdev, 2), 7944 sp->bar1 = pci_ioremap_bar(pdev, 2);
7931 pci_resource_len(pdev, 2));
7932 if (!sp->bar1) { 7945 if (!sp->bar1) {
7933 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n", 7946 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7934 dev->name); 7947 dev->name);
@@ -7946,26 +7959,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7946 } 7959 }
7947 7960
7948 /* Driver entry points */ 7961 /* Driver entry points */
7949 dev->open = &s2io_open; 7962 dev->netdev_ops = &s2io_netdev_ops;
7950 dev->stop = &s2io_close;
7951 dev->hard_start_xmit = &s2io_xmit;
7952 dev->get_stats = &s2io_get_stats;
7953 dev->set_multicast_list = &s2io_set_multicast;
7954 dev->do_ioctl = &s2io_ioctl;
7955 dev->set_mac_address = &s2io_set_mac_addr;
7956 dev->change_mtu = &s2io_change_mtu;
7957 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 7963 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7958 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 7964 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7959 dev->vlan_rx_register = s2io_vlan_rx_register;
7960 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7961
7962 /*
7963 * will use eth_mac_addr() for dev->set_mac_address
7964 * mac address will be set every time dev->open() is called
7965 */
7966#ifdef CONFIG_NET_POLL_CONTROLLER
7967 dev->poll_controller = s2io_netpoll;
7968#endif
7969 7965
7970 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 7966 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7971 if (sp->high_dma_flag == TRUE) 7967 if (sp->high_dma_flag == TRUE)
@@ -7976,7 +7972,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7976 dev->features |= NETIF_F_UFO; 7972 dev->features |= NETIF_F_UFO;
7977 dev->features |= NETIF_F_HW_CSUM; 7973 dev->features |= NETIF_F_HW_CSUM;
7978 } 7974 }
7979 dev->tx_timeout = &s2io_tx_watchdog;
7980 dev->watchdog_timeo = WATCH_DOG_TIMEOUT; 7975 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7981 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic); 7976 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7982 INIT_WORK(&sp->set_link_task, s2io_set_link); 7977 INIT_WORK(&sp->set_link_task, s2io_set_link);
@@ -8125,8 +8120,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
8125 sp->product_name, pdev->revision); 8120 sp->product_name, pdev->revision);
8126 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name, 8121 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8127 s2io_driver_version); 8122 s2io_driver_version);
8128 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n", 8123 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %pM\n", dev->name, dev->dev_addr);
8129 dev->name, print_mac(mac, dev->dev_addr));
8130 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num); 8124 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8131 if (sp->device_type & XFRAME_II_DEVICE) { 8125 if (sp->device_type & XFRAME_II_DEVICE) {
8132 mode = s2io_print_pci_mode(sp); 8126 mode = s2io_print_pci_mode(sp);
@@ -8255,7 +8249,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8255 8249
8256 flush_scheduled_work(); 8250 flush_scheduled_work();
8257 8251
8258 sp = dev->priv; 8252 sp = netdev_priv(dev);
8259 unregister_netdev(dev); 8253 unregister_netdev(dev);
8260 8254
8261 free_shared_mem(sp); 8255 free_shared_mem(sp);
@@ -8590,7 +8584,7 @@ static void clear_lro_session(struct lro *lro)
8590static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag) 8584static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8591{ 8585{
8592 struct net_device *dev = skb->dev; 8586 struct net_device *dev = skb->dev;
8593 struct s2io_nic *sp = dev->priv; 8587 struct s2io_nic *sp = netdev_priv(dev);
8594 8588
8595 skb->protocol = eth_type_trans(skb, dev); 8589 skb->protocol = eth_type_trans(skb, dev);
8596 if (sp->vlgrp && vlan_tag 8590 if (sp->vlgrp && vlan_tag
@@ -8639,7 +8633,7 @@ static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8639 pci_channel_state_t state) 8633 pci_channel_state_t state)
8640{ 8634{
8641 struct net_device *netdev = pci_get_drvdata(pdev); 8635 struct net_device *netdev = pci_get_drvdata(pdev);
8642 struct s2io_nic *sp = netdev->priv; 8636 struct s2io_nic *sp = netdev_priv(netdev);
8643 8637
8644 netif_device_detach(netdev); 8638 netif_device_detach(netdev);
8645 8639
@@ -8664,7 +8658,7 @@ static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8664static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev) 8658static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8665{ 8659{
8666 struct net_device *netdev = pci_get_drvdata(pdev); 8660 struct net_device *netdev = pci_get_drvdata(pdev);
8667 struct s2io_nic *sp = netdev->priv; 8661 struct s2io_nic *sp = netdev_priv(netdev);
8668 8662
8669 if (pci_enable_device(pdev)) { 8663 if (pci_enable_device(pdev)) {
8670 printk(KERN_ERR "s2io: " 8664 printk(KERN_ERR "s2io: "
@@ -8688,7 +8682,7 @@ static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8688static void s2io_io_resume(struct pci_dev *pdev) 8682static void s2io_io_resume(struct pci_dev *pdev)
8689{ 8683{
8690 struct net_device *netdev = pci_get_drvdata(pdev); 8684 struct net_device *netdev = pci_get_drvdata(pdev);
8691 struct s2io_nic *sp = netdev->priv; 8685 struct s2io_nic *sp = netdev_priv(netdev);
8692 8686
8693 if (netif_running(netdev)) { 8687 if (netif_running(netdev)) {
8694 if (s2io_card_up(sp)) { 8688 if (s2io_card_up(sp)) {
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 5986cec17f19..be3025310e90 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -869,7 +869,6 @@ printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[3
869 /* datagram completed: send to upper level */ 869 /* datagram completed: send to upper level */
870 skb_trim(skb, dlen); 870 skb_trim(skb, dlen);
871 netif_rx(skb); 871 netif_rx(skb);
872 dev->last_rx = jiffies;
873 stats->rx_bytes+=dlen; 872 stats->rx_bytes+=dlen;
874 stats->rx_packets++; 873 stats->rx_packets++;
875 lp->rx_skb[ns] = NULL; 874 lp->rx_skb[ns] = NULL;
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 2615d46e6e50..31e38fae017f 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2039,9 +2039,9 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance)
2039 sbdma_tx_process(sc,&(sc->sbm_txdma), 0); 2039 sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
2040 2040
2041 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { 2041 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
2042 if (netif_rx_schedule_prep(dev, &sc->napi)) { 2042 if (netif_rx_schedule_prep(&sc->napi)) {
2043 __raw_writeq(0, sc->sbm_imr); 2043 __raw_writeq(0, sc->sbm_imr);
2044 __netif_rx_schedule(dev, &sc->napi); 2044 __netif_rx_schedule(&sc->napi);
2045 /* Depend on the exit from poll to reenable intr */ 2045 /* Depend on the exit from poll to reenable intr */
2046 } 2046 }
2047 else { 2047 else {
@@ -2292,7 +2292,6 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2292 uint64_t ea_reg; 2292 uint64_t ea_reg;
2293 int i; 2293 int i;
2294 int err; 2294 int err;
2295 DECLARE_MAC_BUF(mac);
2296 2295
2297 sc->sbm_dev = dev; 2296 sc->sbm_dev = dev;
2298 sc->sbe_idx = idx; 2297 sc->sbe_idx = idx;
@@ -2373,8 +2372,8 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2373 * process so we need to finish off the config message that 2372 * process so we need to finish off the config message that
2374 * was being displayed) 2373 * was being displayed)
2375 */ 2374 */
2376 pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %s\n", 2375 pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
2377 dev->name, base, print_mac(mac, eaddr)); 2376 dev->name, base, eaddr);
2378 2377
2379 sc->mii_bus->name = sbmac_mdio_string; 2378 sc->mii_bus->name = sbmac_mdio_string;
2380 snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx); 2379 snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
@@ -2668,7 +2667,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget)
2668 sbdma_tx_process(sc, &(sc->sbm_txdma), 1); 2667 sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
2669 2668
2670 if (work_done < budget) { 2669 if (work_done < budget) {
2671 netif_rx_complete(dev, napi); 2670 netif_rx_complete(napi);
2672 2671
2673#ifdef CONFIG_SBMAC_COALESCE 2672#ifdef CONFIG_SBMAC_COALESCE
2674 __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | 2673 __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 61955f8d8011..42fd31276602 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -816,7 +816,6 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
816 } 816 }
817 817
818 skb->protocol = eth_type_trans(skb, dev); 818 skb->protocol = eth_type_trans(skb, dev);
819 dev->last_rx = jiffies;
820 netif_rx(skb); 819 netif_rx(skb);
821 820
822 dev->stats.rx_bytes += pkt_size; 821 dev->stats.rx_bytes += pkt_size;
@@ -1387,7 +1386,7 @@ static void sc92031_ethtool_get_ethtool_stats(struct net_device *dev,
1387 spin_unlock_bh(&priv->lock); 1386 spin_unlock_bh(&priv->lock);
1388} 1387}
1389 1388
1390static struct ethtool_ops sc92031_ethtool_ops = { 1389static const struct ethtool_ops sc92031_ethtool_ops = {
1391 .get_settings = sc92031_ethtool_get_settings, 1390 .get_settings = sc92031_ethtool_get_settings,
1392 .set_settings = sc92031_ethtool_set_settings, 1391 .set_settings = sc92031_ethtool_set_settings,
1393 .get_drvinfo = sc92031_ethtool_get_drvinfo, 1392 .get_drvinfo = sc92031_ethtool_get_drvinfo,
@@ -1400,6 +1399,21 @@ static struct ethtool_ops sc92031_ethtool_ops = {
1400 .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats, 1399 .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats,
1401}; 1400};
1402 1401
1402
1403static const struct net_device_ops sc92031_netdev_ops = {
1404 .ndo_get_stats = sc92031_get_stats,
1405 .ndo_start_xmit = sc92031_start_xmit,
1406 .ndo_open = sc92031_open,
1407 .ndo_stop = sc92031_stop,
1408 .ndo_set_multicast_list = sc92031_set_multicast_list,
1409 .ndo_change_mtu = eth_change_mtu,
1410 .ndo_validate_addr = eth_validate_addr,
1411 .ndo_tx_timeout = sc92031_tx_timeout,
1412#ifdef CONFIG_NET_POLL_CONTROLLER
1413 .ndo_poll_controller = sc92031_poll_controller,
1414#endif
1415};
1416
1403static int __devinit sc92031_probe(struct pci_dev *pdev, 1417static int __devinit sc92031_probe(struct pci_dev *pdev,
1404 const struct pci_device_id *id) 1418 const struct pci_device_id *id)
1405{ 1419{
@@ -1453,17 +1467,9 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1453 /* faked with skb_copy_and_csum_dev */ 1467 /* faked with skb_copy_and_csum_dev */
1454 dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; 1468 dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
1455 1469
1456 dev->get_stats = sc92031_get_stats; 1470 dev->netdev_ops = &sc92031_netdev_ops;
1457 dev->ethtool_ops = &sc92031_ethtool_ops;
1458 dev->hard_start_xmit = sc92031_start_xmit;
1459 dev->watchdog_timeo = TX_TIMEOUT; 1471 dev->watchdog_timeo = TX_TIMEOUT;
1460 dev->open = sc92031_open; 1472 dev->ethtool_ops = &sc92031_ethtool_ops;
1461 dev->stop = sc92031_stop;
1462 dev->set_multicast_list = sc92031_set_multicast_list;
1463 dev->tx_timeout = sc92031_tx_timeout;
1464#ifdef CONFIG_NET_POLL_CONTROLLER
1465 dev->poll_controller = sc92031_poll_controller;
1466#endif
1467 1473
1468 priv = netdev_priv(dev); 1474 priv = netdev_priv(dev);
1469 spin_lock_init(&priv->lock); 1475 spin_lock_init(&priv->lock);
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 48c64fb20eec..12a8ffffeb03 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -158,7 +158,6 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
158 int old_dmaar; 158 int old_dmaar;
159 int old_rear; 159 int old_rear;
160 int retval; 160 int retval;
161 DECLARE_MAC_BUF(mac);
162 161
163 if (!request_region(ioaddr, SEEQ8005_IO_EXTENT, "seeq8005")) 162 if (!request_region(ioaddr, SEEQ8005_IO_EXTENT, "seeq8005"))
164 return -ENODEV; 163 return -ENODEV;
@@ -303,7 +302,7 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
303 /* Retrieve and print the ethernet address. */ 302 /* Retrieve and print the ethernet address. */
304 for (i = 0; i < 6; i++) 303 for (i = 0; i < 6; i++)
305 dev->dev_addr[i] = SA_prom[i+6]; 304 dev->dev_addr[i] = SA_prom[i+6];
306 printk("%s", print_mac(mac, dev->dev_addr)); 305 printk("%pM", dev->dev_addr);
307 306
308 if (dev->irq == 0xff) 307 if (dev->irq == 0xff)
309 ; /* Do nothing: a user-level program will set it. */ 308 ; /* Do nothing: a user-level program will set it. */
@@ -564,7 +563,6 @@ static void seeq8005_rx(struct net_device *dev)
564 563
565 skb->protocol=eth_type_trans(skb,dev); 564 skb->protocol=eth_type_trans(skb,dev);
566 netif_rx(skb); 565 netif_rx(skb);
567 dev->last_rx = jiffies;
568 dev->stats.rx_packets++; 566 dev->stats.rx_packets++;
569 dev->stats.rx_bytes += pkt_len; 567 dev->stats.rx_bytes += pkt_len;
570 } 568 }
@@ -746,12 +744,3 @@ void __exit cleanup_module(void)
746} 744}
747 745
748#endif /* MODULE */ 746#endif /* MODULE */
749
750/*
751 * Local variables:
752 * compile-command: "gcc -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c skeleton.c"
753 * version-control: t
754 * kept-new-versions: 5
755 * tab-width: 4
756 * End:
757 */
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index 3be13b592b4d..c535408ad6be 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -12,3 +12,11 @@ config SFC
12 12
13 To compile this driver as a module, choose M here. The module 13 To compile this driver as a module, choose M here. The module
14 will be called sfc. 14 will be called sfc.
15config SFC_MTD
16 bool "Solarflare Solarstorm SFC4000 flash MTD support"
17 depends on SFC && MTD && !(SFC=y && MTD=m)
18 default y
19 help
20 This exposes the on-board flash memory as an MTD device (e.g.
21 /dev/mtd1). This makes it possible to upload new boot code
22 to the NIC.
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index c8f5704c8fb1..b89f9be3cb13 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,5 +1,6 @@
1sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \ 1sfc-y += efx.o falcon.o tx.o rx.o falcon_gmac.o \
2 selftest.o ethtool.o xfp_phy.o \ 2 falcon_xmac.o selftest.o ethtool.o xfp_phy.o \
3 mdio_10g.o tenxpress.o boards.o sfe4001.o 3 mdio_10g.o tenxpress.o boards.o sfe4001.o
4sfc-$(CONFIG_SFC_MTD) += mtd.o
4 5
5obj-$(CONFIG_SFC) += sfc.o 6obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
index 99e602373269..64903496aa9a 100644
--- a/drivers/net/sfc/boards.c
+++ b/drivers/net/sfc/boards.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc. 3 * Copyright 2007-2008 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -11,6 +11,7 @@
11#include "phy.h" 11#include "phy.h"
12#include "boards.h" 12#include "boards.h"
13#include "efx.h" 13#include "efx.h"
14#include "workarounds.h"
14 15
15/* Macros for unpacking the board revision */ 16/* Macros for unpacking the board revision */
16/* The revision info is in host byte order. */ 17/* The revision info is in host byte order. */
@@ -52,9 +53,128 @@ static void board_blink(struct efx_nic *efx, bool blink)
52} 53}
53 54
54/***************************************************************************** 55/*****************************************************************************
56 * Support for LM87 sensor chip used on several boards
57 */
58#define LM87_REG_ALARMS1 0x41
59#define LM87_REG_ALARMS2 0x42
60#define LM87_IN_LIMITS(nr, _min, _max) \
61 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
62#define LM87_AIN_LIMITS(nr, _min, _max) \
63 0x3B + (nr), _max, 0x1A + (nr), _min
64#define LM87_TEMP_INT_LIMITS(_min, _max) \
65 0x39, _max, 0x3A, _min
66#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
67 0x37, _max, 0x38, _min
68
69#define LM87_ALARM_TEMP_INT 0x10
70#define LM87_ALARM_TEMP_EXT1 0x20
71
72#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
73
74static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
75 const u8 *reg_values)
76{
77 struct i2c_client *client = i2c_new_device(&efx->i2c_adap, info);
78 int rc;
79
80 if (!client)
81 return -EIO;
82
83 while (*reg_values) {
84 u8 reg = *reg_values++;
85 u8 value = *reg_values++;
86 rc = i2c_smbus_write_byte_data(client, reg, value);
87 if (rc)
88 goto err;
89 }
90
91 efx->board_info.hwmon_client = client;
92 return 0;
93
94err:
95 i2c_unregister_device(client);
96 return rc;
97}
98
99static void efx_fini_lm87(struct efx_nic *efx)
100{
101 i2c_unregister_device(efx->board_info.hwmon_client);
102}
103
104static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
105{
106 struct i2c_client *client = efx->board_info.hwmon_client;
107 s32 alarms1, alarms2;
108
109 /* If link is up then do not monitor temperature */
110 if (EFX_WORKAROUND_7884(efx) && efx->link_up)
111 return 0;
112
113 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
114 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
115 if (alarms1 < 0)
116 return alarms1;
117 if (alarms2 < 0)
118 return alarms2;
119 alarms1 &= mask;
120 alarms2 &= mask >> 8;
121 if (alarms1 || alarms2) {
122 EFX_ERR(efx,
123 "LM87 detected a hardware failure (status %02x:%02x)"
124 "%s%s\n",
125 alarms1, alarms2,
126 (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
127 (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
128 return -ERANGE;
129 }
130
131 return 0;
132}
133
134#else /* !CONFIG_SENSORS_LM87 */
135
136static inline int
137efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
138 const u8 *reg_values)
139{
140 return 0;
141}
142static inline void efx_fini_lm87(struct efx_nic *efx)
143{
144}
145static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
146{
147 return 0;
148}
149
150#endif /* CONFIG_SENSORS_LM87 */
151
152/*****************************************************************************
55 * Support for the SFE4002 153 * Support for the SFE4002
56 * 154 *
57 */ 155 */
156static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
157
158static const u8 sfe4002_lm87_regs[] = {
159 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
160 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
161 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
162 LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */
163 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
164 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
165 LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */
166 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
167 LM87_TEMP_INT_LIMITS(10, 60), /* board */
168 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
169 0
170};
171
172static struct i2c_board_info sfe4002_hwmon_info = {
173 I2C_BOARD_INFO("lm87", 0x2e),
174 .platform_data = &sfe4002_lm87_channel,
175 .irq = -1,
176};
177
58/****************************************************************************/ 178/****************************************************************************/
59/* LED allocations. Note that on rev A0 boards the schematic and the reality 179/* LED allocations. Note that on rev A0 boards the schematic and the reality
60 * differ: red and green are swapped. Below is the fixed (A1) layout (there 180 * differ: red and green are swapped. Below is the fixed (A1) layout (there
@@ -84,81 +204,67 @@ static void sfe4002_fault_led(struct efx_nic *efx, bool state)
84 QUAKE_LED_OFF); 204 QUAKE_LED_OFF);
85} 205}
86 206
207static int sfe4002_check_hw(struct efx_nic *efx)
208{
209 /* A0 board rev. 4002s report a temperature fault the whole time
210 * (bad sensor) so we mask it out. */
211 unsigned alarm_mask =
212 (efx->board_info.major == 0 && efx->board_info.minor == 0) ?
213 ~LM87_ALARM_TEMP_EXT1 : ~0;
214
215 return efx_check_lm87(efx, alarm_mask);
216}
217
87static int sfe4002_init(struct efx_nic *efx) 218static int sfe4002_init(struct efx_nic *efx)
88{ 219{
220 int rc = efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
221 if (rc)
222 return rc;
223 efx->board_info.monitor = sfe4002_check_hw;
89 efx->board_info.init_leds = sfe4002_init_leds; 224 efx->board_info.init_leds = sfe4002_init_leds;
90 efx->board_info.set_fault_led = sfe4002_fault_led; 225 efx->board_info.set_fault_led = sfe4002_fault_led;
91 efx->board_info.blink = board_blink; 226 efx->board_info.blink = board_blink;
227 efx->board_info.fini = efx_fini_lm87;
92 return 0; 228 return 0;
93} 229}
94 230
95/* This will get expanded as board-specific details get moved out of the 231/* This will get expanded as board-specific details get moved out of the
96 * PHY drivers. */ 232 * PHY drivers. */
97struct efx_board_data { 233struct efx_board_data {
234 enum efx_board_type type;
98 const char *ref_model; 235 const char *ref_model;
99 const char *gen_type; 236 const char *gen_type;
100 int (*init) (struct efx_nic *nic); 237 int (*init) (struct efx_nic *nic);
101}; 238};
102 239
103static int dummy_init(struct efx_nic *nic)
104{
105 return 0;
106}
107 240
108static struct efx_board_data board_data[] = { 241static struct efx_board_data board_data[] = {
109 [EFX_BOARD_INVALID] = 242 { EFX_BOARD_SFE4001, "SFE4001", "10GBASE-T adapter", sfe4001_init },
110 {NULL, NULL, dummy_init}, 243 { EFX_BOARD_SFE4002, "SFE4002", "XFP adapter", sfe4002_init },
111 [EFX_BOARD_SFE4001] = 244 { EFX_BOARD_SFN4111T, "SFN4111T", "100/1000/10GBASE-T adapter",
112 {"SFE4001", "10GBASE-T adapter", sfe4001_init}, 245 sfn4111t_init },
113 [EFX_BOARD_SFE4002] =
114 {"SFE4002", "XFP adapter", sfe4002_init},
115}; 246};
116 247
117int efx_set_board_info(struct efx_nic *efx, u16 revision_info) 248void efx_set_board_info(struct efx_nic *efx, u16 revision_info)
118{ 249{
119 int rc = 0; 250 struct efx_board_data *data = NULL;
120 struct efx_board_data *data; 251 int i;
121
122 if (BOARD_TYPE(revision_info) >= EFX_BOARD_MAX) {
123 EFX_ERR(efx, "squashing unknown board type %d\n",
124 BOARD_TYPE(revision_info));
125 revision_info = 0;
126 }
127 252
128 if (BOARD_TYPE(revision_info) == 0) { 253 efx->board_info.type = BOARD_TYPE(revision_info);
129 efx->board_info.major = 0; 254 efx->board_info.major = BOARD_MAJOR(revision_info);
130 efx->board_info.minor = 0; 255 efx->board_info.minor = BOARD_MINOR(revision_info);
131 /* For early boards that don't have revision info. there is
132 * only 1 board for each PHY type, so we can work it out, with
133 * the exception of the PHY-less boards. */
134 switch (efx->phy_type) {
135 case PHY_TYPE_10XPRESS:
136 efx->board_info.type = EFX_BOARD_SFE4001;
137 break;
138 case PHY_TYPE_XFP:
139 efx->board_info.type = EFX_BOARD_SFE4002;
140 break;
141 default:
142 efx->board_info.type = 0;
143 break;
144 }
145 } else {
146 efx->board_info.type = BOARD_TYPE(revision_info);
147 efx->board_info.major = BOARD_MAJOR(revision_info);
148 efx->board_info.minor = BOARD_MINOR(revision_info);
149 }
150 256
151 data = &board_data[efx->board_info.type]; 257 for (i = 0; i < ARRAY_SIZE(board_data); i++)
258 if (board_data[i].type == efx->board_info.type)
259 data = &board_data[i];
152 260
153 /* Report the board model number or generic type for recognisable 261 if (data) {
154 * boards. */
155 if (efx->board_info.type != 0)
156 EFX_INFO(efx, "board is %s rev %c%d\n", 262 EFX_INFO(efx, "board is %s rev %c%d\n",
157 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) 263 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
158 ? data->ref_model : data->gen_type, 264 ? data->ref_model : data->gen_type,
159 'A' + efx->board_info.major, efx->board_info.minor); 265 'A' + efx->board_info.major, efx->board_info.minor);
160 266 efx->board_info.init = data->init;
161 efx->board_info.init = data->init; 267 } else {
162 268 EFX_ERR(efx, "unknown board type %d\n", efx->board_info.type);
163 return rc; 269 }
164} 270}
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
index c6e01b64bfb4..d93c6c6a7548 100644
--- a/drivers/net/sfc/boards.h
+++ b/drivers/net/sfc/boards.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc. 3 * Copyright 2007-2008 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -12,14 +12,16 @@
12 12
13/* Board IDs (must fit in 8 bits) */ 13/* Board IDs (must fit in 8 bits) */
14enum efx_board_type { 14enum efx_board_type {
15 EFX_BOARD_INVALID = 0, 15 EFX_BOARD_SFE4001 = 1,
16 EFX_BOARD_SFE4001 = 1, /* SFE4001 (10GBASE-T) */
17 EFX_BOARD_SFE4002 = 2, 16 EFX_BOARD_SFE4002 = 2,
18 /* Insert new types before here */ 17 EFX_BOARD_SFN4111T = 0x51,
19 EFX_BOARD_MAX
20}; 18};
21 19
22extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); 20extern void efx_set_board_info(struct efx_nic *efx, u16 revision_info);
21
22/* SFE4001 (10GBASE-T) */
23extern int sfe4001_init(struct efx_nic *efx); 23extern int sfe4001_init(struct efx_nic *efx);
24/* SFN4111T (100/1000/10GBASE-T) */
25extern int sfn4111t_init(struct efx_nic *efx);
24 26
25#endif 27#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 06ea71c7e34e..7673fd92eaf5 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -21,14 +21,12 @@
21#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <linux/topology.h> 22#include <linux/topology.h>
23#include "net_driver.h" 23#include "net_driver.h"
24#include "gmii.h"
25#include "ethtool.h" 24#include "ethtool.h"
26#include "tx.h" 25#include "tx.h"
27#include "rx.h" 26#include "rx.h"
28#include "efx.h" 27#include "efx.h"
29#include "mdio_10g.h" 28#include "mdio_10g.h"
30#include "falcon.h" 29#include "falcon.h"
31#include "mac.h"
32 30
33#define EFX_MAX_MTU (9 * 1024) 31#define EFX_MAX_MTU (9 * 1024)
34 32
@@ -39,6 +37,12 @@
39 */ 37 */
40static struct workqueue_struct *refill_workqueue; 38static struct workqueue_struct *refill_workqueue;
41 39
40/* Reset workqueue. If any NIC has a hardware failure then a reset will be
41 * queued onto this work queue. This is not a per-nic work queue, because
42 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
43 */
44static struct workqueue_struct *reset_workqueue;
45
42/************************************************************************** 46/**************************************************************************
43 * 47 *
44 * Configurable values 48 * Configurable values
@@ -58,13 +62,15 @@ MODULE_PARM_DESC(lro, "Large receive offload acceleration");
58/* 62/*
59 * Use separate channels for TX and RX events 63 * Use separate channels for TX and RX events
60 * 64 *
61 * Set this to 1 to use separate channels for TX and RX. It allows us to 65 * Set this to 1 to use separate channels for TX and RX. It allows us
62 * apply a higher level of interrupt moderation to TX events. 66 * to control interrupt affinity separately for TX and RX.
63 * 67 *
64 * This is forced to 0 for MSI interrupt mode as the interrupt vector 68 * This is only used in MSI-X interrupt mode
65 * is not written
66 */ 69 */
67static unsigned int separate_tx_and_rx_channels = true; 70static unsigned int separate_tx_channels;
71module_param(separate_tx_channels, uint, 0644);
72MODULE_PARM_DESC(separate_tx_channels,
73 "Use separate channels for TX and RX");
68 74
69/* This is the weight assigned to each of the (per-channel) virtual 75/* This is the weight assigned to each of the (per-channel) virtual
70 * NAPI devices. 76 * NAPI devices.
@@ -77,11 +83,6 @@ static int napi_weight = 64;
77 */ 83 */
78unsigned int efx_monitor_interval = 1 * HZ; 84unsigned int efx_monitor_interval = 1 * HZ;
79 85
80/* This controls whether or not the hardware monitor will trigger a
81 * reset when it detects an error condition.
82 */
83static unsigned int monitor_reset = true;
84
85/* This controls whether or not the driver will initialise devices 86/* This controls whether or not the driver will initialise devices
86 * with invalid MAC addresses stored in the EEPROM or flash. If true, 87 * with invalid MAC addresses stored in the EEPROM or flash. If true,
87 * such devices will be initialised with a random locally-generated 88 * such devices will be initialised with a random locally-generated
@@ -128,6 +129,10 @@ static unsigned int rss_cpus;
128module_param(rss_cpus, uint, 0444); 129module_param(rss_cpus, uint, 0444);
129MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); 130MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
130 131
132static int phy_flash_cfg;
133module_param(phy_flash_cfg, int, 0644);
134MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
135
131/************************************************************************** 136/**************************************************************************
132 * 137 *
133 * Utility functions and prototypes 138 * Utility functions and prototypes
@@ -211,7 +216,6 @@ static int efx_poll(struct napi_struct *napi, int budget)
211{ 216{
212 struct efx_channel *channel = 217 struct efx_channel *channel =
213 container_of(napi, struct efx_channel, napi_str); 218 container_of(napi, struct efx_channel, napi_str);
214 struct net_device *napi_dev = channel->napi_dev;
215 int rx_packets; 219 int rx_packets;
216 220
217 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n", 221 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
@@ -225,7 +229,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
225 * since efx_channel_processed() will have no effect if 229 * since efx_channel_processed() will have no effect if
226 * interrupts have already been disabled. 230 * interrupts have already been disabled.
227 */ 231 */
228 netif_rx_complete(napi_dev, napi); 232 netif_rx_complete(napi);
229 efx_channel_processed(channel); 233 efx_channel_processed(channel);
230 } 234 }
231 235
@@ -349,6 +353,27 @@ static int efx_probe_channel(struct efx_channel *channel)
349} 353}
350 354
351 355
356static void efx_set_channel_names(struct efx_nic *efx)
357{
358 struct efx_channel *channel;
359 const char *type = "";
360 int number;
361
362 efx_for_each_channel(channel, efx) {
363 number = channel->channel;
364 if (efx->n_channels > efx->n_rx_queues) {
365 if (channel->channel < efx->n_rx_queues) {
366 type = "-rx";
367 } else {
368 type = "-tx";
369 number -= efx->n_rx_queues;
370 }
371 }
372 snprintf(channel->name, sizeof(channel->name),
373 "%s%s-%d", efx->name, type, number);
374 }
375}
376
352/* Channels are shutdown and reinitialised whilst the NIC is running 377/* Channels are shutdown and reinitialised whilst the NIC is running
353 * to propagate configuration changes (mtu, checksum offload), or 378 * to propagate configuration changes (mtu, checksum offload), or
354 * to clear hardware error conditions 379 * to clear hardware error conditions
@@ -523,26 +548,8 @@ static void efx_link_status_changed(struct efx_nic *efx)
523 548
524 /* Status message for kernel log */ 549 /* Status message for kernel log */
525 if (efx->link_up) { 550 if (efx->link_up) {
526 struct mii_if_info *gmii = &efx->mii; 551 EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
527 unsigned adv, lpa; 552 efx->link_speed, efx->link_fd ? "full" : "half",
528 /* NONE here means direct XAUI from the controller, with no
529 * MDIO-attached device we can query. */
530 if (efx->phy_type != PHY_TYPE_NONE) {
531 adv = gmii_advertised(gmii);
532 lpa = gmii_lpa(gmii);
533 } else {
534 lpa = GM_LPA_10000 | LPA_DUPLEX;
535 adv = lpa;
536 }
537 EFX_INFO(efx, "link up at %dMbps %s-duplex "
538 "(adv %04x lpa %04x) (MTU %d)%s\n",
539 (efx->link_options & GM_LPA_10000 ? 10000 :
540 (efx->link_options & GM_LPA_1000 ? 1000 :
541 (efx->link_options & GM_LPA_100 ? 100 :
542 10))),
543 (efx->link_options & GM_LPA_DUPLEX ?
544 "full" : "half"),
545 adv, lpa,
546 efx->net_dev->mtu, 553 efx->net_dev->mtu,
547 (efx->promiscuous ? " [PROMISC]" : "")); 554 (efx->promiscuous ? " [PROMISC]" : ""));
548 } else { 555 } else {
@@ -566,10 +573,28 @@ void __efx_reconfigure_port(struct efx_nic *efx)
566 netif_addr_unlock_bh(efx->net_dev); 573 netif_addr_unlock_bh(efx->net_dev);
567 } 574 }
568 575
569 falcon_reconfigure_xmac(efx); 576 falcon_deconfigure_mac_wrapper(efx);
577
578 /* Reconfigure the PHY, disabling transmit in mac level loopback. */
579 if (LOOPBACK_INTERNAL(efx))
580 efx->phy_mode |= PHY_MODE_TX_DISABLED;
581 else
582 efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
583 efx->phy_op->reconfigure(efx);
584
585 if (falcon_switch_mac(efx))
586 goto fail;
587
588 efx->mac_op->reconfigure(efx);
570 589
571 /* Inform kernel of loss/gain of carrier */ 590 /* Inform kernel of loss/gain of carrier */
572 efx_link_status_changed(efx); 591 efx_link_status_changed(efx);
592 return;
593
594fail:
595 EFX_ERR(efx, "failed to reconfigure MAC\n");
596 efx->phy_op->fini(efx);
597 efx->port_initialized = false;
573} 598}
574 599
575/* Reinitialise the MAC to pick up new PHY settings, even if the port is 600/* Reinitialise the MAC to pick up new PHY settings, even if the port is
@@ -586,10 +611,9 @@ void efx_reconfigure_port(struct efx_nic *efx)
586/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all() 611/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
587 * we don't efx_reconfigure_port() if the port is disabled. Care is taken 612 * we don't efx_reconfigure_port() if the port is disabled. Care is taken
588 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */ 613 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
589static void efx_reconfigure_work(struct work_struct *data) 614static void efx_phy_work(struct work_struct *data)
590{ 615{
591 struct efx_nic *efx = container_of(data, struct efx_nic, 616 struct efx_nic *efx = container_of(data, struct efx_nic, phy_work);
592 reconfigure_work);
593 617
594 mutex_lock(&efx->mac_lock); 618 mutex_lock(&efx->mac_lock);
595 if (efx->port_enabled) 619 if (efx->port_enabled)
@@ -597,6 +621,16 @@ static void efx_reconfigure_work(struct work_struct *data)
597 mutex_unlock(&efx->mac_lock); 621 mutex_unlock(&efx->mac_lock);
598} 622}
599 623
624static void efx_mac_work(struct work_struct *data)
625{
626 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
627
628 mutex_lock(&efx->mac_lock);
629 if (efx->port_enabled)
630 efx->mac_op->irq(efx);
631 mutex_unlock(&efx->mac_lock);
632}
633
600static int efx_probe_port(struct efx_nic *efx) 634static int efx_probe_port(struct efx_nic *efx)
601{ 635{
602 int rc; 636 int rc;
@@ -608,21 +642,22 @@ static int efx_probe_port(struct efx_nic *efx)
608 if (rc) 642 if (rc)
609 goto err; 643 goto err;
610 644
645 if (phy_flash_cfg)
646 efx->phy_mode = PHY_MODE_SPECIAL;
647
611 /* Sanity check MAC address */ 648 /* Sanity check MAC address */
612 if (is_valid_ether_addr(efx->mac_address)) { 649 if (is_valid_ether_addr(efx->mac_address)) {
613 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); 650 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
614 } else { 651 } else {
615 DECLARE_MAC_BUF(mac); 652 EFX_ERR(efx, "invalid MAC address %pM\n",
616 653 efx->mac_address);
617 EFX_ERR(efx, "invalid MAC address %s\n",
618 print_mac(mac, efx->mac_address));
619 if (!allow_bad_hwaddr) { 654 if (!allow_bad_hwaddr) {
620 rc = -EINVAL; 655 rc = -EINVAL;
621 goto err; 656 goto err;
622 } 657 }
623 random_ether_addr(efx->net_dev->dev_addr); 658 random_ether_addr(efx->net_dev->dev_addr);
624 EFX_INFO(efx, "using locally-generated MAC %s\n", 659 EFX_INFO(efx, "using locally-generated MAC %pM\n",
625 print_mac(mac, efx->net_dev->dev_addr)); 660 efx->net_dev->dev_addr);
626 } 661 }
627 662
628 return 0; 663 return 0;
@@ -638,23 +673,30 @@ static int efx_init_port(struct efx_nic *efx)
638 673
639 EFX_LOG(efx, "init port\n"); 674 EFX_LOG(efx, "init port\n");
640 675
641 /* Initialise the MAC and PHY */ 676 rc = efx->phy_op->init(efx);
642 rc = falcon_init_xmac(efx);
643 if (rc) 677 if (rc)
644 return rc; 678 return rc;
679 efx->phy_op->reconfigure(efx);
680
681 mutex_lock(&efx->mac_lock);
682 rc = falcon_switch_mac(efx);
683 mutex_unlock(&efx->mac_lock);
684 if (rc)
685 goto fail;
686 efx->mac_op->reconfigure(efx);
645 687
646 efx->port_initialized = true; 688 efx->port_initialized = true;
647 efx->stats_enabled = true; 689 efx->stats_enabled = true;
648
649 /* Reconfigure port to program MAC registers */
650 falcon_reconfigure_xmac(efx);
651
652 return 0; 690 return 0;
691
692fail:
693 efx->phy_op->fini(efx);
694 return rc;
653} 695}
654 696
655/* Allow efx_reconfigure_port() to be scheduled, and close the window 697/* Allow efx_reconfigure_port() to be scheduled, and close the window
656 * between efx_stop_port and efx_flush_all whereby a previously scheduled 698 * between efx_stop_port and efx_flush_all whereby a previously scheduled
657 * efx_reconfigure_port() may have been cancelled */ 699 * efx_phy_work()/efx_mac_work() may have been cancelled */
658static void efx_start_port(struct efx_nic *efx) 700static void efx_start_port(struct efx_nic *efx)
659{ 701{
660 EFX_LOG(efx, "start port\n"); 702 EFX_LOG(efx, "start port\n");
@@ -663,13 +705,14 @@ static void efx_start_port(struct efx_nic *efx)
663 mutex_lock(&efx->mac_lock); 705 mutex_lock(&efx->mac_lock);
664 efx->port_enabled = true; 706 efx->port_enabled = true;
665 __efx_reconfigure_port(efx); 707 __efx_reconfigure_port(efx);
708 efx->mac_op->irq(efx);
666 mutex_unlock(&efx->mac_lock); 709 mutex_unlock(&efx->mac_lock);
667} 710}
668 711
669/* Prevent efx_reconfigure_work and efx_monitor() from executing, and 712/* Prevent efx_phy_work, efx_mac_work, and efx_monitor() from executing,
670 * efx_set_multicast_list() from scheduling efx_reconfigure_work. 713 * and efx_set_multicast_list() from scheduling efx_phy_work. efx_phy_work
671 * efx_reconfigure_work can still be scheduled via NAPI processing 714 * and efx_mac_work may still be scheduled via NAPI processing until
672 * until efx_flush_all() is called */ 715 * efx_flush_all() is called */
673static void efx_stop_port(struct efx_nic *efx) 716static void efx_stop_port(struct efx_nic *efx)
674{ 717{
675 EFX_LOG(efx, "stop port\n"); 718 EFX_LOG(efx, "stop port\n");
@@ -692,7 +735,7 @@ static void efx_fini_port(struct efx_nic *efx)
692 if (!efx->port_initialized) 735 if (!efx->port_initialized)
693 return; 736 return;
694 737
695 falcon_fini_xmac(efx); 738 efx->phy_op->fini(efx);
696 efx->port_initialized = false; 739 efx->port_initialized = false;
697 740
698 efx->link_up = false; 741 efx->link_up = false;
@@ -840,26 +883,33 @@ static void efx_probe_interrupts(struct efx_nic *efx)
840 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { 883 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
841 struct msix_entry xentries[EFX_MAX_CHANNELS]; 884 struct msix_entry xentries[EFX_MAX_CHANNELS];
842 int wanted_ints; 885 int wanted_ints;
886 int rx_queues;
843 887
844 /* We want one RX queue and interrupt per CPU package 888 /* We want one RX queue and interrupt per CPU package
845 * (or as specified by the rss_cpus module parameter). 889 * (or as specified by the rss_cpus module parameter).
846 * We will need one channel per interrupt. 890 * We will need one channel per interrupt.
847 */ 891 */
848 wanted_ints = rss_cpus ? rss_cpus : efx_wanted_rx_queues(); 892 rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
849 efx->n_rx_queues = min(wanted_ints, max_channels); 893 wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
894 wanted_ints = min(wanted_ints, max_channels);
850 895
851 for (i = 0; i < efx->n_rx_queues; i++) 896 for (i = 0; i < wanted_ints; i++)
852 xentries[i].entry = i; 897 xentries[i].entry = i;
853 rc = pci_enable_msix(efx->pci_dev, xentries, efx->n_rx_queues); 898 rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints);
854 if (rc > 0) { 899 if (rc > 0) {
855 EFX_BUG_ON_PARANOID(rc >= efx->n_rx_queues); 900 EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
856 efx->n_rx_queues = rc; 901 " available (%d < %d).\n", rc, wanted_ints);
902 EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
903 EFX_BUG_ON_PARANOID(rc >= wanted_ints);
904 wanted_ints = rc;
857 rc = pci_enable_msix(efx->pci_dev, xentries, 905 rc = pci_enable_msix(efx->pci_dev, xentries,
858 efx->n_rx_queues); 906 wanted_ints);
859 } 907 }
860 908
861 if (rc == 0) { 909 if (rc == 0) {
862 for (i = 0; i < efx->n_rx_queues; i++) 910 efx->n_rx_queues = min(rx_queues, wanted_ints);
911 efx->n_channels = wanted_ints;
912 for (i = 0; i < wanted_ints; i++)
863 efx->channel[i].irq = xentries[i].vector; 913 efx->channel[i].irq = xentries[i].vector;
864 } else { 914 } else {
865 /* Fall back to single channel MSI */ 915 /* Fall back to single channel MSI */
@@ -871,6 +921,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
871 /* Try single interrupt MSI */ 921 /* Try single interrupt MSI */
872 if (efx->interrupt_mode == EFX_INT_MODE_MSI) { 922 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
873 efx->n_rx_queues = 1; 923 efx->n_rx_queues = 1;
924 efx->n_channels = 1;
874 rc = pci_enable_msi(efx->pci_dev); 925 rc = pci_enable_msi(efx->pci_dev);
875 if (rc == 0) { 926 if (rc == 0) {
876 efx->channel[0].irq = efx->pci_dev->irq; 927 efx->channel[0].irq = efx->pci_dev->irq;
@@ -883,6 +934,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
883 /* Assume legacy interrupts */ 934 /* Assume legacy interrupts */
884 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { 935 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
885 efx->n_rx_queues = 1; 936 efx->n_rx_queues = 1;
937 efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
886 efx->legacy_irq = efx->pci_dev->irq; 938 efx->legacy_irq = efx->pci_dev->irq;
887 } 939 }
888} 940}
@@ -907,8 +959,8 @@ static void efx_set_channels(struct efx_nic *efx)
907 struct efx_rx_queue *rx_queue; 959 struct efx_rx_queue *rx_queue;
908 960
909 efx_for_each_tx_queue(tx_queue, efx) { 961 efx_for_each_tx_queue(tx_queue, efx) {
910 if (!EFX_INT_MODE_USE_MSI(efx) && separate_tx_and_rx_channels) 962 if (separate_tx_channels)
911 tx_queue->channel = &efx->channel[1]; 963 tx_queue->channel = &efx->channel[efx->n_channels-1];
912 else 964 else
913 tx_queue->channel = &efx->channel[0]; 965 tx_queue->channel = &efx->channel[0];
914 tx_queue->channel->used_flags |= EFX_USED_BY_TX; 966 tx_queue->channel->used_flags |= EFX_USED_BY_TX;
@@ -985,6 +1037,7 @@ static int efx_probe_all(struct efx_nic *efx)
985 goto fail3; 1037 goto fail3;
986 } 1038 }
987 } 1039 }
1040 efx_set_channel_names(efx);
988 1041
989 return 0; 1042 return 0;
990 1043
@@ -1050,7 +1103,8 @@ static void efx_flush_all(struct efx_nic *efx)
1050 cancel_delayed_work_sync(&rx_queue->work); 1103 cancel_delayed_work_sync(&rx_queue->work);
1051 1104
1052 /* Stop scheduled port reconfigurations */ 1105 /* Stop scheduled port reconfigurations */
1053 cancel_work_sync(&efx->reconfigure_work); 1106 cancel_work_sync(&efx->mac_work);
1107 cancel_work_sync(&efx->phy_work);
1054 1108
1055} 1109}
1056 1110
@@ -1087,7 +1141,7 @@ static void efx_stop_all(struct efx_nic *efx)
1087 * window to loose phy events */ 1141 * window to loose phy events */
1088 efx_stop_port(efx); 1142 efx_stop_port(efx);
1089 1143
1090 /* Flush reconfigure_work, refill_workqueue, monitor_work */ 1144 /* Flush efx_phy_work, efx_mac_work, refill_workqueue, monitor_work */
1091 efx_flush_all(efx); 1145 efx_flush_all(efx);
1092 1146
1093 /* Isolate the MAC from the TX and RX engines, so that queue 1147 /* Isolate the MAC from the TX and RX engines, so that queue
@@ -1159,36 +1213,31 @@ static void efx_monitor(struct work_struct *data)
1159{ 1213{
1160 struct efx_nic *efx = container_of(data, struct efx_nic, 1214 struct efx_nic *efx = container_of(data, struct efx_nic,
1161 monitor_work.work); 1215 monitor_work.work);
1162 int rc = 0; 1216 int rc;
1163 1217
1164 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n", 1218 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
1165 raw_smp_processor_id()); 1219 raw_smp_processor_id());
1166 1220
1167
1168 /* If the mac_lock is already held then it is likely a port 1221 /* If the mac_lock is already held then it is likely a port
1169 * reconfiguration is already in place, which will likely do 1222 * reconfiguration is already in place, which will likely do
1170 * most of the work of check_hw() anyway. */ 1223 * most of the work of check_hw() anyway. */
1171 if (!mutex_trylock(&efx->mac_lock)) { 1224 if (!mutex_trylock(&efx->mac_lock))
1172 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1225 goto out_requeue;
1173 efx_monitor_interval); 1226 if (!efx->port_enabled)
1174 return; 1227 goto out_unlock;
1175 } 1228 rc = efx->board_info.monitor(efx);
1176
1177 if (efx->port_enabled)
1178 rc = falcon_check_xmac(efx);
1179 mutex_unlock(&efx->mac_lock);
1180
1181 if (rc) { 1229 if (rc) {
1182 if (monitor_reset) { 1230 EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
1183 EFX_ERR(efx, "hardware monitor detected a fault: " 1231 (rc == -ERANGE) ? "reported fault" : "failed");
1184 "triggering reset\n"); 1232 efx->phy_mode |= PHY_MODE_LOW_POWER;
1185 efx_schedule_reset(efx, RESET_TYPE_MONITOR); 1233 falcon_sim_phy_event(efx);
1186 } else {
1187 EFX_ERR(efx, "hardware monitor detected a fault, "
1188 "skipping reset\n");
1189 }
1190 } 1234 }
1235 efx->phy_op->poll(efx);
1236 efx->mac_op->poll(efx);
1191 1237
1238out_unlock:
1239 mutex_unlock(&efx->mac_lock);
1240out_requeue:
1192 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1241 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1193 efx_monitor_interval); 1242 efx_monitor_interval);
1194} 1243}
@@ -1282,6 +1331,8 @@ static int efx_net_open(struct net_device *net_dev)
1282 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name, 1331 EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
1283 raw_smp_processor_id()); 1332 raw_smp_processor_id());
1284 1333
1334 if (efx->state == STATE_DISABLED)
1335 return -EIO;
1285 if (efx->phy_mode & PHY_MODE_SPECIAL) 1336 if (efx->phy_mode & PHY_MODE_SPECIAL)
1286 return -EBUSY; 1337 return -EBUSY;
1287 1338
@@ -1300,10 +1351,12 @@ static int efx_net_stop(struct net_device *net_dev)
1300 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name, 1351 EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
1301 raw_smp_processor_id()); 1352 raw_smp_processor_id());
1302 1353
1303 /* Stop the device and flush all the channels */ 1354 if (efx->state != STATE_DISABLED) {
1304 efx_stop_all(efx); 1355 /* Stop the device and flush all the channels */
1305 efx_fini_channels(efx); 1356 efx_stop_all(efx);
1306 efx_init_channels(efx); 1357 efx_fini_channels(efx);
1358 efx_init_channels(efx);
1359 }
1307 1360
1308 return 0; 1361 return 0;
1309} 1362}
@@ -1322,7 +1375,7 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1322 if (!spin_trylock(&efx->stats_lock)) 1375 if (!spin_trylock(&efx->stats_lock))
1323 return stats; 1376 return stats;
1324 if (efx->stats_enabled) { 1377 if (efx->stats_enabled) {
1325 falcon_update_stats_xmac(efx); 1378 efx->mac_op->update_stats(efx);
1326 falcon_update_nic_stats(efx); 1379 falcon_update_nic_stats(efx);
1327 } 1380 }
1328 spin_unlock(&efx->stats_lock); 1381 spin_unlock(&efx->stats_lock);
@@ -1360,12 +1413,11 @@ static void efx_watchdog(struct net_device *net_dev)
1360{ 1413{
1361 struct efx_nic *efx = netdev_priv(net_dev); 1414 struct efx_nic *efx = netdev_priv(net_dev);
1362 1415
1363 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n", 1416 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:"
1364 atomic_read(&efx->netif_stop_count), efx->port_enabled, 1417 " resetting channels\n",
1365 monitor_reset ? "resetting channels" : "skipping reset"); 1418 atomic_read(&efx->netif_stop_count), efx->port_enabled);
1366 1419
1367 if (monitor_reset) 1420 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
1368 efx_schedule_reset(efx, RESET_TYPE_MONITOR);
1369} 1421}
1370 1422
1371 1423
@@ -1401,9 +1453,8 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1401 EFX_ASSERT_RESET_SERIALISED(efx); 1453 EFX_ASSERT_RESET_SERIALISED(efx);
1402 1454
1403 if (!is_valid_ether_addr(new_addr)) { 1455 if (!is_valid_ether_addr(new_addr)) {
1404 DECLARE_MAC_BUF(mac); 1456 EFX_ERR(efx, "invalid ethernet MAC address requested: %pM\n",
1405 EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n", 1457 new_addr);
1406 print_mac(mac, new_addr));
1407 return -EINVAL; 1458 return -EINVAL;
1408 } 1459 }
1409 1460
@@ -1447,22 +1498,43 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1447 return; 1498 return;
1448 1499
1449 if (changed) 1500 if (changed)
1450 queue_work(efx->workqueue, &efx->reconfigure_work); 1501 queue_work(efx->workqueue, &efx->phy_work);
1451 1502
1452 /* Create and activate new global multicast hash table */ 1503 /* Create and activate new global multicast hash table */
1453 falcon_set_multicast_hash(efx); 1504 falcon_set_multicast_hash(efx);
1454} 1505}
1455 1506
1507static const struct net_device_ops efx_netdev_ops = {
1508 .ndo_open = efx_net_open,
1509 .ndo_stop = efx_net_stop,
1510 .ndo_get_stats = efx_net_stats,
1511 .ndo_tx_timeout = efx_watchdog,
1512 .ndo_start_xmit = efx_hard_start_xmit,
1513 .ndo_validate_addr = eth_validate_addr,
1514 .ndo_do_ioctl = efx_ioctl,
1515 .ndo_change_mtu = efx_change_mtu,
1516 .ndo_set_mac_address = efx_set_mac_address,
1517 .ndo_set_multicast_list = efx_set_multicast_list,
1518#ifdef CONFIG_NET_POLL_CONTROLLER
1519 .ndo_poll_controller = efx_netpoll,
1520#endif
1521};
1522
1523static void efx_update_name(struct efx_nic *efx)
1524{
1525 strcpy(efx->name, efx->net_dev->name);
1526 efx_mtd_rename(efx);
1527 efx_set_channel_names(efx);
1528}
1529
1456static int efx_netdev_event(struct notifier_block *this, 1530static int efx_netdev_event(struct notifier_block *this,
1457 unsigned long event, void *ptr) 1531 unsigned long event, void *ptr)
1458{ 1532{
1459 struct net_device *net_dev = ptr; 1533 struct net_device *net_dev = ptr;
1460 1534
1461 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { 1535 if (net_dev->netdev_ops == &efx_netdev_ops &&
1462 struct efx_nic *efx = netdev_priv(net_dev); 1536 event == NETDEV_CHANGENAME)
1463 1537 efx_update_name(netdev_priv(net_dev));
1464 strcpy(efx->name, net_dev->name);
1465 }
1466 1538
1467 return NOTIFY_DONE; 1539 return NOTIFY_DONE;
1468} 1540}
@@ -1471,6 +1543,14 @@ static struct notifier_block efx_netdev_notifier = {
1471 .notifier_call = efx_netdev_event, 1543 .notifier_call = efx_netdev_event,
1472}; 1544};
1473 1545
1546static ssize_t
1547show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
1548{
1549 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
1550 return sprintf(buf, "%d\n", efx->phy_type);
1551}
1552static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
1553
1474static int efx_register_netdev(struct efx_nic *efx) 1554static int efx_register_netdev(struct efx_nic *efx)
1475{ 1555{
1476 struct net_device *net_dev = efx->net_dev; 1556 struct net_device *net_dev = efx->net_dev;
@@ -1478,18 +1558,7 @@ static int efx_register_netdev(struct efx_nic *efx)
1478 1558
1479 net_dev->watchdog_timeo = 5 * HZ; 1559 net_dev->watchdog_timeo = 5 * HZ;
1480 net_dev->irq = efx->pci_dev->irq; 1560 net_dev->irq = efx->pci_dev->irq;
1481 net_dev->open = efx_net_open; 1561 net_dev->netdev_ops = &efx_netdev_ops;
1482 net_dev->stop = efx_net_stop;
1483 net_dev->get_stats = efx_net_stats;
1484 net_dev->tx_timeout = &efx_watchdog;
1485 net_dev->hard_start_xmit = efx_hard_start_xmit;
1486 net_dev->do_ioctl = efx_ioctl;
1487 net_dev->change_mtu = efx_change_mtu;
1488 net_dev->set_mac_address = efx_set_mac_address;
1489 net_dev->set_multicast_list = efx_set_multicast_list;
1490#ifdef CONFIG_NET_POLL_CONTROLLER
1491 net_dev->poll_controller = efx_netpoll;
1492#endif
1493 SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev); 1562 SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
1494 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); 1563 SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
1495 1564
@@ -1497,7 +1566,7 @@ static int efx_register_netdev(struct efx_nic *efx)
1497 netif_carrier_off(efx->net_dev); 1566 netif_carrier_off(efx->net_dev);
1498 1567
1499 /* Clear MAC statistics */ 1568 /* Clear MAC statistics */
1500 falcon_update_stats_xmac(efx); 1569 efx->mac_op->update_stats(efx);
1501 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats)); 1570 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
1502 1571
1503 rc = register_netdev(net_dev); 1572 rc = register_netdev(net_dev);
@@ -1505,9 +1574,22 @@ static int efx_register_netdev(struct efx_nic *efx)
1505 EFX_ERR(efx, "could not register net dev\n"); 1574 EFX_ERR(efx, "could not register net dev\n");
1506 return rc; 1575 return rc;
1507 } 1576 }
1508 strcpy(efx->name, net_dev->name); 1577
1578 rtnl_lock();
1579 efx_update_name(efx);
1580 rtnl_unlock();
1581
1582 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
1583 if (rc) {
1584 EFX_ERR(efx, "failed to init net dev attributes\n");
1585 goto fail_registered;
1586 }
1509 1587
1510 return 0; 1588 return 0;
1589
1590fail_registered:
1591 unregister_netdev(net_dev);
1592 return rc;
1511} 1593}
1512 1594
1513static void efx_unregister_netdev(struct efx_nic *efx) 1595static void efx_unregister_netdev(struct efx_nic *efx)
@@ -1527,6 +1609,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
1527 1609
1528 if (efx_dev_registered(efx)) { 1610 if (efx_dev_registered(efx)) {
1529 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 1611 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
1612 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
1530 unregister_netdev(efx->net_dev); 1613 unregister_netdev(efx->net_dev);
1531 } 1614 }
1532} 1615}
@@ -1541,8 +1624,6 @@ static void efx_unregister_netdev(struct efx_nic *efx)
1541 * before reset. */ 1624 * before reset. */
1542void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) 1625void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1543{ 1626{
1544 int rc;
1545
1546 EFX_ASSERT_RESET_SERIALISED(efx); 1627 EFX_ASSERT_RESET_SERIALISED(efx);
1547 1628
1548 /* The net_dev->get_stats handler is quite slow, and will fail 1629 /* The net_dev->get_stats handler is quite slow, and will fail
@@ -1553,10 +1634,9 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
1553 1634
1554 efx_stop_all(efx); 1635 efx_stop_all(efx);
1555 mutex_lock(&efx->mac_lock); 1636 mutex_lock(&efx->mac_lock);
1637 mutex_lock(&efx->spi_lock);
1556 1638
1557 rc = falcon_xmac_get_settings(efx, ecmd); 1639 efx->phy_op->get_settings(efx, ecmd);
1558 if (rc)
1559 EFX_ERR(efx, "could not back up PHY settings\n");
1560 1640
1561 efx_fini_channels(efx); 1641 efx_fini_channels(efx);
1562} 1642}
@@ -1581,10 +1661,11 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok)
1581 if (ok) { 1661 if (ok) {
1582 efx_init_channels(efx); 1662 efx_init_channels(efx);
1583 1663
1584 if (falcon_xmac_set_settings(efx, ecmd)) 1664 if (efx->phy_op->set_settings(efx, ecmd))
1585 EFX_ERR(efx, "could not restore PHY settings\n"); 1665 EFX_ERR(efx, "could not restore PHY settings\n");
1586 } 1666 }
1587 1667
1668 mutex_unlock(&efx->spi_lock);
1588 mutex_unlock(&efx->mac_lock); 1669 mutex_unlock(&efx->mac_lock);
1589 1670
1590 if (ok) { 1671 if (ok) {
@@ -1607,7 +1688,7 @@ static int efx_reset(struct efx_nic *efx)
1607{ 1688{
1608 struct ethtool_cmd ecmd; 1689 struct ethtool_cmd ecmd;
1609 enum reset_type method = efx->reset_pending; 1690 enum reset_type method = efx->reset_pending;
1610 int rc; 1691 int rc = 0;
1611 1692
1612 /* Serialise with kernel interfaces */ 1693 /* Serialise with kernel interfaces */
1613 rtnl_lock(); 1694 rtnl_lock();
@@ -1616,7 +1697,7 @@ static int efx_reset(struct efx_nic *efx)
1616 * flag set so that efx_pci_probe_main will be retried */ 1697 * flag set so that efx_pci_probe_main will be retried */
1617 if (efx->state != STATE_RUNNING) { 1698 if (efx->state != STATE_RUNNING) {
1618 EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n"); 1699 EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
1619 goto unlock_rtnl; 1700 goto out_unlock;
1620 } 1701 }
1621 1702
1622 EFX_INFO(efx, "resetting (%d)\n", method); 1703 EFX_INFO(efx, "resetting (%d)\n", method);
@@ -1626,7 +1707,7 @@ static int efx_reset(struct efx_nic *efx)
1626 rc = falcon_reset_hw(efx, method); 1707 rc = falcon_reset_hw(efx, method);
1627 if (rc) { 1708 if (rc) {
1628 EFX_ERR(efx, "failed to reset hardware\n"); 1709 EFX_ERR(efx, "failed to reset hardware\n");
1629 goto fail; 1710 goto out_disable;
1630 } 1711 }
1631 1712
1632 /* Allow resets to be rescheduled. */ 1713 /* Allow resets to be rescheduled. */
@@ -1640,28 +1721,23 @@ static int efx_reset(struct efx_nic *efx)
1640 1721
1641 /* Leave device stopped if necessary */ 1722 /* Leave device stopped if necessary */
1642 if (method == RESET_TYPE_DISABLE) { 1723 if (method == RESET_TYPE_DISABLE) {
1724 efx_reset_up(efx, &ecmd, false);
1643 rc = -EIO; 1725 rc = -EIO;
1644 goto fail; 1726 } else {
1727 rc = efx_reset_up(efx, &ecmd, true);
1645 } 1728 }
1646 1729
1647 rc = efx_reset_up(efx, &ecmd, true); 1730out_disable:
1648 if (rc) 1731 if (rc) {
1649 goto disable; 1732 EFX_ERR(efx, "has been disabled\n");
1650 1733 efx->state = STATE_DISABLED;
1651 EFX_LOG(efx, "reset complete\n"); 1734 dev_close(efx->net_dev);
1652 unlock_rtnl: 1735 } else {
1653 rtnl_unlock(); 1736 EFX_LOG(efx, "reset complete\n");
1654 return 0; 1737 }
1655
1656 fail:
1657 efx_reset_up(efx, &ecmd, false);
1658 disable:
1659 EFX_ERR(efx, "has been disabled\n");
1660 efx->state = STATE_DISABLED;
1661 1738
1739out_unlock:
1662 rtnl_unlock(); 1740 rtnl_unlock();
1663 efx_unregister_netdev(efx);
1664 efx_fini_port(efx);
1665 return rc; 1741 return rc;
1666} 1742}
1667 1743
@@ -1709,7 +1785,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1709 1785
1710 efx->reset_pending = method; 1786 efx->reset_pending = method;
1711 1787
1712 queue_work(efx->reset_workqueue, &efx->reset_work); 1788 queue_work(reset_workqueue, &efx->reset_work);
1713} 1789}
1714 1790
1715/************************************************************************** 1791/**************************************************************************
@@ -1743,10 +1819,16 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
1743void efx_port_dummy_op_void(struct efx_nic *efx) {} 1819void efx_port_dummy_op_void(struct efx_nic *efx) {}
1744void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {} 1820void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {}
1745 1821
1822static struct efx_mac_operations efx_dummy_mac_operations = {
1823 .reconfigure = efx_port_dummy_op_void,
1824 .poll = efx_port_dummy_op_void,
1825 .irq = efx_port_dummy_op_void,
1826};
1827
1746static struct efx_phy_operations efx_dummy_phy_operations = { 1828static struct efx_phy_operations efx_dummy_phy_operations = {
1747 .init = efx_port_dummy_op_int, 1829 .init = efx_port_dummy_op_int,
1748 .reconfigure = efx_port_dummy_op_void, 1830 .reconfigure = efx_port_dummy_op_void,
1749 .check_hw = efx_port_dummy_op_int, 1831 .poll = efx_port_dummy_op_void,
1750 .fini = efx_port_dummy_op_void, 1832 .fini = efx_port_dummy_op_void,
1751 .clear_interrupt = efx_port_dummy_op_void, 1833 .clear_interrupt = efx_port_dummy_op_void,
1752}; 1834};
@@ -1755,6 +1837,7 @@ static struct efx_board efx_dummy_board_info = {
1755 .init = efx_port_dummy_op_int, 1837 .init = efx_port_dummy_op_int,
1756 .init_leds = efx_port_dummy_op_int, 1838 .init_leds = efx_port_dummy_op_int,
1757 .set_fault_led = efx_port_dummy_op_blink, 1839 .set_fault_led = efx_port_dummy_op_blink,
1840 .monitor = efx_port_dummy_op_int,
1758 .blink = efx_port_dummy_op_blink, 1841 .blink = efx_port_dummy_op_blink,
1759 .fini = efx_port_dummy_op_void, 1842 .fini = efx_port_dummy_op_void,
1760}; 1843};
@@ -1774,12 +1857,13 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1774 struct efx_channel *channel; 1857 struct efx_channel *channel;
1775 struct efx_tx_queue *tx_queue; 1858 struct efx_tx_queue *tx_queue;
1776 struct efx_rx_queue *rx_queue; 1859 struct efx_rx_queue *rx_queue;
1777 int i, rc; 1860 int i;
1778 1861
1779 /* Initialise common structures */ 1862 /* Initialise common structures */
1780 memset(efx, 0, sizeof(*efx)); 1863 memset(efx, 0, sizeof(*efx));
1781 spin_lock_init(&efx->biu_lock); 1864 spin_lock_init(&efx->biu_lock);
1782 spin_lock_init(&efx->phy_lock); 1865 spin_lock_init(&efx->phy_lock);
1866 mutex_init(&efx->spi_lock);
1783 INIT_WORK(&efx->reset_work, efx_reset_work); 1867 INIT_WORK(&efx->reset_work, efx_reset_work);
1784 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); 1868 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
1785 efx->pci_dev = pci_dev; 1869 efx->pci_dev = pci_dev;
@@ -1793,9 +1877,11 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1793 spin_lock_init(&efx->netif_stop_lock); 1877 spin_lock_init(&efx->netif_stop_lock);
1794 spin_lock_init(&efx->stats_lock); 1878 spin_lock_init(&efx->stats_lock);
1795 mutex_init(&efx->mac_lock); 1879 mutex_init(&efx->mac_lock);
1880 efx->mac_op = &efx_dummy_mac_operations;
1796 efx->phy_op = &efx_dummy_phy_operations; 1881 efx->phy_op = &efx_dummy_phy_operations;
1797 efx->mii.dev = net_dev; 1882 efx->mii.dev = net_dev;
1798 INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work); 1883 INIT_WORK(&efx->phy_work, efx_phy_work);
1884 INIT_WORK(&efx->mac_work, efx_mac_work);
1799 atomic_set(&efx->netif_stop_count, 1); 1885 atomic_set(&efx->netif_stop_count, 1);
1800 1886
1801 for (i = 0; i < EFX_MAX_CHANNELS; i++) { 1887 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
@@ -1841,34 +1927,18 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1841 efx->interrupt_mode = max(efx->type->max_interrupt_mode, 1927 efx->interrupt_mode = max(efx->type->max_interrupt_mode,
1842 interrupt_mode); 1928 interrupt_mode);
1843 1929
1844 efx->workqueue = create_singlethread_workqueue("sfc_work"); 1930 /* Would be good to use the net_dev name, but we're too early */
1845 if (!efx->workqueue) { 1931 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
1846 rc = -ENOMEM; 1932 pci_name(pci_dev));
1847 goto fail1; 1933 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
1848 } 1934 if (!efx->workqueue)
1849 1935 return -ENOMEM;
1850 efx->reset_workqueue = create_singlethread_workqueue("sfc_reset");
1851 if (!efx->reset_workqueue) {
1852 rc = -ENOMEM;
1853 goto fail2;
1854 }
1855 1936
1856 return 0; 1937 return 0;
1857
1858 fail2:
1859 destroy_workqueue(efx->workqueue);
1860 efx->workqueue = NULL;
1861
1862 fail1:
1863 return rc;
1864} 1938}
1865 1939
1866static void efx_fini_struct(struct efx_nic *efx) 1940static void efx_fini_struct(struct efx_nic *efx)
1867{ 1941{
1868 if (efx->reset_workqueue) {
1869 destroy_workqueue(efx->reset_workqueue);
1870 efx->reset_workqueue = NULL;
1871 }
1872 if (efx->workqueue) { 1942 if (efx->workqueue) {
1873 destroy_workqueue(efx->workqueue); 1943 destroy_workqueue(efx->workqueue);
1874 efx->workqueue = NULL; 1944 efx->workqueue = NULL;
@@ -1927,11 +1997,13 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
1927 1997
1928 efx_unregister_netdev(efx); 1998 efx_unregister_netdev(efx);
1929 1999
2000 efx_mtd_remove(efx);
2001
1930 /* Wait for any scheduled resets to complete. No more will be 2002 /* Wait for any scheduled resets to complete. No more will be
1931 * scheduled from this point because efx_stop_all() has been 2003 * scheduled from this point because efx_stop_all() has been
1932 * called, we are no longer registered with driverlink, and 2004 * called, we are no longer registered with driverlink, and
1933 * the net_device's have been removed. */ 2005 * the net_device's have been removed. */
1934 flush_workqueue(efx->reset_workqueue); 2006 cancel_work_sync(&efx->reset_work);
1935 2007
1936 efx_pci_remove_main(efx); 2008 efx_pci_remove_main(efx);
1937 2009
@@ -1992,6 +2064,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
1992 efx_fini_port(efx); 2064 efx_fini_port(efx);
1993 fail5: 2065 fail5:
1994 fail4: 2066 fail4:
2067 efx->board_info.fini(efx);
1995 fail3: 2068 fail3:
1996 efx_fini_napi(efx); 2069 efx_fini_napi(efx);
1997 fail2: 2070 fail2:
@@ -2045,14 +2118,23 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2045 * we're in STATE_INIT. */ 2118 * we're in STATE_INIT. */
2046 for (i = 0; i < 5; i++) { 2119 for (i = 0; i < 5; i++) {
2047 rc = efx_pci_probe_main(efx); 2120 rc = efx_pci_probe_main(efx);
2048 if (rc == 0)
2049 break;
2050 2121
2051 /* Serialise against efx_reset(). No more resets will be 2122 /* Serialise against efx_reset(). No more resets will be
2052 * scheduled since efx_stop_all() has been called, and we 2123 * scheduled since efx_stop_all() has been called, and we
2053 * have not and never have been registered with either 2124 * have not and never have been registered with either
2054 * the rtnetlink or driverlink layers. */ 2125 * the rtnetlink or driverlink layers. */
2055 flush_workqueue(efx->reset_workqueue); 2126 cancel_work_sync(&efx->reset_work);
2127
2128 if (rc == 0) {
2129 if (efx->reset_pending != RESET_TYPE_NONE) {
2130 /* If there was a scheduled reset during
2131 * probe, the NIC is probably hosed anyway */
2132 efx_pci_remove_main(efx);
2133 rc = -EIO;
2134 } else {
2135 break;
2136 }
2137 }
2056 2138
2057 /* Retry if a recoverably reset event has been scheduled */ 2139 /* Retry if a recoverably reset event has been scheduled */
2058 if ((efx->reset_pending != RESET_TYPE_INVISIBLE) && 2140 if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
@@ -2070,16 +2152,15 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2070 /* Switch to the running state before we expose the device to 2152 /* Switch to the running state before we expose the device to
2071 * the OS. This is to ensure that the initial gathering of 2153 * the OS. This is to ensure that the initial gathering of
2072 * MAC stats succeeds. */ 2154 * MAC stats succeeds. */
2073 rtnl_lock();
2074 efx->state = STATE_RUNNING; 2155 efx->state = STATE_RUNNING;
2075 rtnl_unlock(); 2156
2157 efx_mtd_probe(efx); /* allowed to fail */
2076 2158
2077 rc = efx_register_netdev(efx); 2159 rc = efx_register_netdev(efx);
2078 if (rc) 2160 if (rc)
2079 goto fail5; 2161 goto fail5;
2080 2162
2081 EFX_LOG(efx, "initialisation successful\n"); 2163 EFX_LOG(efx, "initialisation successful\n");
2082
2083 return 0; 2164 return 0;
2084 2165
2085 fail5: 2166 fail5:
@@ -2127,6 +2208,11 @@ static int __init efx_init_module(void)
2127 rc = -ENOMEM; 2208 rc = -ENOMEM;
2128 goto err_refill; 2209 goto err_refill;
2129 } 2210 }
2211 reset_workqueue = create_singlethread_workqueue("sfc_reset");
2212 if (!reset_workqueue) {
2213 rc = -ENOMEM;
2214 goto err_reset;
2215 }
2130 2216
2131 rc = pci_register_driver(&efx_pci_driver); 2217 rc = pci_register_driver(&efx_pci_driver);
2132 if (rc < 0) 2218 if (rc < 0)
@@ -2135,6 +2221,8 @@ static int __init efx_init_module(void)
2135 return 0; 2221 return 0;
2136 2222
2137 err_pci: 2223 err_pci:
2224 destroy_workqueue(reset_workqueue);
2225 err_reset:
2138 destroy_workqueue(refill_workqueue); 2226 destroy_workqueue(refill_workqueue);
2139 err_refill: 2227 err_refill:
2140 unregister_netdevice_notifier(&efx_netdev_notifier); 2228 unregister_netdevice_notifier(&efx_netdev_notifier);
@@ -2147,6 +2235,7 @@ static void __exit efx_exit_module(void)
2147 printk(KERN_INFO "Solarflare NET driver unloading\n"); 2235 printk(KERN_INFO "Solarflare NET driver unloading\n");
2148 2236
2149 pci_unregister_driver(&efx_pci_driver); 2237 pci_unregister_driver(&efx_pci_driver);
2238 destroy_workqueue(reset_workqueue);
2150 destroy_workqueue(refill_workqueue); 2239 destroy_workqueue(refill_workqueue);
2151 unregister_netdevice_notifier(&efx_netdev_notifier); 2240 unregister_netdevice_notifier(&efx_netdev_notifier);
2152 2241
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index d02937b70eee..0dd7a532c78a 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -58,6 +58,16 @@ extern int efx_port_dummy_op_int(struct efx_nic *efx);
58extern void efx_port_dummy_op_void(struct efx_nic *efx); 58extern void efx_port_dummy_op_void(struct efx_nic *efx);
59extern void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink); 59extern void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink);
60 60
61/* MTD */
62#ifdef CONFIG_SFC_MTD
63extern int efx_mtd_probe(struct efx_nic *efx);
64extern void efx_mtd_rename(struct efx_nic *efx);
65extern void efx_mtd_remove(struct efx_nic *efx);
66#else
67static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
68static inline void efx_mtd_rename(struct efx_nic *efx) {}
69static inline void efx_mtd_remove(struct efx_nic *efx) {}
70#endif
61 71
62extern unsigned int efx_monitor_interval; 72extern unsigned int efx_monitor_interval;
63 73
@@ -67,7 +77,7 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
67 channel->channel, raw_smp_processor_id()); 77 channel->channel, raw_smp_processor_id());
68 channel->work_pending = true; 78 channel->work_pending = true;
69 79
70 netif_rx_schedule(channel->napi_dev, &channel->napi_str); 80 netif_rx_schedule(&channel->napi_str);
71} 81}
72 82
73#endif /* EFX_EFX_H */ 83#endif /* EFX_EFX_H */
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index cec15dbb88e4..60cbc6e1e66b 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc. 3 * Copyright 2007-2008 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -13,22 +13,24 @@
13/** 13/**
14 * enum efx_loopback_mode - loopback modes 14 * enum efx_loopback_mode - loopback modes
15 * @LOOPBACK_NONE: no loopback 15 * @LOOPBACK_NONE: no loopback
16 * @LOOPBACK_XGMII: loopback within MAC at XGMII level 16 * @LOOPBACK_GMAC: loopback within GMAC at unspecified level
17 * @LOOPBACK_XGXS: loopback within MAC at XGXS level 17 * @LOOPBACK_XGMII: loopback within XMAC at XGMII level
18 * @LOOPBACK_XAUI: loopback within MAC at XAUI level 18 * @LOOPBACK_XGXS: loopback within XMAC at XGXS level
19 * @LOOPBACK_PHYXS: loopback within PHY at PHYXS level 19 * @LOOPBACK_XAUI: loopback within XMAC at XAUI level
20 * @LOOPBACK_PCS: loopback within PHY at PCS level 20 * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
21 * @LOOPBACK_PMAPMD: loopback within PHY at PMAPMD level 21 * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
22 * @LOOPBACK_PCS: loopback within 10G PHY at PCS level
23 * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
22 * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!) 24 * @LOOPBACK_NETWORK: reflecting loopback (even further than furthest!)
23 */ 25 */
24/* Please keep in order and up-to-date w.r.t the following two #defines */ 26/* Please keep in order and up-to-date w.r.t the following two #defines */
25enum efx_loopback_mode { 27enum efx_loopback_mode {
26 LOOPBACK_NONE = 0, 28 LOOPBACK_NONE = 0,
27 LOOPBACK_MAC = 1, 29 LOOPBACK_GMAC = 1,
28 LOOPBACK_XGMII = 2, 30 LOOPBACK_XGMII = 2,
29 LOOPBACK_XGXS = 3, 31 LOOPBACK_XGXS = 3,
30 LOOPBACK_XAUI = 4, 32 LOOPBACK_XAUI = 4,
31 LOOPBACK_PHY = 5, 33 LOOPBACK_GPHY = 5,
32 LOOPBACK_PHYXS = 6, 34 LOOPBACK_PHYXS = 6,
33 LOOPBACK_PCS = 7, 35 LOOPBACK_PCS = 7,
34 LOOPBACK_PMAPMD = 8, 36 LOOPBACK_PMAPMD = 8,
@@ -45,15 +47,19 @@ extern const char *efx_loopback_mode_names[];
45 LOOPBACK_MODE_NAME(efx->loopback_mode) 47 LOOPBACK_MODE_NAME(efx->loopback_mode)
46 48
47/* These loopbacks occur within the controller */ 49/* These loopbacks occur within the controller */
48#define LOOPBACKS_10G_INTERNAL ((1 << LOOPBACK_XGMII)| \ 50#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_GMAC) | \
49 (1 << LOOPBACK_XGXS) | \ 51 (1 << LOOPBACK_XGMII)| \
50 (1 << LOOPBACK_XAUI)) 52 (1 << LOOPBACK_XGXS) | \
53 (1 << LOOPBACK_XAUI))
51 54
52#define LOOPBACK_MASK(_efx) \ 55#define LOOPBACK_MASK(_efx) \
53 (1 << (_efx)->loopback_mode) 56 (1 << (_efx)->loopback_mode)
54 57
55#define LOOPBACK_INTERNAL(_efx) \ 58#define LOOPBACK_INTERNAL(_efx) \
56 (!!(LOOPBACKS_10G_INTERNAL & LOOPBACK_MASK(_efx))) 59 (!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
60
61#define LOOPBACK_CHANGED(_from, _to, _mask) \
62 (!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
57 63
58#define LOOPBACK_OUT_OF(_from, _to, _mask) \ 64#define LOOPBACK_OUT_OF(_from, _to, _mask) \
59 ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask))) 65 ((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
@@ -72,7 +78,7 @@ extern const char *efx_loopback_mode_names[];
72 * @RESET_TYPE_ALL: reset everything but PCI core blocks 78 * @RESET_TYPE_ALL: reset everything but PCI core blocks
73 * @RESET_TYPE_WORLD: reset everything, save & restore PCI config 79 * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
74 * @RESET_TYPE_DISABLE: disable NIC 80 * @RESET_TYPE_DISABLE: disable NIC
75 * @RESET_TYPE_MONITOR: reset due to hardware monitor 81 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
76 * @RESET_TYPE_INT_ERROR: reset due to internal error 82 * @RESET_TYPE_INT_ERROR: reset due to internal error
77 * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors 83 * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
78 * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch 84 * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
@@ -86,7 +92,7 @@ enum reset_type {
86 RESET_TYPE_WORLD = 2, 92 RESET_TYPE_WORLD = 2,
87 RESET_TYPE_DISABLE = 3, 93 RESET_TYPE_DISABLE = 3,
88 RESET_TYPE_MAX_METHOD, 94 RESET_TYPE_MAX_METHOD,
89 RESET_TYPE_MONITOR, 95 RESET_TYPE_TX_WATCHDOG,
90 RESET_TYPE_INT_ERROR, 96 RESET_TYPE_INT_ERROR,
91 RESET_TYPE_RX_RECOVERY, 97 RESET_TYPE_RX_RECOVERY,
92 RESET_TYPE_RX_DESC_FETCH, 98 RESET_TYPE_RX_DESC_FETCH,
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index cd0d0873d978..53d259e90187 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -12,24 +12,24 @@
12#include <linux/ethtool.h> 12#include <linux/ethtool.h>
13#include <linux/rtnetlink.h> 13#include <linux/rtnetlink.h>
14#include "net_driver.h" 14#include "net_driver.h"
15#include "workarounds.h"
15#include "selftest.h" 16#include "selftest.h"
16#include "efx.h" 17#include "efx.h"
17#include "ethtool.h" 18#include "ethtool.h"
18#include "falcon.h" 19#include "falcon.h"
19#include "gmii.h"
20#include "spi.h" 20#include "spi.h"
21#include "mac.h" 21#include "mdio_10g.h"
22 22
23const char *efx_loopback_mode_names[] = { 23const char *efx_loopback_mode_names[] = {
24 [LOOPBACK_NONE] = "NONE", 24 [LOOPBACK_NONE] = "NONE",
25 [LOOPBACK_MAC] = "MAC", 25 [LOOPBACK_GMAC] = "GMAC",
26 [LOOPBACK_XGMII] = "XGMII", 26 [LOOPBACK_XGMII] = "XGMII",
27 [LOOPBACK_XGXS] = "XGXS", 27 [LOOPBACK_XGXS] = "XGXS",
28 [LOOPBACK_XAUI] = "XAUI", 28 [LOOPBACK_XAUI] = "XAUI",
29 [LOOPBACK_PHY] = "PHY", 29 [LOOPBACK_GPHY] = "GPHY",
30 [LOOPBACK_PHYXS] = "PHY(XS)", 30 [LOOPBACK_PHYXS] = "PHYXS",
31 [LOOPBACK_PCS] = "PHY(PCS)", 31 [LOOPBACK_PCS] = "PCS",
32 [LOOPBACK_PMAPMD] = "PHY(PMAPMD)", 32 [LOOPBACK_PMAPMD] = "PMA/PMD",
33 [LOOPBACK_NETWORK] = "NETWORK", 33 [LOOPBACK_NETWORK] = "NETWORK",
34}; 34};
35 35
@@ -172,10 +172,7 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
172/* Number of ethtool statistics */ 172/* Number of ethtool statistics */
173#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats) 173#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
174 174
175/* EEPROM range with gPXE configuration */
176#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB 175#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
177#define EFX_ETHTOOL_EEPROM_MIN 0x800U
178#define EFX_ETHTOOL_EEPROM_MAX 0x1800U
179 176
180/************************************************************************** 177/**************************************************************************
181 * 178 *
@@ -185,12 +182,16 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
185 */ 182 */
186 183
187/* Identify device by flashing LEDs */ 184/* Identify device by flashing LEDs */
188static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds) 185static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count)
189{ 186{
190 struct efx_nic *efx = netdev_priv(net_dev); 187 struct efx_nic *efx = netdev_priv(net_dev);
191 188
192 efx->board_info.blink(efx, 1); 189 efx->board_info.blink(efx, 1);
193 schedule_timeout_interruptible(seconds * HZ); 190 set_current_state(TASK_INTERRUPTIBLE);
191 if (count)
192 schedule_timeout(count * HZ);
193 else
194 schedule();
194 efx->board_info.blink(efx, 0); 195 efx->board_info.blink(efx, 0);
195 return 0; 196 return 0;
196} 197}
@@ -200,13 +201,15 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
200 struct ethtool_cmd *ecmd) 201 struct ethtool_cmd *ecmd)
201{ 202{
202 struct efx_nic *efx = netdev_priv(net_dev); 203 struct efx_nic *efx = netdev_priv(net_dev);
203 int rc;
204 204
205 mutex_lock(&efx->mac_lock); 205 mutex_lock(&efx->mac_lock);
206 rc = falcon_xmac_get_settings(efx, ecmd); 206 efx->phy_op->get_settings(efx, ecmd);
207 mutex_unlock(&efx->mac_lock); 207 mutex_unlock(&efx->mac_lock);
208 208
209 return rc; 209 /* Falcon GMAC does not support 1000Mbps HD */
210 ecmd->supported &= ~SUPPORTED_1000baseT_Half;
211
212 return 0;
210} 213}
211 214
212/* This must be called with rtnl_lock held. */ 215/* This must be called with rtnl_lock held. */
@@ -216,8 +219,18 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
216 struct efx_nic *efx = netdev_priv(net_dev); 219 struct efx_nic *efx = netdev_priv(net_dev);
217 int rc; 220 int rc;
218 221
222 if (EFX_WORKAROUND_13963(efx) && !ecmd->autoneg)
223 return -EINVAL;
224
225 /* Falcon GMAC does not support 1000Mbps HD */
226 if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
227 EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
228 " setting\n");
229 return -EINVAL;
230 }
231
219 mutex_lock(&efx->mac_lock); 232 mutex_lock(&efx->mac_lock);
220 rc = falcon_xmac_set_settings(efx, ecmd); 233 rc = efx->phy_op->set_settings(efx, ecmd);
221 mutex_unlock(&efx->mac_lock); 234 mutex_unlock(&efx->mac_lock);
222 if (!rc) 235 if (!rc)
223 efx_reconfigure_port(efx); 236 efx_reconfigure_port(efx);
@@ -241,10 +254,10 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
241 * @strings: Ethtool strings, or %NULL 254 * @strings: Ethtool strings, or %NULL
242 * @data: Ethtool test results, or %NULL 255 * @data: Ethtool test results, or %NULL
243 * @test: Pointer to test result (used only if data != %NULL) 256 * @test: Pointer to test result (used only if data != %NULL)
244 * @unit_format: Unit name format (e.g. "channel\%d") 257 * @unit_format: Unit name format (e.g. "chan\%d")
245 * @unit_id: Unit id (e.g. 0 for "channel0") 258 * @unit_id: Unit id (e.g. 0 for "chan0")
246 * @test_format: Test name format (e.g. "loopback.\%s.tx.sent") 259 * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
247 * @test_id: Test id (e.g. "PHY" for "loopback.PHY.tx_sent") 260 * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
248 * 261 *
249 * Fill in an individual self-test entry. 262 * Fill in an individual self-test entry.
250 */ 263 */
@@ -261,18 +274,20 @@ static void efx_fill_test(unsigned int test_index,
261 274
262 /* Fill string, if applicable */ 275 /* Fill string, if applicable */
263 if (strings) { 276 if (strings) {
264 snprintf(unit_str.name, sizeof(unit_str.name), 277 if (strchr(unit_format, '%'))
265 unit_format, unit_id); 278 snprintf(unit_str.name, sizeof(unit_str.name),
279 unit_format, unit_id);
280 else
281 strcpy(unit_str.name, unit_format);
266 snprintf(test_str.name, sizeof(test_str.name), 282 snprintf(test_str.name, sizeof(test_str.name),
267 test_format, test_id); 283 test_format, test_id);
268 snprintf(strings[test_index].name, 284 snprintf(strings[test_index].name,
269 sizeof(strings[test_index].name), 285 sizeof(strings[test_index].name),
270 "%-9s%-17s", unit_str.name, test_str.name); 286 "%-6s %-24s", unit_str.name, test_str.name);
271 } 287 }
272} 288}
273 289
274#define EFX_PORT_NAME "port%d", 0 290#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
275#define EFX_CHANNEL_NAME(_channel) "channel%d", _channel->channel
276#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue 291#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
277#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue 292#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
278#define EFX_LOOPBACK_NAME(_mode, _counter) \ 293#define EFX_LOOPBACK_NAME(_mode, _counter) \
@@ -307,11 +322,11 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
307 } 322 }
308 efx_fill_test(test_index++, strings, data, 323 efx_fill_test(test_index++, strings, data,
309 &lb_tests->rx_good, 324 &lb_tests->rx_good,
310 EFX_PORT_NAME, 325 "rx", 0,
311 EFX_LOOPBACK_NAME(mode, "rx_good")); 326 EFX_LOOPBACK_NAME(mode, "rx_good"));
312 efx_fill_test(test_index++, strings, data, 327 efx_fill_test(test_index++, strings, data,
313 &lb_tests->rx_bad, 328 &lb_tests->rx_bad,
314 EFX_PORT_NAME, 329 "rx", 0,
315 EFX_LOOPBACK_NAME(mode, "rx_bad")); 330 EFX_LOOPBACK_NAME(mode, "rx_bad"));
316 331
317 return test_index; 332 return test_index;
@@ -330,7 +345,7 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
330 u64 *data) 345 u64 *data)
331{ 346{
332 struct efx_channel *channel; 347 struct efx_channel *channel;
333 unsigned int n = 0; 348 unsigned int n = 0, i;
334 enum efx_loopback_mode mode; 349 enum efx_loopback_mode mode;
335 350
336 efx_fill_test(n++, strings, data, &tests->mii, 351 efx_fill_test(n++, strings, data, &tests->mii,
@@ -358,14 +373,12 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
358 373
359 efx_fill_test(n++, strings, data, &tests->registers, 374 efx_fill_test(n++, strings, data, &tests->registers,
360 "core", 0, "registers", NULL); 375 "core", 0, "registers", NULL);
361 efx_fill_test(n++, strings, data, &tests->phy, 376
362 EFX_PORT_NAME, "phy", NULL); 377 for (i = 0; i < efx->phy_op->num_tests; i++)
378 efx_fill_test(n++, strings, data, &tests->phy[i],
379 "phy", 0, efx->phy_op->test_names[i], NULL);
363 380
364 /* Loopback tests */ 381 /* Loopback tests */
365 efx_fill_test(n++, strings, data, &tests->loopback_speed,
366 EFX_PORT_NAME, "loopback.speed", NULL);
367 efx_fill_test(n++, strings, data, &tests->loopback_full_duplex,
368 EFX_PORT_NAME, "loopback.full_duplex", NULL);
369 for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { 382 for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
370 if (!(efx->loopback_modes & (1 << mode))) 383 if (!(efx->loopback_modes & (1 << mode)))
371 continue; 384 continue;
@@ -429,7 +442,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
429 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS); 442 EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
430 443
431 /* Update MAC and NIC statistics */ 444 /* Update MAC and NIC statistics */
432 net_dev->get_stats(net_dev); 445 dev_get_stats(net_dev);
433 446
434 /* Fill detailed statistics buffer */ 447 /* Fill detailed statistics buffer */
435 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { 448 for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
@@ -476,7 +489,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
476{ 489{
477 struct efx_nic *efx = netdev_priv(net_dev); 490 struct efx_nic *efx = netdev_priv(net_dev);
478 struct efx_self_tests efx_tests; 491 struct efx_self_tests efx_tests;
479 int offline, already_up; 492 int already_up;
480 int rc; 493 int rc;
481 494
482 ASSERT_RTNL(); 495 ASSERT_RTNL();
@@ -496,24 +509,15 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
496 } 509 }
497 510
498 memset(&efx_tests, 0, sizeof(efx_tests)); 511 memset(&efx_tests, 0, sizeof(efx_tests));
499 offline = (test->flags & ETH_TEST_FL_OFFLINE);
500
501 /* Perform online self tests first */
502 rc = efx_online_test(efx, &efx_tests);
503 if (rc)
504 goto out;
505 512
506 /* Perform offline tests only if online tests passed */ 513 rc = efx_selftest(efx, &efx_tests, test->flags);
507 if (offline)
508 rc = efx_offline_test(efx, &efx_tests,
509 efx->loopback_modes);
510 514
511 out:
512 if (!already_up) 515 if (!already_up)
513 dev_close(efx->net_dev); 516 dev_close(efx->net_dev);
514 517
515 EFX_LOG(efx, "%s all %sline self-tests\n", 518 EFX_LOG(efx, "%s %sline self-tests\n",
516 rc == 0 ? "passed" : "failed", offline ? "off" : "on"); 519 rc == 0 ? "passed" : "failed",
520 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
517 521
518 fail2: 522 fail2:
519 fail1: 523 fail1:
@@ -545,8 +549,8 @@ static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
545 549
546 if (!spi) 550 if (!spi)
547 return 0; 551 return 0;
548 return min(spi->size, EFX_ETHTOOL_EEPROM_MAX) - 552 return min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
549 min(spi->size, EFX_ETHTOOL_EEPROM_MIN); 553 min(spi->size, EFX_EEPROM_BOOTCONFIG_START);
550} 554}
551 555
552static int efx_ethtool_get_eeprom(struct net_device *net_dev, 556static int efx_ethtool_get_eeprom(struct net_device *net_dev,
@@ -557,8 +561,13 @@ static int efx_ethtool_get_eeprom(struct net_device *net_dev,
557 size_t len; 561 size_t len;
558 int rc; 562 int rc;
559 563
560 rc = falcon_spi_read(spi, eeprom->offset + EFX_ETHTOOL_EEPROM_MIN, 564 rc = mutex_lock_interruptible(&efx->spi_lock);
565 if (rc)
566 return rc;
567 rc = falcon_spi_read(spi, eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
561 eeprom->len, &len, buf); 568 eeprom->len, &len, buf);
569 mutex_unlock(&efx->spi_lock);
570
562 eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC; 571 eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC;
563 eeprom->len = len; 572 eeprom->len = len;
564 return rc; 573 return rc;
@@ -575,8 +584,13 @@ static int efx_ethtool_set_eeprom(struct net_device *net_dev,
575 if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC) 584 if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC)
576 return -EINVAL; 585 return -EINVAL;
577 586
578 rc = falcon_spi_write(spi, eeprom->offset + EFX_ETHTOOL_EEPROM_MIN, 587 rc = mutex_lock_interruptible(&efx->spi_lock);
588 if (rc)
589 return rc;
590 rc = falcon_spi_write(spi, eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
579 eeprom->len, &len, buf); 591 eeprom->len, &len, buf);
592 mutex_unlock(&efx->spi_lock);
593
580 eeprom->len = len; 594 eeprom->len = len;
581 return rc; 595 return rc;
582} 596}
@@ -666,23 +680,52 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
666 struct ethtool_pauseparam *pause) 680 struct ethtool_pauseparam *pause)
667{ 681{
668 struct efx_nic *efx = netdev_priv(net_dev); 682 struct efx_nic *efx = netdev_priv(net_dev);
669 enum efx_fc_type flow_control = efx->flow_control; 683 enum efx_fc_type wanted_fc;
670 int rc; 684 bool reset;
671 685
672 flow_control &= ~(EFX_FC_RX | EFX_FC_TX | EFX_FC_AUTO); 686 wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
673 flow_control |= pause->rx_pause ? EFX_FC_RX : 0; 687 (pause->tx_pause ? EFX_FC_TX : 0) |
674 flow_control |= pause->tx_pause ? EFX_FC_TX : 0; 688 (pause->autoneg ? EFX_FC_AUTO : 0));
675 flow_control |= pause->autoneg ? EFX_FC_AUTO : 0; 689
690 if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
691 EFX_LOG(efx, "Flow control unsupported: tx ON rx OFF\n");
692 return -EINVAL;
693 }
694
695 if (!(efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_AN)) &&
696 (wanted_fc & EFX_FC_AUTO)) {
697 EFX_LOG(efx, "PHY does not support flow control "
698 "autonegotiation\n");
699 return -EINVAL;
700 }
701
702 /* TX flow control may automatically turn itself off if the
703 * link partner (intermittently) stops responding to pause
704 * frames. There isn't any indication that this has happened,
705 * so the best we do is leave it up to the user to spot this
706 * and fix it be cycling transmit flow control on this end. */
707 reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
708 if (EFX_WORKAROUND_11482(efx) && reset) {
709 if (falcon_rev(efx) >= FALCON_REV_B0) {
710 /* Recover by resetting the EM block */
711 if (efx->link_up)
712 falcon_drain_tx_fifo(efx);
713 } else {
714 /* Schedule a reset to recover */
715 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
716 }
717 }
676 718
677 /* Try to push the pause parameters */ 719 /* Try to push the pause parameters */
678 mutex_lock(&efx->mac_lock); 720 mutex_lock(&efx->mac_lock);
679 rc = falcon_xmac_set_pause(efx, flow_control);
680 mutex_unlock(&efx->mac_lock);
681 721
682 if (!rc) 722 efx->wanted_fc = wanted_fc;
683 efx_reconfigure_port(efx); 723 mdio_clause45_set_pause(efx);
724 __efx_reconfigure_port(efx);
684 725
685 return rc; 726 mutex_unlock(&efx->mac_lock);
727
728 return 0;
686} 729}
687 730
688static void efx_ethtool_get_pauseparam(struct net_device *net_dev, 731static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
@@ -690,9 +733,9 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
690{ 733{
691 struct efx_nic *efx = netdev_priv(net_dev); 734 struct efx_nic *efx = netdev_priv(net_dev);
692 735
693 pause->rx_pause = !!(efx->flow_control & EFX_FC_RX); 736 pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
694 pause->tx_pause = !!(efx->flow_control & EFX_FC_TX); 737 pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
695 pause->autoneg = !!(efx->flow_control & EFX_FC_AUTO); 738 pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
696} 739}
697 740
698 741
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 31ed1f49de00..6884dc8c1f82 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -15,11 +15,11 @@
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include <linux/i2c.h> 16#include <linux/i2c.h>
17#include <linux/i2c-algo-bit.h> 17#include <linux/i2c-algo-bit.h>
18#include <linux/mii.h>
18#include "net_driver.h" 19#include "net_driver.h"
19#include "bitfield.h" 20#include "bitfield.h"
20#include "efx.h" 21#include "efx.h"
21#include "mac.h" 22#include "mac.h"
22#include "gmii.h"
23#include "spi.h" 23#include "spi.h"
24#include "falcon.h" 24#include "falcon.h"
25#include "falcon_hwdefs.h" 25#include "falcon_hwdefs.h"
@@ -70,6 +70,20 @@ static int disable_dma_stats;
70#define RX_DC_ENTRIES_ORDER 2 70#define RX_DC_ENTRIES_ORDER 2
71#define RX_DC_BASE 0x100000 71#define RX_DC_BASE 0x100000
72 72
73static const unsigned int
74/* "Large" EEPROM device: Atmel AT25640 or similar
75 * 8 KB, 16-bit address, 32 B write block */
76large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
77 | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
78 | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
79/* Default flash device: Atmel AT25F1024
80 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
81default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
82 | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
83 | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
84 | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
85 | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
86
73/* RX FIFO XOFF watermark 87/* RX FIFO XOFF watermark
74 * 88 *
75 * When the amount of the RX FIFO increases used increases past this 89 * When the amount of the RX FIFO increases used increases past this
@@ -770,15 +784,18 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
770 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | 784 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
771 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); 785 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
772 786
773 /* Count errors that are not in MAC stats. */ 787 /* Count errors that are not in MAC stats. Ignore expected
788 * checksum errors during self-test. */
774 if (rx_ev_frm_trunc) 789 if (rx_ev_frm_trunc)
775 ++rx_queue->channel->n_rx_frm_trunc; 790 ++rx_queue->channel->n_rx_frm_trunc;
776 else if (rx_ev_tobe_disc) 791 else if (rx_ev_tobe_disc)
777 ++rx_queue->channel->n_rx_tobe_disc; 792 ++rx_queue->channel->n_rx_tobe_disc;
778 else if (rx_ev_ip_hdr_chksum_err) 793 else if (!efx->loopback_selftest) {
779 ++rx_queue->channel->n_rx_ip_hdr_chksum_err; 794 if (rx_ev_ip_hdr_chksum_err)
780 else if (rx_ev_tcp_udp_chksum_err) 795 ++rx_queue->channel->n_rx_ip_hdr_chksum_err;
781 ++rx_queue->channel->n_rx_tcp_udp_chksum_err; 796 else if (rx_ev_tcp_udp_chksum_err)
797 ++rx_queue->channel->n_rx_tcp_udp_chksum_err;
798 }
782 if (rx_ev_ip_frag_err) 799 if (rx_ev_ip_frag_err)
783 ++rx_queue->channel->n_rx_ip_frag_err; 800 ++rx_queue->channel->n_rx_ip_frag_err;
784 801
@@ -809,7 +826,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
809#endif 826#endif
810 827
811 if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) && 828 if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
812 efx->phy_type == PHY_TYPE_10XPRESS)) 829 efx->phy_type == PHY_TYPE_SFX7101))
813 tenxpress_crc_err(efx); 830 tenxpress_crc_err(efx);
814} 831}
815 832
@@ -893,22 +910,20 @@ static void falcon_handle_global_event(struct efx_channel *channel,
893 efx_qword_t *event) 910 efx_qword_t *event)
894{ 911{
895 struct efx_nic *efx = channel->efx; 912 struct efx_nic *efx = channel->efx;
896 bool is_phy_event = false, handled = false; 913 bool handled = false;
897 914
898 /* Check for interrupt on either port. Some boards have a
899 * single PHY wired to the interrupt line for port 1. */
900 if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) || 915 if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
901 EFX_QWORD_FIELD(*event, G_PHY1_INTR) || 916 EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
902 EFX_QWORD_FIELD(*event, XG_PHY_INTR)) 917 EFX_QWORD_FIELD(*event, XG_PHY_INTR) ||
903 is_phy_event = true; 918 EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) {
919 efx->phy_op->clear_interrupt(efx);
920 queue_work(efx->workqueue, &efx->phy_work);
921 handled = true;
922 }
904 923
905 if ((falcon_rev(efx) >= FALCON_REV_B0) && 924 if ((falcon_rev(efx) >= FALCON_REV_B0) &&
906 EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) 925 EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) {
907 is_phy_event = true; 926 queue_work(efx->workqueue, &efx->mac_work);
908
909 if (is_phy_event) {
910 efx->phy_op->clear_interrupt(efx);
911 queue_work(efx->workqueue, &efx->reconfigure_work);
912 handled = true; 927 handled = true;
913 } 928 }
914 929
@@ -1151,6 +1166,19 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
1151 falcon_generate_event(channel, &test_event); 1166 falcon_generate_event(channel, &test_event);
1152} 1167}
1153 1168
1169void falcon_sim_phy_event(struct efx_nic *efx)
1170{
1171 efx_qword_t phy_event;
1172
1173 EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE);
1174 if (EFX_IS10G(efx))
1175 EFX_SET_OWORD_FIELD(phy_event, XG_PHY_INTR, 1);
1176 else
1177 EFX_SET_OWORD_FIELD(phy_event, G_PHY0_INTR, 1);
1178
1179 falcon_generate_event(&efx->channel[0], &phy_event);
1180}
1181
1154/************************************************************************** 1182/**************************************************************************
1155 * 1183 *
1156 * Flush handling 1184 * Flush handling
@@ -1560,7 +1588,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
1560 efx_for_each_channel(channel, efx) { 1588 efx_for_each_channel(channel, efx) {
1561 rc = request_irq(channel->irq, falcon_msi_interrupt, 1589 rc = request_irq(channel->irq, falcon_msi_interrupt,
1562 IRQF_PROBE_SHARED, /* Not shared */ 1590 IRQF_PROBE_SHARED, /* Not shared */
1563 efx->name, channel); 1591 channel->name, channel);
1564 if (rc) { 1592 if (rc) {
1565 EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq); 1593 EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
1566 goto fail2; 1594 goto fail2;
@@ -1605,32 +1633,45 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1605 ************************************************************************** 1633 **************************************************************************
1606 */ 1634 */
1607 1635
1608#define FALCON_SPI_MAX_LEN ((unsigned) sizeof(efx_oword_t)) 1636#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
1637
1638static int falcon_spi_poll(struct efx_nic *efx)
1639{
1640 efx_oword_t reg;
1641 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
1642 return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
1643}
1609 1644
1610/* Wait for SPI command completion */ 1645/* Wait for SPI command completion */
1611static int falcon_spi_wait(struct efx_nic *efx) 1646static int falcon_spi_wait(struct efx_nic *efx)
1612{ 1647{
1613 unsigned long timeout = jiffies + DIV_ROUND_UP(HZ, 10); 1648 /* Most commands will finish quickly, so we start polling at
1614 efx_oword_t reg; 1649 * very short intervals. Sometimes the command may have to
1615 bool cmd_en, timer_active; 1650 * wait for VPD or expansion ROM access outside of our
1651 * control, so we allow up to 100 ms. */
1652 unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
1653 int i;
1654
1655 for (i = 0; i < 10; i++) {
1656 if (!falcon_spi_poll(efx))
1657 return 0;
1658 udelay(10);
1659 }
1616 1660
1617 for (;;) { 1661 for (;;) {
1618 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER); 1662 if (!falcon_spi_poll(efx))
1619 cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN);
1620 timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE);
1621 if (!cmd_en && !timer_active)
1622 return 0; 1663 return 0;
1623 if (time_after_eq(jiffies, timeout)) { 1664 if (time_after_eq(jiffies, timeout)) {
1624 EFX_ERR(efx, "timed out waiting for SPI\n"); 1665 EFX_ERR(efx, "timed out waiting for SPI\n");
1625 return -ETIMEDOUT; 1666 return -ETIMEDOUT;
1626 } 1667 }
1627 cpu_relax(); 1668 schedule_timeout_uninterruptible(1);
1628 } 1669 }
1629} 1670}
1630 1671
1631static int falcon_spi_cmd(const struct efx_spi_device *spi, 1672int falcon_spi_cmd(const struct efx_spi_device *spi,
1632 unsigned int command, int address, 1673 unsigned int command, int address,
1633 const void *in, void *out, unsigned int len) 1674 const void *in, void *out, size_t len)
1634{ 1675{
1635 struct efx_nic *efx = spi->efx; 1676 struct efx_nic *efx = spi->efx;
1636 bool addressed = (address >= 0); 1677 bool addressed = (address >= 0);
@@ -1641,9 +1682,10 @@ static int falcon_spi_cmd(const struct efx_spi_device *spi,
1641 /* Input validation */ 1682 /* Input validation */
1642 if (len > FALCON_SPI_MAX_LEN) 1683 if (len > FALCON_SPI_MAX_LEN)
1643 return -EINVAL; 1684 return -EINVAL;
1685 BUG_ON(!mutex_is_locked(&efx->spi_lock));
1644 1686
1645 /* Check SPI not currently being accessed */ 1687 /* Check that previous command is not still running */
1646 rc = falcon_spi_wait(efx); 1688 rc = falcon_spi_poll(efx);
1647 if (rc) 1689 if (rc)
1648 return rc; 1690 return rc;
1649 1691
@@ -1685,8 +1727,8 @@ static int falcon_spi_cmd(const struct efx_spi_device *spi,
1685 return 0; 1727 return 0;
1686} 1728}
1687 1729
1688static unsigned int 1730static size_t
1689falcon_spi_write_limit(const struct efx_spi_device *spi, unsigned int start) 1731falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
1690{ 1732{
1691 return min(FALCON_SPI_MAX_LEN, 1733 return min(FALCON_SPI_MAX_LEN,
1692 (spi->block_size - (start & (spi->block_size - 1)))); 1734 (spi->block_size - (start & (spi->block_size - 1))));
@@ -1699,38 +1741,40 @@ efx_spi_munge_command(const struct efx_spi_device *spi,
1699 return command | (((address >> 8) & spi->munge_address) << 3); 1741 return command | (((address >> 8) & spi->munge_address) << 3);
1700} 1742}
1701 1743
1702 1744/* Wait up to 10 ms for buffered write completion */
1703static int falcon_spi_fast_wait(const struct efx_spi_device *spi) 1745int falcon_spi_wait_write(const struct efx_spi_device *spi)
1704{ 1746{
1747 struct efx_nic *efx = spi->efx;
1748 unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
1705 u8 status; 1749 u8 status;
1706 int i, rc; 1750 int rc;
1707
1708 /* Wait up to 1000us for flash/EEPROM to finish a fast operation. */
1709 for (i = 0; i < 50; i++) {
1710 udelay(20);
1711 1751
1752 for (;;) {
1712 rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL, 1753 rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
1713 &status, sizeof(status)); 1754 &status, sizeof(status));
1714 if (rc) 1755 if (rc)
1715 return rc; 1756 return rc;
1716 if (!(status & SPI_STATUS_NRDY)) 1757 if (!(status & SPI_STATUS_NRDY))
1717 return 0; 1758 return 0;
1759 if (time_after_eq(jiffies, timeout)) {
1760 EFX_ERR(efx, "SPI write timeout on device %d"
1761 " last status=0x%02x\n",
1762 spi->device_id, status);
1763 return -ETIMEDOUT;
1764 }
1765 schedule_timeout_uninterruptible(1);
1718 } 1766 }
1719 EFX_ERR(spi->efx,
1720 "timed out waiting for device %d last status=0x%02x\n",
1721 spi->device_id, status);
1722 return -ETIMEDOUT;
1723} 1767}
1724 1768
1725int falcon_spi_read(const struct efx_spi_device *spi, loff_t start, 1769int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
1726 size_t len, size_t *retlen, u8 *buffer) 1770 size_t len, size_t *retlen, u8 *buffer)
1727{ 1771{
1728 unsigned int command, block_len, pos = 0; 1772 size_t block_len, pos = 0;
1773 unsigned int command;
1729 int rc = 0; 1774 int rc = 0;
1730 1775
1731 while (pos < len) { 1776 while (pos < len) {
1732 block_len = min((unsigned int)len - pos, 1777 block_len = min(len - pos, FALCON_SPI_MAX_LEN);
1733 FALCON_SPI_MAX_LEN);
1734 1778
1735 command = efx_spi_munge_command(spi, SPI_READ, start + pos); 1779 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
1736 rc = falcon_spi_cmd(spi, command, start + pos, NULL, 1780 rc = falcon_spi_cmd(spi, command, start + pos, NULL,
@@ -1756,7 +1800,8 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
1756 size_t len, size_t *retlen, const u8 *buffer) 1800 size_t len, size_t *retlen, const u8 *buffer)
1757{ 1801{
1758 u8 verify_buffer[FALCON_SPI_MAX_LEN]; 1802 u8 verify_buffer[FALCON_SPI_MAX_LEN];
1759 unsigned int command, block_len, pos = 0; 1803 size_t block_len, pos = 0;
1804 unsigned int command;
1760 int rc = 0; 1805 int rc = 0;
1761 1806
1762 while (pos < len) { 1807 while (pos < len) {
@@ -1764,7 +1809,7 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
1764 if (rc) 1809 if (rc)
1765 break; 1810 break;
1766 1811
1767 block_len = min((unsigned int)len - pos, 1812 block_len = min(len - pos,
1768 falcon_spi_write_limit(spi, start + pos)); 1813 falcon_spi_write_limit(spi, start + pos));
1769 command = efx_spi_munge_command(spi, SPI_WRITE, start + pos); 1814 command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
1770 rc = falcon_spi_cmd(spi, command, start + pos, 1815 rc = falcon_spi_cmd(spi, command, start + pos,
@@ -1772,7 +1817,7 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
1772 if (rc) 1817 if (rc)
1773 break; 1818 break;
1774 1819
1775 rc = falcon_spi_fast_wait(spi); 1820 rc = falcon_spi_wait_write(spi);
1776 if (rc) 1821 if (rc)
1777 break; 1822 break;
1778 1823
@@ -1805,40 +1850,61 @@ int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
1805 * 1850 *
1806 ************************************************************************** 1851 **************************************************************************
1807 */ 1852 */
1808void falcon_drain_tx_fifo(struct efx_nic *efx) 1853
1854static int falcon_reset_macs(struct efx_nic *efx)
1809{ 1855{
1810 efx_oword_t temp; 1856 efx_oword_t reg;
1811 int count; 1857 int count;
1812 1858
1813 if ((falcon_rev(efx) < FALCON_REV_B0) || 1859 if (falcon_rev(efx) < FALCON_REV_B0) {
1814 (efx->loopback_mode != LOOPBACK_NONE)) 1860 /* It's not safe to use GLB_CTL_REG to reset the
1815 return; 1861 * macs, so instead use the internal MAC resets
1862 */
1863 if (!EFX_IS10G(efx)) {
1864 EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1);
1865 falcon_write(efx, &reg, GM_CFG1_REG);
1866 udelay(1000);
1867
1868 EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0);
1869 falcon_write(efx, &reg, GM_CFG1_REG);
1870 udelay(1000);
1871 return 0;
1872 } else {
1873 EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
1874 falcon_write(efx, &reg, XM_GLB_CFG_REG);
1875
1876 for (count = 0; count < 10000; count++) {
1877 falcon_read(efx, &reg, XM_GLB_CFG_REG);
1878 if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
1879 return 0;
1880 udelay(10);
1881 }
1816 1882
1817 falcon_read(efx, &temp, MAC0_CTRL_REG_KER); 1883 EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
1818 /* There is no point in draining more than once */ 1884 return -ETIMEDOUT;
1819 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) 1885 }
1820 return; 1886 }
1821 1887
1822 /* MAC stats will fail whilst the TX fifo is draining. Serialise 1888 /* MAC stats will fail whilst the TX fifo is draining. Serialise
1823 * the drain sequence with the statistics fetch */ 1889 * the drain sequence with the statistics fetch */
1824 spin_lock(&efx->stats_lock); 1890 spin_lock(&efx->stats_lock);
1825 1891
1826 EFX_SET_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0, 1); 1892 falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
1827 falcon_write(efx, &temp, MAC0_CTRL_REG_KER); 1893 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1);
1894 falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
1828 1895
1829 /* Reset the MAC and EM block. */ 1896 falcon_read(efx, &reg, GLB_CTL_REG_KER);
1830 falcon_read(efx, &temp, GLB_CTL_REG_KER); 1897 EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1);
1831 EFX_SET_OWORD_FIELD(temp, RST_XGTX, 1); 1898 EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1);
1832 EFX_SET_OWORD_FIELD(temp, RST_XGRX, 1); 1899 EFX_SET_OWORD_FIELD(reg, RST_EM, 1);
1833 EFX_SET_OWORD_FIELD(temp, RST_EM, 1); 1900 falcon_write(efx, &reg, GLB_CTL_REG_KER);
1834 falcon_write(efx, &temp, GLB_CTL_REG_KER);
1835 1901
1836 count = 0; 1902 count = 0;
1837 while (1) { 1903 while (1) {
1838 falcon_read(efx, &temp, GLB_CTL_REG_KER); 1904 falcon_read(efx, &reg, GLB_CTL_REG_KER);
1839 if (!EFX_OWORD_FIELD(temp, RST_XGTX) && 1905 if (!EFX_OWORD_FIELD(reg, RST_XGTX) &&
1840 !EFX_OWORD_FIELD(temp, RST_XGRX) && 1906 !EFX_OWORD_FIELD(reg, RST_XGRX) &&
1841 !EFX_OWORD_FIELD(temp, RST_EM)) { 1907 !EFX_OWORD_FIELD(reg, RST_EM)) {
1842 EFX_LOG(efx, "Completed MAC reset after %d loops\n", 1908 EFX_LOG(efx, "Completed MAC reset after %d loops\n",
1843 count); 1909 count);
1844 break; 1910 break;
@@ -1855,21 +1921,39 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
1855 1921
1856 /* If we've reset the EM block and the link is up, then 1922 /* If we've reset the EM block and the link is up, then
1857 * we'll have to kick the XAUI link so the PHY can recover */ 1923 * we'll have to kick the XAUI link so the PHY can recover */
1858 if (efx->link_up && EFX_WORKAROUND_5147(efx)) 1924 if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
1859 falcon_reset_xaui(efx); 1925 falcon_reset_xaui(efx);
1926
1927 return 0;
1928}
1929
1930void falcon_drain_tx_fifo(struct efx_nic *efx)
1931{
1932 efx_oword_t reg;
1933
1934 if ((falcon_rev(efx) < FALCON_REV_B0) ||
1935 (efx->loopback_mode != LOOPBACK_NONE))
1936 return;
1937
1938 falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
1939 /* There is no point in draining more than once */
1940 if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0))
1941 return;
1942
1943 falcon_reset_macs(efx);
1860} 1944}
1861 1945
1862void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) 1946void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1863{ 1947{
1864 efx_oword_t temp; 1948 efx_oword_t reg;
1865 1949
1866 if (falcon_rev(efx) < FALCON_REV_B0) 1950 if (falcon_rev(efx) < FALCON_REV_B0)
1867 return; 1951 return;
1868 1952
1869 /* Isolate the MAC -> RX */ 1953 /* Isolate the MAC -> RX */
1870 falcon_read(efx, &temp, RX_CFG_REG_KER); 1954 falcon_read(efx, &reg, RX_CFG_REG_KER);
1871 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 0); 1955 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0);
1872 falcon_write(efx, &temp, RX_CFG_REG_KER); 1956 falcon_write(efx, &reg, RX_CFG_REG_KER);
1873 1957
1874 if (!efx->link_up) 1958 if (!efx->link_up)
1875 falcon_drain_tx_fifo(efx); 1959 falcon_drain_tx_fifo(efx);
@@ -1881,14 +1965,12 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1881 int link_speed; 1965 int link_speed;
1882 bool tx_fc; 1966 bool tx_fc;
1883 1967
1884 if (efx->link_options & GM_LPA_10000) 1968 switch (efx->link_speed) {
1885 link_speed = 0x3; 1969 case 10000: link_speed = 3; break;
1886 else if (efx->link_options & GM_LPA_1000) 1970 case 1000: link_speed = 2; break;
1887 link_speed = 0x2; 1971 case 100: link_speed = 1; break;
1888 else if (efx->link_options & GM_LPA_100) 1972 default: link_speed = 0; break;
1889 link_speed = 0x1; 1973 }
1890 else
1891 link_speed = 0x0;
1892 /* MAC_LINK_STATUS controls MAC backpressure but doesn't work 1974 /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
1893 * as advertised. Disable to ensure packets are not 1975 * as advertised. Disable to ensure packets are not
1894 * indefinitely held and TX queue can be flushed at any point 1976 * indefinitely held and TX queue can be flushed at any point
@@ -1914,7 +1996,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1914 /* Transmission of pause frames when RX crosses the threshold is 1996 /* Transmission of pause frames when RX crosses the threshold is
1915 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL. 1997 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
1916 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */ 1998 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */
1917 tx_fc = !!(efx->flow_control & EFX_FC_TX); 1999 tx_fc = !!(efx->link_fc & EFX_FC_TX);
1918 falcon_read(efx, &reg, RX_CFG_REG_KER); 2000 falcon_read(efx, &reg, RX_CFG_REG_KER);
1919 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); 2001 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
1920 2002
@@ -1998,7 +2080,8 @@ static int falcon_gmii_wait(struct efx_nic *efx)
1998 efx_dword_t md_stat; 2080 efx_dword_t md_stat;
1999 int count; 2081 int count;
2000 2082
2001 for (count = 0; count < 1000; count++) { /* wait upto 10ms */ 2083 /* wait upto 50ms - taken max from datasheet */
2084 for (count = 0; count < 5000; count++) {
2002 falcon_readl(efx, &md_stat, MD_STAT_REG_KER); 2085 falcon_readl(efx, &md_stat, MD_STAT_REG_KER);
2003 if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) { 2086 if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) {
2004 if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 || 2087 if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 ||
@@ -2162,10 +2245,14 @@ static void falcon_init_mdio(struct mii_if_info *gmii)
2162static int falcon_probe_phy(struct efx_nic *efx) 2245static int falcon_probe_phy(struct efx_nic *efx)
2163{ 2246{
2164 switch (efx->phy_type) { 2247 switch (efx->phy_type) {
2165 case PHY_TYPE_10XPRESS: 2248 case PHY_TYPE_SFX7101:
2166 efx->phy_op = &falcon_tenxpress_phy_ops; 2249 efx->phy_op = &falcon_sfx7101_phy_ops;
2250 break;
2251 case PHY_TYPE_SFT9001A:
2252 case PHY_TYPE_SFT9001B:
2253 efx->phy_op = &falcon_sft9001_phy_ops;
2167 break; 2254 break;
2168 case PHY_TYPE_XFP: 2255 case PHY_TYPE_QT2022C2:
2169 efx->phy_op = &falcon_xfp_phy_ops; 2256 efx->phy_op = &falcon_xfp_phy_ops;
2170 break; 2257 break;
2171 default: 2258 default:
@@ -2174,10 +2261,59 @@ static int falcon_probe_phy(struct efx_nic *efx)
2174 return -1; 2261 return -1;
2175 } 2262 }
2176 2263
2177 efx->loopback_modes = LOOPBACKS_10G_INTERNAL | efx->phy_op->loopbacks; 2264 if (efx->phy_op->macs & EFX_XMAC)
2265 efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2266 (1 << LOOPBACK_XGXS) |
2267 (1 << LOOPBACK_XAUI));
2268 if (efx->phy_op->macs & EFX_GMAC)
2269 efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2270 efx->loopback_modes |= efx->phy_op->loopbacks;
2271
2178 return 0; 2272 return 0;
2179} 2273}
2180 2274
2275int falcon_switch_mac(struct efx_nic *efx)
2276{
2277 struct efx_mac_operations *old_mac_op = efx->mac_op;
2278 efx_oword_t nic_stat;
2279 unsigned strap_val;
2280
2281 /* Internal loopbacks override the phy speed setting */
2282 if (efx->loopback_mode == LOOPBACK_GMAC) {
2283 efx->link_speed = 1000;
2284 efx->link_fd = true;
2285 } else if (LOOPBACK_INTERNAL(efx)) {
2286 efx->link_speed = 10000;
2287 efx->link_fd = true;
2288 }
2289
2290 efx->mac_op = (EFX_IS10G(efx) ?
2291 &falcon_xmac_operations : &falcon_gmac_operations);
2292 if (old_mac_op == efx->mac_op)
2293 return 0;
2294
2295 WARN_ON(!mutex_is_locked(&efx->mac_lock));
2296
2297 /* Not all macs support a mac-level link state */
2298 efx->mac_up = true;
2299
2300 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2301 strap_val = EFX_IS10G(efx) ? 5 : 3;
2302 if (falcon_rev(efx) >= FALCON_REV_B0) {
2303 EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1);
2304 EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val);
2305 falcon_write(efx, &nic_stat, NIC_STAT_REG);
2306 } else {
2307 /* Falcon A1 does not support 1G/10G speed switching
2308 * and must not be used with a PHY that does. */
2309 BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
2310 }
2311
2312
2313 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
2314 return falcon_reset_macs(efx);
2315}
2316
2181/* This call is responsible for hooking in the MAC and PHY operations */ 2317/* This call is responsible for hooking in the MAC and PHY operations */
2182int falcon_probe_port(struct efx_nic *efx) 2318int falcon_probe_port(struct efx_nic *efx)
2183{ 2319{
@@ -2194,9 +2330,9 @@ int falcon_probe_port(struct efx_nic *efx)
2194 2330
2195 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ 2331 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
2196 if (falcon_rev(efx) >= FALCON_REV_B0) 2332 if (falcon_rev(efx) >= FALCON_REV_B0)
2197 efx->flow_control = EFX_FC_RX | EFX_FC_TX; 2333 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
2198 else 2334 else
2199 efx->flow_control = EFX_FC_RX; 2335 efx->wanted_fc = EFX_FC_RX;
2200 2336
2201 /* Allocate buffer for stats */ 2337 /* Allocate buffer for stats */
2202 rc = falcon_alloc_buffer(efx, &efx->stats_buffer, 2338 rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
@@ -2253,13 +2389,18 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2253 __le16 *word, *limit; 2389 __le16 *word, *limit;
2254 u32 csum; 2390 u32 csum;
2255 2391
2256 region = kmalloc(NVCONFIG_END, GFP_KERNEL); 2392 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
2393 if (!spi)
2394 return -EINVAL;
2395
2396 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
2257 if (!region) 2397 if (!region)
2258 return -ENOMEM; 2398 return -ENOMEM;
2259 nvconfig = region + NVCONFIG_OFFSET; 2399 nvconfig = region + NVCONFIG_OFFSET;
2260 2400
2261 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom; 2401 mutex_lock(&efx->spi_lock);
2262 rc = falcon_spi_read(spi, 0, NVCONFIG_END, NULL, region); 2402 rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
2403 mutex_unlock(&efx->spi_lock);
2263 if (rc) { 2404 if (rc) {
2264 EFX_ERR(efx, "Failed to read %s\n", 2405 EFX_ERR(efx, "Failed to read %s\n",
2265 efx->spi_flash ? "flash" : "EEPROM"); 2406 efx->spi_flash ? "flash" : "EEPROM");
@@ -2283,7 +2424,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2283 limit = (__le16 *) (nvconfig + 1); 2424 limit = (__le16 *) (nvconfig + 1);
2284 } else { 2425 } else {
2285 word = region; 2426 word = region;
2286 limit = region + NVCONFIG_END; 2427 limit = region + FALCON_NVCONFIG_END;
2287 } 2428 }
2288 for (csum = 0; word < limit; ++word) 2429 for (csum = 0; word < limit; ++word)
2289 csum += le16_to_cpu(*word); 2430 csum += le16_to_cpu(*word);
@@ -2325,6 +2466,10 @@ static struct {
2325 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, 2466 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
2326 { DP_CTRL_REG, 2467 { DP_CTRL_REG,
2327 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, 2468 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
2469 { GM_CFG2_REG,
2470 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
2471 { GMF_CFG0_REG,
2472 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
2328 { XM_GLB_CFG_REG, 2473 { XM_GLB_CFG_REG,
2329 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, 2474 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
2330 { XM_TX_CFG_REG, 2475 { XM_TX_CFG_REG,
@@ -2545,7 +2690,7 @@ static int falcon_spi_device_init(struct efx_nic *efx,
2545 struct efx_spi_device *spi_device; 2690 struct efx_spi_device *spi_device;
2546 2691
2547 if (device_type != 0) { 2692 if (device_type != 0) {
2548 spi_device = kmalloc(sizeof(*spi_device), GFP_KERNEL); 2693 spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
2549 if (!spi_device) 2694 if (!spi_device)
2550 return -ENOMEM; 2695 return -ENOMEM;
2551 spi_device->device_id = device_id; 2696 spi_device->device_id = device_id;
@@ -2555,6 +2700,11 @@ static int falcon_spi_device_init(struct efx_nic *efx,
2555 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN); 2700 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
2556 spi_device->munge_address = (spi_device->size == 1 << 9 && 2701 spi_device->munge_address = (spi_device->size == 1 << 9 &&
2557 spi_device->addr_len == 1); 2702 spi_device->addr_len == 1);
2703 spi_device->erase_command =
2704 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
2705 spi_device->erase_size =
2706 1 << SPI_DEV_TYPE_FIELD(device_type,
2707 SPI_DEV_TYPE_ERASE_SIZE);
2558 spi_device->block_size = 2708 spi_device->block_size =
2559 1 << SPI_DEV_TYPE_FIELD(device_type, 2709 1 << SPI_DEV_TYPE_FIELD(device_type,
2560 SPI_DEV_TYPE_BLOCK_SIZE); 2710 SPI_DEV_TYPE_BLOCK_SIZE);
@@ -2645,6 +2795,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2645static int falcon_probe_nic_variant(struct efx_nic *efx) 2795static int falcon_probe_nic_variant(struct efx_nic *efx)
2646{ 2796{
2647 efx_oword_t altera_build; 2797 efx_oword_t altera_build;
2798 efx_oword_t nic_stat;
2648 2799
2649 falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER); 2800 falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
2650 if (EFX_OWORD_FIELD(altera_build, VER_ALL)) { 2801 if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
@@ -2652,27 +2803,20 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2652 return -ENODEV; 2803 return -ENODEV;
2653 } 2804 }
2654 2805
2806 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2807
2655 switch (falcon_rev(efx)) { 2808 switch (falcon_rev(efx)) {
2656 case FALCON_REV_A0: 2809 case FALCON_REV_A0:
2657 case 0xff: 2810 case 0xff:
2658 EFX_ERR(efx, "Falcon rev A0 not supported\n"); 2811 EFX_ERR(efx, "Falcon rev A0 not supported\n");
2659 return -ENODEV; 2812 return -ENODEV;
2660 2813
2661 case FALCON_REV_A1:{ 2814 case FALCON_REV_A1:
2662 efx_oword_t nic_stat;
2663
2664 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2665
2666 if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) { 2815 if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
2667 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); 2816 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2668 return -ENODEV; 2817 return -ENODEV;
2669 } 2818 }
2670 if (!EFX_OWORD_FIELD(nic_stat, STRAP_10G)) {
2671 EFX_ERR(efx, "1G mode not supported\n");
2672 return -ENODEV;
2673 }
2674 break; 2819 break;
2675 }
2676 2820
2677 case FALCON_REV_B0: 2821 case FALCON_REV_B0:
2678 break; 2822 break;
@@ -2682,6 +2826,9 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2682 return -ENODEV; 2826 return -ENODEV;
2683 } 2827 }
2684 2828
2829 /* Initial assumed speed */
2830 efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000;
2831
2685 return 0; 2832 return 0;
2686} 2833}
2687 2834
@@ -2689,80 +2836,37 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2689static void falcon_probe_spi_devices(struct efx_nic *efx) 2836static void falcon_probe_spi_devices(struct efx_nic *efx)
2690{ 2837{
2691 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; 2838 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2692 bool has_flash, has_eeprom, boot_is_external; 2839 int boot_dev;
2693 2840
2694 falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER); 2841 falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
2695 falcon_read(efx, &nic_stat, NIC_STAT_REG); 2842 falcon_read(efx, &nic_stat, NIC_STAT_REG);
2696 falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); 2843 falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
2697 2844
2698 has_flash = EFX_OWORD_FIELD(nic_stat, SF_PRST); 2845 if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) {
2699 has_eeprom = EFX_OWORD_FIELD(nic_stat, EE_PRST); 2846 boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ?
2700 boot_is_external = EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE); 2847 EE_SPI_FLASH : EE_SPI_EEPROM);
2701 2848 EFX_LOG(efx, "Booted from %s\n",
2702 if (has_flash) { 2849 boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM");
2703 /* Default flash SPI device: Atmel AT25F1024 2850 } else {
2704 * 128 KB, 24-bit address, 32 KB erase block, 2851 /* Disable VPD and set clock dividers to safe
2705 * 256 B write block 2852 * values for initial programming. */
2706 */ 2853 boot_dev = -1;
2707 u32 flash_device_type = 2854 EFX_LOG(efx, "Booted from internal ASIC settings;"
2708 (17 << SPI_DEV_TYPE_SIZE_LBN) 2855 " setting SPI config\n");
2709 | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN) 2856 EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
2710 | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN) 2857 /* 125 MHz / 7 ~= 20 MHz */
2711 | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN) 2858 EE_SF_CLOCK_DIV, 7,
2712 | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN); 2859 /* 125 MHz / 63 ~= 2 MHz */
2713 2860 EE_EE_CLOCK_DIV, 63);
2714 falcon_spi_device_init(efx, &efx->spi_flash, 2861 falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
2715 EE_SPI_FLASH, flash_device_type); 2862 }
2716 2863
2717 if (!boot_is_external) { 2864 if (boot_dev == EE_SPI_FLASH)
2718 /* Disable VPD and set clock dividers to safe 2865 falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH,
2719 * values for initial programming. 2866 default_flash_type);
2720 */ 2867 if (boot_dev == EE_SPI_EEPROM)
2721 EFX_LOG(efx, "Booted from internal ASIC settings;" 2868 falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM,
2722 " setting SPI config\n"); 2869 large_eeprom_type);
2723 EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
2724 /* 125 MHz / 7 ~= 20 MHz */
2725 EE_SF_CLOCK_DIV, 7,
2726 /* 125 MHz / 63 ~= 2 MHz */
2727 EE_EE_CLOCK_DIV, 63);
2728 falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
2729 }
2730 }
2731
2732 if (has_eeprom) {
2733 u32 eeprom_device_type;
2734
2735 /* If it has no flash, it must have a large EEPROM
2736 * for chip config; otherwise check whether 9-bit
2737 * addressing is used for VPD configuration
2738 */
2739 if (has_flash &&
2740 (!boot_is_external ||
2741 EFX_OWORD_FIELD(ee_vpd_cfg, EE_VPD_EN_AD9_MODE))) {
2742 /* Default SPI device: Atmel AT25040 or similar
2743 * 512 B, 9-bit address, 8 B write block
2744 */
2745 eeprom_device_type =
2746 (9 << SPI_DEV_TYPE_SIZE_LBN)
2747 | (1 << SPI_DEV_TYPE_ADDR_LEN_LBN)
2748 | (3 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
2749 } else {
2750 /* "Large" SPI device: Atmel AT25640 or similar
2751 * 8 KB, 16-bit address, 32 B write block
2752 */
2753 eeprom_device_type =
2754 (13 << SPI_DEV_TYPE_SIZE_LBN)
2755 | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
2756 | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN);
2757 }
2758
2759 falcon_spi_device_init(efx, &efx->spi_eeprom,
2760 EE_SPI_EEPROM, eeprom_device_type);
2761 }
2762
2763 EFX_LOG(efx, "flash is %s, EEPROM is %s\n",
2764 (has_flash ? "present" : "absent"),
2765 (has_eeprom ? "present" : "absent"));
2766} 2870}
2767 2871
2768int falcon_probe_nic(struct efx_nic *efx) 2872int falcon_probe_nic(struct efx_nic *efx)
@@ -2825,10 +2929,10 @@ int falcon_probe_nic(struct efx_nic *efx)
2825 goto fail5; 2929 goto fail5;
2826 2930
2827 /* Initialise I2C adapter */ 2931 /* Initialise I2C adapter */
2828 efx->i2c_adap.owner = THIS_MODULE; 2932 efx->i2c_adap.owner = THIS_MODULE;
2829 nic_data->i2c_data = falcon_i2c_bit_operations; 2933 nic_data->i2c_data = falcon_i2c_bit_operations;
2830 nic_data->i2c_data.data = efx; 2934 nic_data->i2c_data.data = efx;
2831 efx->i2c_adap.algo_data = &nic_data->i2c_data; 2935 efx->i2c_adap.algo_data = &nic_data->i2c_data;
2832 efx->i2c_adap.dev.parent = &efx->pci_dev->dev; 2936 efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
2833 strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name)); 2937 strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name));
2834 rc = i2c_bit_add_bus(&efx->i2c_adap); 2938 rc = i2c_bit_add_bus(&efx->i2c_adap);
@@ -2862,20 +2966,18 @@ int falcon_init_nic(struct efx_nic *efx)
2862 unsigned thresh; 2966 unsigned thresh;
2863 int rc; 2967 int rc;
2864 2968
2865 /* Set up the address region register. This is only needed
2866 * for the B0 FPGA, but since we are just pushing in the
2867 * reset defaults this may as well be unconditional. */
2868 EFX_POPULATE_OWORD_4(temp, ADR_REGION0, 0,
2869 ADR_REGION1, (1 << 16),
2870 ADR_REGION2, (2 << 16),
2871 ADR_REGION3, (3 << 16));
2872 falcon_write(efx, &temp, ADR_REGION_REG_KER);
2873
2874 /* Use on-chip SRAM */ 2969 /* Use on-chip SRAM */
2875 falcon_read(efx, &temp, NIC_STAT_REG); 2970 falcon_read(efx, &temp, NIC_STAT_REG);
2876 EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1); 2971 EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
2877 falcon_write(efx, &temp, NIC_STAT_REG); 2972 falcon_write(efx, &temp, NIC_STAT_REG);
2878 2973
2974 /* Set the source of the GMAC clock */
2975 if (falcon_rev(efx) == FALCON_REV_B0) {
2976 falcon_read(efx, &temp, GPIO_CTL_REG_KER);
2977 EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true);
2978 falcon_write(efx, &temp, GPIO_CTL_REG_KER);
2979 }
2980
2879 /* Set buffer table mode */ 2981 /* Set buffer table mode */
2880 EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL); 2982 EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
2881 falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER); 2983 falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index be025ba7a6c6..7869c3d74383 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -12,6 +12,7 @@
12#define EFX_FALCON_H 12#define EFX_FALCON_H
13 13
14#include "net_driver.h" 14#include "net_driver.h"
15#include "efx.h"
15 16
16/* 17/*
17 * Falcon hardware control 18 * Falcon hardware control
@@ -65,6 +66,7 @@ extern int falcon_probe_port(struct efx_nic *efx);
65extern void falcon_remove_port(struct efx_nic *efx); 66extern void falcon_remove_port(struct efx_nic *efx);
66 67
67/* MAC/PHY */ 68/* MAC/PHY */
69extern int falcon_switch_mac(struct efx_nic *efx);
68extern bool falcon_xaui_link_ok(struct efx_nic *efx); 70extern bool falcon_xaui_link_ok(struct efx_nic *efx);
69extern int falcon_dma_stats(struct efx_nic *efx, 71extern int falcon_dma_stats(struct efx_nic *efx,
70 unsigned int done_offset); 72 unsigned int done_offset);
@@ -77,6 +79,7 @@ extern int falcon_init_interrupt(struct efx_nic *efx);
77extern void falcon_enable_interrupts(struct efx_nic *efx); 79extern void falcon_enable_interrupts(struct efx_nic *efx);
78extern void falcon_generate_test_event(struct efx_channel *channel, 80extern void falcon_generate_test_event(struct efx_channel *channel,
79 unsigned int magic); 81 unsigned int magic);
82extern void falcon_sim_phy_event(struct efx_nic *efx);
80extern void falcon_generate_interrupt(struct efx_nic *efx); 83extern void falcon_generate_interrupt(struct efx_nic *efx);
81extern void falcon_set_int_moderation(struct efx_channel *channel); 84extern void falcon_set_int_moderation(struct efx_channel *channel);
82extern void falcon_disable_interrupts(struct efx_nic *efx); 85extern void falcon_disable_interrupts(struct efx_nic *efx);
diff --git a/drivers/net/sfc/falcon_gmac.c b/drivers/net/sfc/falcon_gmac.c
new file mode 100644
index 000000000000..8865eae20ac5
--- /dev/null
+++ b/drivers/net/sfc/falcon_gmac.c
@@ -0,0 +1,229 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/delay.h>
12#include "net_driver.h"
13#include "efx.h"
14#include "falcon.h"
15#include "mac.h"
16#include "falcon_hwdefs.h"
17#include "falcon_io.h"
18#include "gmii.h"
19
20/**************************************************************************
21 *
22 * MAC operations
23 *
24 *************************************************************************/
25
26static void falcon_reconfigure_gmac(struct efx_nic *efx)
27{
28 bool loopback, tx_fc, rx_fc, bytemode;
29 int if_mode;
30 unsigned int max_frame_len;
31 efx_oword_t reg;
32
33 /* Configuration register 1 */
34 tx_fc = (efx->link_fc & EFX_FC_TX) || !efx->link_fd;
35 rx_fc = !!(efx->link_fc & EFX_FC_RX);
36 loopback = (efx->loopback_mode == LOOPBACK_GMAC);
37 bytemode = (efx->link_speed == 1000);
38
39 EFX_POPULATE_OWORD_5(reg,
40 GM_LOOP, loopback,
41 GM_TX_EN, 1,
42 GM_TX_FC_EN, tx_fc,
43 GM_RX_EN, 1,
44 GM_RX_FC_EN, rx_fc);
45 falcon_write(efx, &reg, GM_CFG1_REG);
46 udelay(10);
47
48 /* Configuration register 2 */
49 if_mode = (bytemode) ? 2 : 1;
50 EFX_POPULATE_OWORD_5(reg,
51 GM_IF_MODE, if_mode,
52 GM_PAD_CRC_EN, 1,
53 GM_LEN_CHK, 1,
54 GM_FD, efx->link_fd,
55 GM_PAMBL_LEN, 0x7/*datasheet recommended */);
56
57 falcon_write(efx, &reg, GM_CFG2_REG);
58 udelay(10);
59
60 /* Max frame len register */
61 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
62 EFX_POPULATE_OWORD_1(reg, GM_MAX_FLEN, max_frame_len);
63 falcon_write(efx, &reg, GM_MAX_FLEN_REG);
64 udelay(10);
65
66 /* FIFO configuration register 0 */
67 EFX_POPULATE_OWORD_5(reg,
68 GMF_FTFENREQ, 1,
69 GMF_STFENREQ, 1,
70 GMF_FRFENREQ, 1,
71 GMF_SRFENREQ, 1,
72 GMF_WTMENREQ, 1);
73 falcon_write(efx, &reg, GMF_CFG0_REG);
74 udelay(10);
75
76 /* FIFO configuration register 1 */
77 EFX_POPULATE_OWORD_2(reg,
78 GMF_CFGFRTH, 0x12,
79 GMF_CFGXOFFRTX, 0xffff);
80 falcon_write(efx, &reg, GMF_CFG1_REG);
81 udelay(10);
82
83 /* FIFO configuration register 2 */
84 EFX_POPULATE_OWORD_2(reg,
85 GMF_CFGHWM, 0x3f,
86 GMF_CFGLWM, 0xa);
87 falcon_write(efx, &reg, GMF_CFG2_REG);
88 udelay(10);
89
90 /* FIFO configuration register 3 */
91 EFX_POPULATE_OWORD_2(reg,
92 GMF_CFGHWMFT, 0x1c,
93 GMF_CFGFTTH, 0x08);
94 falcon_write(efx, &reg, GMF_CFG3_REG);
95 udelay(10);
96
97 /* FIFO configuration register 4 */
98 EFX_POPULATE_OWORD_1(reg, GMF_HSTFLTRFRM_PAUSE, 1);
99 falcon_write(efx, &reg, GMF_CFG4_REG);
100 udelay(10);
101
102 /* FIFO configuration register 5 */
103 falcon_read(efx, &reg, GMF_CFG5_REG);
104 EFX_SET_OWORD_FIELD(reg, GMF_CFGBYTMODE, bytemode);
105 EFX_SET_OWORD_FIELD(reg, GMF_CFGHDPLX, !efx->link_fd);
106 EFX_SET_OWORD_FIELD(reg, GMF_HSTDRPLT64, !efx->link_fd);
107 EFX_SET_OWORD_FIELD(reg, GMF_HSTFLTRFRMDC_PAUSE, 0);
108 falcon_write(efx, &reg, GMF_CFG5_REG);
109 udelay(10);
110
111 /* MAC address */
112 EFX_POPULATE_OWORD_4(reg,
113 GM_HWADDR_5, efx->net_dev->dev_addr[5],
114 GM_HWADDR_4, efx->net_dev->dev_addr[4],
115 GM_HWADDR_3, efx->net_dev->dev_addr[3],
116 GM_HWADDR_2, efx->net_dev->dev_addr[2]);
117 falcon_write(efx, &reg, GM_ADR1_REG);
118 udelay(10);
119 EFX_POPULATE_OWORD_2(reg,
120 GM_HWADDR_1, efx->net_dev->dev_addr[1],
121 GM_HWADDR_0, efx->net_dev->dev_addr[0]);
122 falcon_write(efx, &reg, GM_ADR2_REG);
123 udelay(10);
124
125 falcon_reconfigure_mac_wrapper(efx);
126}
127
128static void falcon_update_stats_gmac(struct efx_nic *efx)
129{
130 struct efx_mac_stats *mac_stats = &efx->mac_stats;
131 unsigned long old_rx_pause, old_tx_pause;
132 unsigned long new_rx_pause, new_tx_pause;
133 int rc;
134
135 rc = falcon_dma_stats(efx, GDmaDone_offset);
136 if (rc)
137 return;
138
139 /* Pause frames are erroneously counted as errors (SFC bug 3269) */
140 old_rx_pause = mac_stats->rx_pause;
141 old_tx_pause = mac_stats->tx_pause;
142
143 /* Update MAC stats from DMAed values */
144 FALCON_STAT(efx, GRxGoodOct, rx_good_bytes);
145 FALCON_STAT(efx, GRxBadOct, rx_bad_bytes);
146 FALCON_STAT(efx, GRxMissPkt, rx_missed);
147 FALCON_STAT(efx, GRxFalseCRS, rx_false_carrier);
148 FALCON_STAT(efx, GRxPausePkt, rx_pause);
149 FALCON_STAT(efx, GRxBadPkt, rx_bad);
150 FALCON_STAT(efx, GRxUcastPkt, rx_unicast);
151 FALCON_STAT(efx, GRxMcastPkt, rx_multicast);
152 FALCON_STAT(efx, GRxBcastPkt, rx_broadcast);
153 FALCON_STAT(efx, GRxGoodLt64Pkt, rx_good_lt64);
154 FALCON_STAT(efx, GRxBadLt64Pkt, rx_bad_lt64);
155 FALCON_STAT(efx, GRx64Pkt, rx_64);
156 FALCON_STAT(efx, GRx65to127Pkt, rx_65_to_127);
157 FALCON_STAT(efx, GRx128to255Pkt, rx_128_to_255);
158 FALCON_STAT(efx, GRx256to511Pkt, rx_256_to_511);
159 FALCON_STAT(efx, GRx512to1023Pkt, rx_512_to_1023);
160 FALCON_STAT(efx, GRx1024to15xxPkt, rx_1024_to_15xx);
161 FALCON_STAT(efx, GRx15xxtoJumboPkt, rx_15xx_to_jumbo);
162 FALCON_STAT(efx, GRxGtJumboPkt, rx_gtjumbo);
163 FALCON_STAT(efx, GRxFcsErr64to15xxPkt, rx_bad_64_to_15xx);
164 FALCON_STAT(efx, GRxFcsErr15xxtoJumboPkt, rx_bad_15xx_to_jumbo);
165 FALCON_STAT(efx, GRxFcsErrGtJumboPkt, rx_bad_gtjumbo);
166 FALCON_STAT(efx, GTxGoodBadOct, tx_bytes);
167 FALCON_STAT(efx, GTxGoodOct, tx_good_bytes);
168 FALCON_STAT(efx, GTxSglColPkt, tx_single_collision);
169 FALCON_STAT(efx, GTxMultColPkt, tx_multiple_collision);
170 FALCON_STAT(efx, GTxExColPkt, tx_excessive_collision);
171 FALCON_STAT(efx, GTxDefPkt, tx_deferred);
172 FALCON_STAT(efx, GTxLateCol, tx_late_collision);
173 FALCON_STAT(efx, GTxExDefPkt, tx_excessive_deferred);
174 FALCON_STAT(efx, GTxPausePkt, tx_pause);
175 FALCON_STAT(efx, GTxBadPkt, tx_bad);
176 FALCON_STAT(efx, GTxUcastPkt, tx_unicast);
177 FALCON_STAT(efx, GTxMcastPkt, tx_multicast);
178 FALCON_STAT(efx, GTxBcastPkt, tx_broadcast);
179 FALCON_STAT(efx, GTxLt64Pkt, tx_lt64);
180 FALCON_STAT(efx, GTx64Pkt, tx_64);
181 FALCON_STAT(efx, GTx65to127Pkt, tx_65_to_127);
182 FALCON_STAT(efx, GTx128to255Pkt, tx_128_to_255);
183 FALCON_STAT(efx, GTx256to511Pkt, tx_256_to_511);
184 FALCON_STAT(efx, GTx512to1023Pkt, tx_512_to_1023);
185 FALCON_STAT(efx, GTx1024to15xxPkt, tx_1024_to_15xx);
186 FALCON_STAT(efx, GTx15xxtoJumboPkt, tx_15xx_to_jumbo);
187 FALCON_STAT(efx, GTxGtJumboPkt, tx_gtjumbo);
188 FALCON_STAT(efx, GTxNonTcpUdpPkt, tx_non_tcpudp);
189 FALCON_STAT(efx, GTxMacSrcErrPkt, tx_mac_src_error);
190 FALCON_STAT(efx, GTxIpSrcErrPkt, tx_ip_src_error);
191
192 /* Pause frames are erroneously counted as errors (SFC bug 3269) */
193 new_rx_pause = mac_stats->rx_pause;
194 new_tx_pause = mac_stats->tx_pause;
195 mac_stats->rx_bad -= (new_rx_pause - old_rx_pause);
196 mac_stats->tx_bad -= (new_tx_pause - old_tx_pause);
197
198 /* Derive stats that the MAC doesn't provide directly */
199 mac_stats->tx_bad_bytes =
200 mac_stats->tx_bytes - mac_stats->tx_good_bytes;
201 mac_stats->tx_packets =
202 mac_stats->tx_lt64 + mac_stats->tx_64 +
203 mac_stats->tx_65_to_127 + mac_stats->tx_128_to_255 +
204 mac_stats->tx_256_to_511 + mac_stats->tx_512_to_1023 +
205 mac_stats->tx_1024_to_15xx + mac_stats->tx_15xx_to_jumbo +
206 mac_stats->tx_gtjumbo;
207 mac_stats->tx_collision =
208 mac_stats->tx_single_collision +
209 mac_stats->tx_multiple_collision +
210 mac_stats->tx_excessive_collision +
211 mac_stats->tx_late_collision;
212 mac_stats->rx_bytes =
213 mac_stats->rx_good_bytes + mac_stats->rx_bad_bytes;
214 mac_stats->rx_packets =
215 mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64 +
216 mac_stats->rx_64 + mac_stats->rx_65_to_127 +
217 mac_stats->rx_128_to_255 + mac_stats->rx_256_to_511 +
218 mac_stats->rx_512_to_1023 + mac_stats->rx_1024_to_15xx +
219 mac_stats->rx_15xx_to_jumbo + mac_stats->rx_gtjumbo;
220 mac_stats->rx_good = mac_stats->rx_packets - mac_stats->rx_bad;
221 mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64;
222}
223
224struct efx_mac_operations falcon_gmac_operations = {
225 .reconfigure = falcon_reconfigure_gmac,
226 .update_stats = falcon_update_stats_gmac,
227 .irq = efx_port_dummy_op_void,
228 .poll = efx_port_dummy_op_void,
229};
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
index 5d584b0dbb51..bda8d5bb72e4 100644
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ b/drivers/net/sfc/falcon_hwdefs.h
@@ -111,12 +111,18 @@
111 111
112/* NIC status register */ 112/* NIC status register */
113#define NIC_STAT_REG 0x0200 113#define NIC_STAT_REG 0x0200
114#define EE_STRAP_EN_LBN 31
115#define EE_STRAP_EN_WIDTH 1
116#define EE_STRAP_OVR_LBN 24
117#define EE_STRAP_OVR_WIDTH 4
114#define ONCHIP_SRAM_LBN 16 118#define ONCHIP_SRAM_LBN 16
115#define ONCHIP_SRAM_WIDTH 1 119#define ONCHIP_SRAM_WIDTH 1
116#define SF_PRST_LBN 9 120#define SF_PRST_LBN 9
117#define SF_PRST_WIDTH 1 121#define SF_PRST_WIDTH 1
118#define EE_PRST_LBN 8 122#define EE_PRST_LBN 8
119#define EE_PRST_WIDTH 1 123#define EE_PRST_WIDTH 1
124#define STRAP_PINS_LBN 0
125#define STRAP_PINS_WIDTH 3
120/* These bit definitions are extrapolated from the list of numerical 126/* These bit definitions are extrapolated from the list of numerical
121 * values for STRAP_PINS. 127 * values for STRAP_PINS.
122 */ 128 */
@@ -130,6 +136,8 @@
130 136
131/* GPIO control register */ 137/* GPIO control register */
132#define GPIO_CTL_REG_KER 0x0210 138#define GPIO_CTL_REG_KER 0x0210
139#define GPIO_USE_NIC_CLK_LBN (30)
140#define GPIO_USE_NIC_CLK_WIDTH (1)
133#define GPIO_OUTPUTS_LBN (16) 141#define GPIO_OUTPUTS_LBN (16)
134#define GPIO_OUTPUTS_WIDTH (4) 142#define GPIO_OUTPUTS_WIDTH (4)
135#define GPIO_INPUTS_LBN (8) 143#define GPIO_INPUTS_LBN (8)
@@ -492,6 +500,107 @@
492#define MAC_MCAST_HASH_REG0_KER 0xca0 500#define MAC_MCAST_HASH_REG0_KER 0xca0
493#define MAC_MCAST_HASH_REG1_KER 0xcb0 501#define MAC_MCAST_HASH_REG1_KER 0xcb0
494 502
503/* GMAC configuration register 1 */
504#define GM_CFG1_REG 0xe00
505#define GM_SW_RST_LBN 31
506#define GM_SW_RST_WIDTH 1
507#define GM_LOOP_LBN 8
508#define GM_LOOP_WIDTH 1
509#define GM_RX_FC_EN_LBN 5
510#define GM_RX_FC_EN_WIDTH 1
511#define GM_TX_FC_EN_LBN 4
512#define GM_TX_FC_EN_WIDTH 1
513#define GM_RX_EN_LBN 2
514#define GM_RX_EN_WIDTH 1
515#define GM_TX_EN_LBN 0
516#define GM_TX_EN_WIDTH 1
517
518/* GMAC configuration register 2 */
519#define GM_CFG2_REG 0xe10
520#define GM_PAMBL_LEN_LBN 12
521#define GM_PAMBL_LEN_WIDTH 4
522#define GM_IF_MODE_LBN 8
523#define GM_IF_MODE_WIDTH 2
524#define GM_LEN_CHK_LBN 4
525#define GM_LEN_CHK_WIDTH 1
526#define GM_PAD_CRC_EN_LBN 2
527#define GM_PAD_CRC_EN_WIDTH 1
528#define GM_FD_LBN 0
529#define GM_FD_WIDTH 1
530
531/* GMAC maximum frame length register */
532#define GM_MAX_FLEN_REG 0xe40
533#define GM_MAX_FLEN_LBN 0
534#define GM_MAX_FLEN_WIDTH 16
535
536/* GMAC station address register 1 */
537#define GM_ADR1_REG 0xf00
538#define GM_HWADDR_5_LBN 24
539#define GM_HWADDR_5_WIDTH 8
540#define GM_HWADDR_4_LBN 16
541#define GM_HWADDR_4_WIDTH 8
542#define GM_HWADDR_3_LBN 8
543#define GM_HWADDR_3_WIDTH 8
544#define GM_HWADDR_2_LBN 0
545#define GM_HWADDR_2_WIDTH 8
546
547/* GMAC station address register 2 */
548#define GM_ADR2_REG 0xf10
549#define GM_HWADDR_1_LBN 24
550#define GM_HWADDR_1_WIDTH 8
551#define GM_HWADDR_0_LBN 16
552#define GM_HWADDR_0_WIDTH 8
553
554/* GMAC FIFO configuration register 0 */
555#define GMF_CFG0_REG 0xf20
556#define GMF_FTFENREQ_LBN 12
557#define GMF_FTFENREQ_WIDTH 1
558#define GMF_STFENREQ_LBN 11
559#define GMF_STFENREQ_WIDTH 1
560#define GMF_FRFENREQ_LBN 10
561#define GMF_FRFENREQ_WIDTH 1
562#define GMF_SRFENREQ_LBN 9
563#define GMF_SRFENREQ_WIDTH 1
564#define GMF_WTMENREQ_LBN 8
565#define GMF_WTMENREQ_WIDTH 1
566
567/* GMAC FIFO configuration register 1 */
568#define GMF_CFG1_REG 0xf30
569#define GMF_CFGFRTH_LBN 16
570#define GMF_CFGFRTH_WIDTH 5
571#define GMF_CFGXOFFRTX_LBN 0
572#define GMF_CFGXOFFRTX_WIDTH 16
573
574/* GMAC FIFO configuration register 2 */
575#define GMF_CFG2_REG 0xf40
576#define GMF_CFGHWM_LBN 16
577#define GMF_CFGHWM_WIDTH 6
578#define GMF_CFGLWM_LBN 0
579#define GMF_CFGLWM_WIDTH 6
580
581/* GMAC FIFO configuration register 3 */
582#define GMF_CFG3_REG 0xf50
583#define GMF_CFGHWMFT_LBN 16
584#define GMF_CFGHWMFT_WIDTH 6
585#define GMF_CFGFTTH_LBN 0
586#define GMF_CFGFTTH_WIDTH 6
587
588/* GMAC FIFO configuration register 4 */
589#define GMF_CFG4_REG 0xf60
590#define GMF_HSTFLTRFRM_PAUSE_LBN 12
591#define GMF_HSTFLTRFRM_PAUSE_WIDTH 12
592
593/* GMAC FIFO configuration register 5 */
594#define GMF_CFG5_REG 0xf70
595#define GMF_CFGHDPLX_LBN 22
596#define GMF_CFGHDPLX_WIDTH 1
597#define GMF_CFGBYTMODE_LBN 19
598#define GMF_CFGBYTMODE_WIDTH 1
599#define GMF_HSTDRPLT64_LBN 18
600#define GMF_HSTDRPLT64_WIDTH 1
601#define GMF_HSTFLTRFRMDC_PAUSE_LBN 12
602#define GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
603
495/* XGMAC address register low */ 604/* XGMAC address register low */
496#define XM_ADR_LO_REG 0x1200 605#define XM_ADR_LO_REG 0x1200
497#define XM_ADR_3_LBN 24 606#define XM_ADR_3_LBN 24
@@ -944,6 +1053,8 @@
944#define XG_MNT_INTR_B0_WIDTH 1 1053#define XG_MNT_INTR_B0_WIDTH 1
945#define RX_RECOVERY_A1_LBN 11 1054#define RX_RECOVERY_A1_LBN 11
946#define RX_RECOVERY_A1_WIDTH 1 1055#define RX_RECOVERY_A1_WIDTH 1
1056#define XFP_PHY_INTR_LBN 10
1057#define XFP_PHY_INTR_WIDTH 1
947#define XG_PHY_INTR_LBN 9 1058#define XG_PHY_INTR_LBN 9
948#define XG_PHY_INTR_WIDTH 1 1059#define XG_PHY_INTR_WIDTH 1
949#define G_PHY1_INTR_LBN 8 1060#define G_PHY1_INTR_LBN 8
@@ -962,54 +1073,103 @@
962 ************************************************************************** 1073 **************************************************************************
963 * 1074 *
964 */ 1075 */
1076
965#define GRxGoodOct_offset 0x0 1077#define GRxGoodOct_offset 0x0
1078#define GRxGoodOct_WIDTH 48
966#define GRxBadOct_offset 0x8 1079#define GRxBadOct_offset 0x8
1080#define GRxBadOct_WIDTH 48
967#define GRxMissPkt_offset 0x10 1081#define GRxMissPkt_offset 0x10
1082#define GRxMissPkt_WIDTH 32
968#define GRxFalseCRS_offset 0x14 1083#define GRxFalseCRS_offset 0x14
1084#define GRxFalseCRS_WIDTH 32
969#define GRxPausePkt_offset 0x18 1085#define GRxPausePkt_offset 0x18
1086#define GRxPausePkt_WIDTH 32
970#define GRxBadPkt_offset 0x1C 1087#define GRxBadPkt_offset 0x1C
1088#define GRxBadPkt_WIDTH 32
971#define GRxUcastPkt_offset 0x20 1089#define GRxUcastPkt_offset 0x20
1090#define GRxUcastPkt_WIDTH 32
972#define GRxMcastPkt_offset 0x24 1091#define GRxMcastPkt_offset 0x24
1092#define GRxMcastPkt_WIDTH 32
973#define GRxBcastPkt_offset 0x28 1093#define GRxBcastPkt_offset 0x28
1094#define GRxBcastPkt_WIDTH 32
974#define GRxGoodLt64Pkt_offset 0x2C 1095#define GRxGoodLt64Pkt_offset 0x2C
1096#define GRxGoodLt64Pkt_WIDTH 32
975#define GRxBadLt64Pkt_offset 0x30 1097#define GRxBadLt64Pkt_offset 0x30
1098#define GRxBadLt64Pkt_WIDTH 32
976#define GRx64Pkt_offset 0x34 1099#define GRx64Pkt_offset 0x34
1100#define GRx64Pkt_WIDTH 32
977#define GRx65to127Pkt_offset 0x38 1101#define GRx65to127Pkt_offset 0x38
1102#define GRx65to127Pkt_WIDTH 32
978#define GRx128to255Pkt_offset 0x3C 1103#define GRx128to255Pkt_offset 0x3C
1104#define GRx128to255Pkt_WIDTH 32
979#define GRx256to511Pkt_offset 0x40 1105#define GRx256to511Pkt_offset 0x40
1106#define GRx256to511Pkt_WIDTH 32
980#define GRx512to1023Pkt_offset 0x44 1107#define GRx512to1023Pkt_offset 0x44
1108#define GRx512to1023Pkt_WIDTH 32
981#define GRx1024to15xxPkt_offset 0x48 1109#define GRx1024to15xxPkt_offset 0x48
1110#define GRx1024to15xxPkt_WIDTH 32
982#define GRx15xxtoJumboPkt_offset 0x4C 1111#define GRx15xxtoJumboPkt_offset 0x4C
1112#define GRx15xxtoJumboPkt_WIDTH 32
983#define GRxGtJumboPkt_offset 0x50 1113#define GRxGtJumboPkt_offset 0x50
1114#define GRxGtJumboPkt_WIDTH 32
984#define GRxFcsErr64to15xxPkt_offset 0x54 1115#define GRxFcsErr64to15xxPkt_offset 0x54
1116#define GRxFcsErr64to15xxPkt_WIDTH 32
985#define GRxFcsErr15xxtoJumboPkt_offset 0x58 1117#define GRxFcsErr15xxtoJumboPkt_offset 0x58
1118#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
986#define GRxFcsErrGtJumboPkt_offset 0x5C 1119#define GRxFcsErrGtJumboPkt_offset 0x5C
1120#define GRxFcsErrGtJumboPkt_WIDTH 32
987#define GTxGoodBadOct_offset 0x80 1121#define GTxGoodBadOct_offset 0x80
1122#define GTxGoodBadOct_WIDTH 48
988#define GTxGoodOct_offset 0x88 1123#define GTxGoodOct_offset 0x88
1124#define GTxGoodOct_WIDTH 48
989#define GTxSglColPkt_offset 0x90 1125#define GTxSglColPkt_offset 0x90
1126#define GTxSglColPkt_WIDTH 32
990#define GTxMultColPkt_offset 0x94 1127#define GTxMultColPkt_offset 0x94
1128#define GTxMultColPkt_WIDTH 32
991#define GTxExColPkt_offset 0x98 1129#define GTxExColPkt_offset 0x98
1130#define GTxExColPkt_WIDTH 32
992#define GTxDefPkt_offset 0x9C 1131#define GTxDefPkt_offset 0x9C
1132#define GTxDefPkt_WIDTH 32
993#define GTxLateCol_offset 0xA0 1133#define GTxLateCol_offset 0xA0
1134#define GTxLateCol_WIDTH 32
994#define GTxExDefPkt_offset 0xA4 1135#define GTxExDefPkt_offset 0xA4
1136#define GTxExDefPkt_WIDTH 32
995#define GTxPausePkt_offset 0xA8 1137#define GTxPausePkt_offset 0xA8
1138#define GTxPausePkt_WIDTH 32
996#define GTxBadPkt_offset 0xAC 1139#define GTxBadPkt_offset 0xAC
1140#define GTxBadPkt_WIDTH 32
997#define GTxUcastPkt_offset 0xB0 1141#define GTxUcastPkt_offset 0xB0
1142#define GTxUcastPkt_WIDTH 32
998#define GTxMcastPkt_offset 0xB4 1143#define GTxMcastPkt_offset 0xB4
1144#define GTxMcastPkt_WIDTH 32
999#define GTxBcastPkt_offset 0xB8 1145#define GTxBcastPkt_offset 0xB8
1146#define GTxBcastPkt_WIDTH 32
1000#define GTxLt64Pkt_offset 0xBC 1147#define GTxLt64Pkt_offset 0xBC
1148#define GTxLt64Pkt_WIDTH 32
1001#define GTx64Pkt_offset 0xC0 1149#define GTx64Pkt_offset 0xC0
1150#define GTx64Pkt_WIDTH 32
1002#define GTx65to127Pkt_offset 0xC4 1151#define GTx65to127Pkt_offset 0xC4
1152#define GTx65to127Pkt_WIDTH 32
1003#define GTx128to255Pkt_offset 0xC8 1153#define GTx128to255Pkt_offset 0xC8
1154#define GTx128to255Pkt_WIDTH 32
1004#define GTx256to511Pkt_offset 0xCC 1155#define GTx256to511Pkt_offset 0xCC
1156#define GTx256to511Pkt_WIDTH 32
1005#define GTx512to1023Pkt_offset 0xD0 1157#define GTx512to1023Pkt_offset 0xD0
1158#define GTx512to1023Pkt_WIDTH 32
1006#define GTx1024to15xxPkt_offset 0xD4 1159#define GTx1024to15xxPkt_offset 0xD4
1160#define GTx1024to15xxPkt_WIDTH 32
1007#define GTx15xxtoJumboPkt_offset 0xD8 1161#define GTx15xxtoJumboPkt_offset 0xD8
1162#define GTx15xxtoJumboPkt_WIDTH 32
1008#define GTxGtJumboPkt_offset 0xDC 1163#define GTxGtJumboPkt_offset 0xDC
1164#define GTxGtJumboPkt_WIDTH 32
1009#define GTxNonTcpUdpPkt_offset 0xE0 1165#define GTxNonTcpUdpPkt_offset 0xE0
1166#define GTxNonTcpUdpPkt_WIDTH 16
1010#define GTxMacSrcErrPkt_offset 0xE4 1167#define GTxMacSrcErrPkt_offset 0xE4
1168#define GTxMacSrcErrPkt_WIDTH 16
1011#define GTxIpSrcErrPkt_offset 0xE8 1169#define GTxIpSrcErrPkt_offset 0xE8
1170#define GTxIpSrcErrPkt_WIDTH 16
1012#define GDmaDone_offset 0xEC 1171#define GDmaDone_offset 0xEC
1172#define GDmaDone_WIDTH 32
1013 1173
1014#define XgRxOctets_offset 0x0 1174#define XgRxOctets_offset 0x0
1015#define XgRxOctets_WIDTH 48 1175#define XgRxOctets_WIDTH 48
@@ -1150,7 +1310,6 @@ struct falcon_nvconfig_board_v3 {
1150 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field))) 1310 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
1151 1311
1152#define NVCONFIG_OFFSET 0x300 1312#define NVCONFIG_OFFSET 0x300
1153#define NVCONFIG_END 0x400
1154 1313
1155#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C 1314#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
1156struct falcon_nvconfig { 1315struct falcon_nvconfig {
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index d4012314dd01..5a03713685ac 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -15,7 +15,6 @@
15#include "falcon_hwdefs.h" 15#include "falcon_hwdefs.h"
16#include "falcon_io.h" 16#include "falcon_io.h"
17#include "mac.h" 17#include "mac.h"
18#include "gmii.h"
19#include "mdio_10g.h" 18#include "mdio_10g.h"
20#include "phy.h" 19#include "phy.h"
21#include "boards.h" 20#include "boards.h"
@@ -26,24 +25,6 @@
26 * MAC operations 25 * MAC operations
27 * 26 *
28 *************************************************************************/ 27 *************************************************************************/
29static int falcon_reset_xmac(struct efx_nic *efx)
30{
31 efx_oword_t reg;
32 int count;
33
34 EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
35 falcon_write(efx, &reg, XM_GLB_CFG_REG);
36
37 for (count = 0; count < 10000; count++) { /* wait upto 100ms */
38 falcon_read(efx, &reg, XM_GLB_CFG_REG);
39 if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
40 return 0;
41 udelay(10);
42 }
43
44 EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
45 return -ETIMEDOUT;
46}
47 28
48/* Configure the XAUI driver that is an output from Falcon */ 29/* Configure the XAUI driver that is an output from Falcon */
49static void falcon_setup_xaui(struct efx_nic *efx) 30static void falcon_setup_xaui(struct efx_nic *efx)
@@ -99,31 +80,20 @@ int falcon_reset_xaui(struct efx_nic *efx)
99 return -ETIMEDOUT; 80 return -ETIMEDOUT;
100} 81}
101 82
102static bool falcon_xgmii_status(struct efx_nic *efx) 83static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
103{ 84{
104 efx_oword_t reg; 85 efx_oword_t reg;
105 86
106 if (falcon_rev(efx) < FALCON_REV_B0) 87 if ((falcon_rev(efx) != FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
107 return true; 88 return;
108
109 /* The ISR latches, so clear it and re-read */
110 falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
111 falcon_read(efx, &reg, XM_MGT_INT_REG_B0);
112
113 if (EFX_OWORD_FIELD(reg, XM_LCLFLT) ||
114 EFX_OWORD_FIELD(reg, XM_RMTFLT)) {
115 EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
116 return false;
117 }
118
119 return true;
120}
121 89
122static void falcon_mask_status_intr(struct efx_nic *efx, bool enable) 90 /* We expect xgmii faults if the wireside link is up */
123{ 91 if (!EFX_WORKAROUND_5147(efx) || !efx->link_up)
124 efx_oword_t reg; 92 return;
125 93
126 if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 94 /* We can only use this interrupt to signal the negative edge of
95 * xaui_align [we have to poll the positive edge]. */
96 if (!efx->mac_up)
127 return; 97 return;
128 98
129 /* Flush the ISR */ 99 /* Flush the ISR */
@@ -136,35 +106,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
136 falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0); 106 falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0);
137} 107}
138 108
139int falcon_init_xmac(struct efx_nic *efx) 109/* Get status of XAUI link */
140{
141 int rc;
142
143 /* Initialize the PHY first so the clock is around */
144 rc = efx->phy_op->init(efx);
145 if (rc)
146 goto fail1;
147
148 rc = falcon_reset_xaui(efx);
149 if (rc)
150 goto fail2;
151
152 /* Wait again. Give the PHY and MAC time to come back */
153 schedule_timeout_uninterruptible(HZ / 10);
154
155 rc = falcon_reset_xmac(efx);
156 if (rc)
157 goto fail2;
158
159 falcon_mask_status_intr(efx, true);
160 return 0;
161
162 fail2:
163 efx->phy_op->fini(efx);
164 fail1:
165 return rc;
166}
167
168bool falcon_xaui_link_ok(struct efx_nic *efx) 110bool falcon_xaui_link_ok(struct efx_nic *efx)
169{ 111{
170 efx_oword_t reg; 112 efx_oword_t reg;
@@ -188,18 +130,10 @@ bool falcon_xaui_link_ok(struct efx_nic *efx)
188 EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET); 130 EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
189 falcon_write(efx, &reg, XX_CORE_STAT_REG); 131 falcon_write(efx, &reg, XX_CORE_STAT_REG);
190 132
191 /* If the link is up, then check the phy side of the xaui link 133 /* If the link is up, then check the phy side of the xaui link */
192 * (error conditions from the wire side propoagate back through 134 if (efx->link_up && link_ok)
193 * the phy to the xaui side). */
194 if (efx->link_up && link_ok) {
195 if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS)) 135 if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS))
196 link_ok = mdio_clause45_phyxgxs_lane_sync(efx); 136 link_ok = mdio_clause45_phyxgxs_lane_sync(efx);
197 }
198
199 /* If the PHY and XAUI links are up, then check the mac's xgmii
200 * fault state */
201 if (efx->link_up && link_ok)
202 link_ok = falcon_xgmii_status(efx);
203 137
204 return link_ok; 138 return link_ok;
205} 139}
@@ -208,7 +142,7 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
208{ 142{
209 unsigned int max_frame_len; 143 unsigned int max_frame_len;
210 efx_oword_t reg; 144 efx_oword_t reg;
211 bool rx_fc = !!(efx->flow_control & EFX_FC_RX); 145 bool rx_fc = !!(efx->link_fc & EFX_FC_RX);
212 146
213 /* Configure MAC - cut-thru mode is hard wired on */ 147 /* Configure MAC - cut-thru mode is hard wired on */
214 EFX_POPULATE_DWORD_3(reg, 148 EFX_POPULATE_DWORD_3(reg,
@@ -311,70 +245,39 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
311 245
312/* Try and bring the Falcon side of the Falcon-Phy XAUI link fails 246/* Try and bring the Falcon side of the Falcon-Phy XAUI link fails
313 * to come back up. Bash it until it comes back up */ 247 * to come back up. Bash it until it comes back up */
314static bool falcon_check_xaui_link_up(struct efx_nic *efx) 248static void falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
315{ 249{
316 int max_tries, tries; 250 efx->mac_up = falcon_xaui_link_ok(efx);
317 tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
318 max_tries = tries;
319 251
320 if ((efx->loopback_mode == LOOPBACK_NETWORK) || 252 if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
321 (efx->phy_type == PHY_TYPE_NONE) ||
322 efx_phy_mode_disabled(efx->phy_mode)) 253 efx_phy_mode_disabled(efx->phy_mode))
323 return false; 254 /* XAUI link is expected to be down */
324 255 return;
325 while (tries) {
326 if (falcon_xaui_link_ok(efx))
327 return true;
328 256
329 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", 257 while (!efx->mac_up && tries) {
330 __func__, tries); 258 EFX_LOG(efx, "bashing xaui\n");
331 falcon_reset_xaui(efx); 259 falcon_reset_xaui(efx);
332 udelay(200); 260 udelay(200);
333 tries--;
334 }
335 261
336 EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n", 262 efx->mac_up = falcon_xaui_link_ok(efx);
337 max_tries); 263 --tries;
338 return false; 264 }
339} 265}
340 266
341void falcon_reconfigure_xmac(struct efx_nic *efx) 267static void falcon_reconfigure_xmac(struct efx_nic *efx)
342{ 268{
343 bool xaui_link_ok;
344
345 falcon_mask_status_intr(efx, false); 269 falcon_mask_status_intr(efx, false);
346 270
347 falcon_deconfigure_mac_wrapper(efx);
348
349 /* Reconfigure the PHY, disabling transmit in mac level loopback. */
350 if (LOOPBACK_INTERNAL(efx))
351 efx->phy_mode |= PHY_MODE_TX_DISABLED;
352 else
353 efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
354 efx->phy_op->reconfigure(efx);
355
356 falcon_reconfigure_xgxs_core(efx); 271 falcon_reconfigure_xgxs_core(efx);
357 falcon_reconfigure_xmac_core(efx); 272 falcon_reconfigure_xmac_core(efx);
358 273
359 falcon_reconfigure_mac_wrapper(efx); 274 falcon_reconfigure_mac_wrapper(efx);
360 275
361 /* Ensure XAUI link is up */ 276 falcon_check_xaui_link_up(efx, 5);
362 xaui_link_ok = falcon_check_xaui_link_up(efx); 277 falcon_mask_status_intr(efx, true);
363
364 if (xaui_link_ok && efx->link_up)
365 falcon_mask_status_intr(efx, true);
366}
367
368void falcon_fini_xmac(struct efx_nic *efx)
369{
370 /* Isolate the MAC - PHY */
371 falcon_deconfigure_mac_wrapper(efx);
372
373 /* Potentially power down the PHY */
374 efx->phy_op->fini(efx);
375} 278}
376 279
377void falcon_update_stats_xmac(struct efx_nic *efx) 280static void falcon_update_stats_xmac(struct efx_nic *efx)
378{ 281{
379 struct efx_mac_stats *mac_stats = &efx->mac_stats; 282 struct efx_mac_stats *mac_stats = &efx->mac_stats;
380 int rc; 283 int rc;
@@ -439,97 +342,35 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
439 mac_stats->rx_control * 64); 342 mac_stats->rx_control * 64);
440} 343}
441 344
442int falcon_check_xmac(struct efx_nic *efx) 345static void falcon_xmac_irq(struct efx_nic *efx)
443{ 346{
444 bool xaui_link_ok; 347 /* The XGMII link has a transient fault, which indicates either:
445 int rc; 348 * - there's a transient xgmii fault
446 349 * - falcon's end of the xaui link may need a kick
447 if ((efx->loopback_mode == LOOPBACK_NETWORK) || 350 * - the wire-side link may have gone down, but the lasi/poll()
448 efx_phy_mode_disabled(efx->phy_mode)) 351 * hasn't noticed yet.
449 return 0; 352 *
450 353 * We only want to even bother polling XAUI if we're confident it's
451 falcon_mask_status_intr(efx, false); 354 * not (1) or (3). In both cases, the only reliable way to spot this
452 xaui_link_ok = falcon_xaui_link_ok(efx); 355 * is to wait a bit. We do this here by forcing the mac link state
453 356 * to down, and waiting for the mac poll to come round and check
454 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) 357 */
455 falcon_reset_xaui(efx); 358 efx->mac_up = false;
456
457 /* Call the PHY check_hw routine */
458 rc = efx->phy_op->check_hw(efx);
459
460 /* Unmask interrupt if everything was (and still is) ok */
461 if (xaui_link_ok && efx->link_up)
462 falcon_mask_status_intr(efx, true);
463
464 return rc;
465}
466
467/* Simulate a PHY event */
468void falcon_xmac_sim_phy_event(struct efx_nic *efx)
469{
470 efx_qword_t phy_event;
471
472 EFX_POPULATE_QWORD_2(phy_event,
473 EV_CODE, GLOBAL_EV_DECODE,
474 XG_PHY_INTR, 1);
475 falcon_generate_event(&efx->channel[0], &phy_event);
476} 359}
477 360
478int falcon_xmac_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 361static void falcon_poll_xmac(struct efx_nic *efx)
479{ 362{
480 mdio_clause45_get_settings(efx, ecmd); 363 if (!EFX_WORKAROUND_5147(efx) || !efx->link_up || efx->mac_up)
481 ecmd->transceiver = XCVR_INTERNAL; 364 return;
482 ecmd->phy_address = efx->mii.phy_id;
483 ecmd->autoneg = AUTONEG_DISABLE;
484 ecmd->duplex = DUPLEX_FULL;
485 return 0;
486}
487 365
488int falcon_xmac_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 366 falcon_mask_status_intr(efx, false);
489{ 367 falcon_check_xaui_link_up(efx, 1);
490 if (ecmd->transceiver != XCVR_INTERNAL) 368 falcon_mask_status_intr(efx, true);
491 return -EINVAL;
492 if (ecmd->autoneg != AUTONEG_DISABLE)
493 return -EINVAL;
494 if (ecmd->duplex != DUPLEX_FULL)
495 return -EINVAL;
496
497 return mdio_clause45_set_settings(efx, ecmd);
498} 369}
499 370
500 371struct efx_mac_operations falcon_xmac_operations = {
501int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control) 372 .reconfigure = falcon_reconfigure_xmac,
502{ 373 .update_stats = falcon_update_stats_xmac,
503 bool reset; 374 .irq = falcon_xmac_irq,
504 375 .poll = falcon_poll_xmac,
505 if (flow_control & EFX_FC_AUTO) { 376};
506 EFX_LOG(efx, "10G does not support flow control "
507 "autonegotiation\n");
508 return -EINVAL;
509 }
510
511 if ((flow_control & EFX_FC_TX) && !(flow_control & EFX_FC_RX))
512 return -EINVAL;
513
514 /* TX flow control may automatically turn itself off if the
515 * link partner (intermittently) stops responding to pause
516 * frames. There isn't any indication that this has happened,
517 * so the best we do is leave it up to the user to spot this
518 * and fix it be cycling transmit flow control on this end. */
519 reset = ((flow_control & EFX_FC_TX) &&
520 !(efx->flow_control & EFX_FC_TX));
521 if (EFX_WORKAROUND_11482(efx) && reset) {
522 if (falcon_rev(efx) >= FALCON_REV_B0) {
523 /* Recover by resetting the EM block */
524 if (efx->link_up)
525 falcon_drain_tx_fifo(efx);
526 } else {
527 /* Schedule a reset to recover */
528 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
529 }
530 }
531
532 efx->flow_control = flow_control;
533
534 return 0;
535}
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h
index d25bbd1297f4..dfccaa7b573e 100644
--- a/drivers/net/sfc/gmii.h
+++ b/drivers/net/sfc/gmii.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc. 4 * Copyright 2006-2008 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -57,139 +57,4 @@
57#define ISR_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */ 57#define ISR_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
58#define ISR_JABBER 0x0001 /* Bit 0 - jabber */ 58#define ISR_JABBER 0x0001 /* Bit 0 - jabber */
59 59
60/* Logically extended advertisement register */
61#define GM_ADVERTISE_SLCT ADVERTISE_SLCT
62#define GM_ADVERTISE_CSMA ADVERTISE_CSMA
63#define GM_ADVERTISE_10HALF ADVERTISE_10HALF
64#define GM_ADVERTISE_1000XFULL ADVERTISE_1000XFULL
65#define GM_ADVERTISE_10FULL ADVERTISE_10FULL
66#define GM_ADVERTISE_1000XHALF ADVERTISE_1000XHALF
67#define GM_ADVERTISE_100HALF ADVERTISE_100HALF
68#define GM_ADVERTISE_1000XPAUSE ADVERTISE_1000XPAUSE
69#define GM_ADVERTISE_100FULL ADVERTISE_100FULL
70#define GM_ADVERTISE_1000XPSE_ASYM ADVERTISE_1000XPSE_ASYM
71#define GM_ADVERTISE_100BASE4 ADVERTISE_100BASE4
72#define GM_ADVERTISE_PAUSE_CAP ADVERTISE_PAUSE_CAP
73#define GM_ADVERTISE_PAUSE_ASYM ADVERTISE_PAUSE_ASYM
74#define GM_ADVERTISE_RESV ADVERTISE_RESV
75#define GM_ADVERTISE_RFAULT ADVERTISE_RFAULT
76#define GM_ADVERTISE_LPACK ADVERTISE_LPACK
77#define GM_ADVERTISE_NPAGE ADVERTISE_NPAGE
78#define GM_ADVERTISE_1000FULL (ADVERTISE_1000FULL << 8)
79#define GM_ADVERTISE_1000HALF (ADVERTISE_1000HALF << 8)
80#define GM_ADVERTISE_1000 (GM_ADVERTISE_1000FULL | \
81 GM_ADVERTISE_1000HALF)
82#define GM_ADVERTISE_FULL (GM_ADVERTISE_1000FULL | \
83 ADVERTISE_FULL)
84#define GM_ADVERTISE_ALL (GM_ADVERTISE_1000FULL | \
85 GM_ADVERTISE_1000HALF | \
86 ADVERTISE_ALL)
87
88/* Logically extended link partner ability register */
89#define GM_LPA_SLCT LPA_SLCT
90#define GM_LPA_10HALF LPA_10HALF
91#define GM_LPA_1000XFULL LPA_1000XFULL
92#define GM_LPA_10FULL LPA_10FULL
93#define GM_LPA_1000XHALF LPA_1000XHALF
94#define GM_LPA_100HALF LPA_100HALF
95#define GM_LPA_1000XPAUSE LPA_1000XPAUSE
96#define GM_LPA_100FULL LPA_100FULL
97#define GM_LPA_1000XPAUSE_ASYM LPA_1000XPAUSE_ASYM
98#define GM_LPA_100BASE4 LPA_100BASE4
99#define GM_LPA_PAUSE_CAP LPA_PAUSE_CAP
100#define GM_LPA_PAUSE_ASYM LPA_PAUSE_ASYM
101#define GM_LPA_RESV LPA_RESV
102#define GM_LPA_RFAULT LPA_RFAULT
103#define GM_LPA_LPACK LPA_LPACK
104#define GM_LPA_NPAGE LPA_NPAGE
105#define GM_LPA_1000FULL (LPA_1000FULL << 6)
106#define GM_LPA_1000HALF (LPA_1000HALF << 6)
107#define GM_LPA_10000FULL 0x00040000
108#define GM_LPA_10000HALF 0x00080000
109#define GM_LPA_DUPLEX (GM_LPA_1000FULL | GM_LPA_10000FULL \
110 | LPA_DUPLEX)
111#define GM_LPA_10 (LPA_10FULL | LPA_10HALF)
112#define GM_LPA_100 LPA_100
113#define GM_LPA_1000 (GM_LPA_1000FULL | GM_LPA_1000HALF)
114#define GM_LPA_10000 (GM_LPA_10000FULL | GM_LPA_10000HALF)
115
116/* Retrieve GMII autonegotiation advertised abilities
117 *
118 * The MII advertisment register (MII_ADVERTISE) is logically extended
119 * to include advertisement bits ADVERTISE_1000FULL and
120 * ADVERTISE_1000HALF from MII_CTRL1000. The result can be tested
121 * against the GM_ADVERTISE_xxx constants.
122 */
123static inline unsigned int gmii_advertised(struct mii_if_info *gmii)
124{
125 unsigned int advertise;
126 unsigned int ctrl1000;
127
128 advertise = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_ADVERTISE);
129 ctrl1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_CTRL1000);
130 return (((ctrl1000 << 8) & GM_ADVERTISE_1000) | advertise);
131}
132
133/* Retrieve GMII autonegotiation link partner abilities
134 *
135 * The MII link partner ability register (MII_LPA) is logically
136 * extended by adding bits LPA_1000HALF and LPA_1000FULL from
137 * MII_STAT1000. The result can be tested against the GM_LPA_xxx
138 * constants.
139 */
140static inline unsigned int gmii_lpa(struct mii_if_info *gmii)
141{
142 unsigned int lpa;
143 unsigned int stat1000;
144
145 lpa = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_LPA);
146 stat1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_STAT1000);
147 return (((stat1000 << 6) & GM_LPA_1000) | lpa);
148}
149
150/* Calculate GMII autonegotiated link technology
151 *
152 * "negotiated" should be the result of gmii_advertised() logically
153 * ANDed with the result of gmii_lpa().
154 *
155 * "tech" will be negotiated with the unused bits masked out. For
156 * example, if both ends of the link are capable of both
157 * GM_LPA_1000FULL and GM_LPA_100FULL, GM_LPA_100FULL will be masked
158 * out.
159 */
160static inline unsigned int gmii_nway_result(unsigned int negotiated)
161{
162 unsigned int other_bits;
163
164 /* Mask out the speed and duplexity bits */
165 other_bits = negotiated & ~(GM_LPA_10 | GM_LPA_100 | GM_LPA_1000);
166
167 if (negotiated & GM_LPA_1000FULL)
168 return (other_bits | GM_LPA_1000FULL);
169 else if (negotiated & GM_LPA_1000HALF)
170 return (other_bits | GM_LPA_1000HALF);
171 else
172 return (other_bits | mii_nway_result(negotiated));
173}
174
175/* Calculate GMII non-autonegotiated link technology
176 *
177 * This provides an equivalent to gmii_nway_result for the case when
178 * autonegotiation is disabled.
179 */
180static inline unsigned int gmii_forced_result(unsigned int bmcr)
181{
182 unsigned int result;
183 int full_duplex;
184
185 full_duplex = bmcr & BMCR_FULLDPLX;
186 if (bmcr & BMCR_SPEED1000)
187 result = full_duplex ? GM_LPA_1000FULL : GM_LPA_1000HALF;
188 else if (bmcr & BMCR_SPEED100)
189 result = full_duplex ? GM_LPA_100FULL : GM_LPA_100HALF;
190 else
191 result = full_duplex ? GM_LPA_10FULL : GM_LPA_10HALF;
192 return result;
193}
194
195#endif /* EFX_GMII_H */ 60#endif /* EFX_GMII_H */
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index a31571c69137..4e7074278fe1 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -1,7 +1,7 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2007 Solarflare Communications Inc. 4 * Copyright 2006-2008 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
@@ -13,17 +13,7 @@
13 13
14#include "net_driver.h" 14#include "net_driver.h"
15 15
16extern int falcon_init_xmac(struct efx_nic *efx); 16extern struct efx_mac_operations falcon_gmac_operations;
17extern void falcon_reconfigure_xmac(struct efx_nic *efx); 17extern struct efx_mac_operations falcon_xmac_operations;
18extern void falcon_update_stats_xmac(struct efx_nic *efx);
19extern void falcon_fini_xmac(struct efx_nic *efx);
20extern int falcon_check_xmac(struct efx_nic *efx);
21extern void falcon_xmac_sim_phy_event(struct efx_nic *efx);
22extern int falcon_xmac_get_settings(struct efx_nic *efx,
23 struct ethtool_cmd *ecmd);
24extern int falcon_xmac_set_settings(struct efx_nic *efx,
25 struct ethtool_cmd *ecmd);
26extern int falcon_xmac_set_pause(struct efx_nic *efx,
27 enum efx_fc_type pause_params);
28 18
29#endif 19#endif
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 003e48dcb2f3..f6a16428113d 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -47,13 +47,16 @@ static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd,
47 if (LOOPBACK_INTERNAL(efx)) 47 if (LOOPBACK_INTERNAL(efx))
48 return 0; 48 return 0;
49 49
50 /* Read MMD STATUS2 to check it is responding. */ 50 if (mmd != MDIO_MMD_AN) {
51 status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2); 51 /* Read MMD STATUS2 to check it is responding. */
52 if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) & 52 status = mdio_clause45_read(efx, phy_id, mmd,
53 ((1 << MDIO_MMDREG_STAT2_PRESENT_WIDTH) - 1)) != 53 MDIO_MMDREG_STAT2);
54 MDIO_MMDREG_STAT2_PRESENT_VAL) { 54 if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) &
55 EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd); 55 ((1 << MDIO_MMDREG_STAT2_PRESENT_WIDTH) - 1)) !=
56 return -EIO; 56 MDIO_MMDREG_STAT2_PRESENT_VAL) {
57 EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd);
58 return -EIO;
59 }
57 } 60 }
58 61
59 /* Read MMD STATUS 1 to check for fault. */ 62 /* Read MMD STATUS 1 to check for fault. */
@@ -121,16 +124,18 @@ int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
121int mdio_clause45_check_mmds(struct efx_nic *efx, 124int mdio_clause45_check_mmds(struct efx_nic *efx,
122 unsigned int mmd_mask, unsigned int fatal_mask) 125 unsigned int mmd_mask, unsigned int fatal_mask)
123{ 126{
124 int devices, mmd = 0; 127 u32 devices;
125 int probe_mmd; 128 int mmd = 0, probe_mmd;
126 129
127 /* Historically we have probed the PHYXS to find out what devices are 130 /* Historically we have probed the PHYXS to find out what devices are
128 * present,but that doesn't work so well if the PHYXS isn't expected 131 * present,but that doesn't work so well if the PHYXS isn't expected
129 * to exist, if so just find the first item in the list supplied. */ 132 * to exist, if so just find the first item in the list supplied. */
130 probe_mmd = (mmd_mask & MDIO_MMDREG_DEVS0_PHYXS) ? MDIO_MMD_PHYXS : 133 probe_mmd = (mmd_mask & MDIO_MMDREG_DEVS_PHYXS) ? MDIO_MMD_PHYXS :
131 __ffs(mmd_mask); 134 __ffs(mmd_mask);
132 devices = mdio_clause45_read(efx, efx->mii.phy_id, 135 devices = (mdio_clause45_read(efx, efx->mii.phy_id,
133 probe_mmd, MDIO_MMDREG_DEVS0); 136 probe_mmd, MDIO_MMDREG_DEVS0) |
137 mdio_clause45_read(efx, efx->mii.phy_id,
138 probe_mmd, MDIO_MMDREG_DEVS1) << 16);
134 139
135 /* Check all the expected MMDs are present */ 140 /* Check all the expected MMDs are present */
136 if (devices < 0) { 141 if (devices < 0) {
@@ -162,7 +167,7 @@ int mdio_clause45_check_mmds(struct efx_nic *efx,
162bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) 167bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
163{ 168{
164 int phy_id = efx->mii.phy_id; 169 int phy_id = efx->mii.phy_id;
165 int status; 170 u32 reg;
166 bool ok = true; 171 bool ok = true;
167 int mmd = 0; 172 int mmd = 0;
168 173
@@ -174,26 +179,33 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
174 return false; 179 return false;
175 else if (efx_phy_mode_disabled(efx->phy_mode)) 180 else if (efx_phy_mode_disabled(efx->phy_mode))
176 return false; 181 return false;
177 else if (efx->loopback_mode == LOOPBACK_PHYXS) 182 else if (efx->loopback_mode == LOOPBACK_PHYXS) {
178 mmd_mask &= ~(MDIO_MMDREG_DEVS0_PHYXS | 183 mmd_mask &= ~(MDIO_MMDREG_DEVS_PHYXS |
179 MDIO_MMDREG_DEVS0_PCS | 184 MDIO_MMDREG_DEVS_PCS |
180 MDIO_MMDREG_DEVS0_PMAPMD); 185 MDIO_MMDREG_DEVS_PMAPMD |
181 else if (efx->loopback_mode == LOOPBACK_PCS) 186 MDIO_MMDREG_DEVS_AN);
182 mmd_mask &= ~(MDIO_MMDREG_DEVS0_PCS | 187 if (!mmd_mask) {
183 MDIO_MMDREG_DEVS0_PMAPMD); 188 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS,
189 MDIO_PHYXS_STATUS2);
190 return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN));
191 }
192 } else if (efx->loopback_mode == LOOPBACK_PCS)
193 mmd_mask &= ~(MDIO_MMDREG_DEVS_PCS |
194 MDIO_MMDREG_DEVS_PMAPMD |
195 MDIO_MMDREG_DEVS_AN);
184 else if (efx->loopback_mode == LOOPBACK_PMAPMD) 196 else if (efx->loopback_mode == LOOPBACK_PMAPMD)
185 mmd_mask &= ~MDIO_MMDREG_DEVS0_PMAPMD; 197 mmd_mask &= ~(MDIO_MMDREG_DEVS_PMAPMD |
198 MDIO_MMDREG_DEVS_AN);
186 199
187 while (mmd_mask) { 200 while (mmd_mask) {
188 if (mmd_mask & 1) { 201 if (mmd_mask & 1) {
189 /* Double reads because link state is latched, and a 202 /* Double reads because link state is latched, and a
190 * read moves the current state into the register */ 203 * read moves the current state into the register */
191 status = mdio_clause45_read(efx, phy_id, 204 reg = mdio_clause45_read(efx, phy_id,
192 mmd, MDIO_MMDREG_STAT1); 205 mmd, MDIO_MMDREG_STAT1);
193 status = mdio_clause45_read(efx, phy_id, 206 reg = mdio_clause45_read(efx, phy_id,
194 mmd, MDIO_MMDREG_STAT1); 207 mmd, MDIO_MMDREG_STAT1);
195 208 ok = ok && (reg & (1 << MDIO_MMDREG_STAT1_LINK_LBN));
196 ok = ok && (status & (1 << MDIO_MMDREG_STAT1_LINK_LBN));
197 } 209 }
198 mmd_mask = (mmd_mask >> 1); 210 mmd_mask = (mmd_mask >> 1);
199 mmd++; 211 mmd++;
@@ -203,61 +215,73 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
203 215
204void mdio_clause45_transmit_disable(struct efx_nic *efx) 216void mdio_clause45_transmit_disable(struct efx_nic *efx)
205{ 217{
206 int phy_id = efx->mii.phy_id; 218 mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
207 int ctrl1, ctrl2; 219 MDIO_MMDREG_TXDIS, MDIO_MMDREG_TXDIS_GLOBAL_LBN,
208 220 efx->phy_mode & PHY_MODE_TX_DISABLED);
209 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
210 MDIO_MMDREG_TXDIS);
211 if (efx->phy_mode & PHY_MODE_TX_DISABLED)
212 ctrl2 |= (1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
213 else
214 ctrl1 &= ~(1 << MDIO_MMDREG_TXDIS_GLOBAL_LBN);
215 if (ctrl1 != ctrl2)
216 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
217 MDIO_MMDREG_TXDIS, ctrl2);
218} 221}
219 222
220void mdio_clause45_phy_reconfigure(struct efx_nic *efx) 223void mdio_clause45_phy_reconfigure(struct efx_nic *efx)
221{ 224{
222 int phy_id = efx->mii.phy_id; 225 int phy_id = efx->mii.phy_id;
223 int ctrl1, ctrl2;
224 226
225 /* Handle (with debouncing) PMA/PMD loopback */ 227 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_PMAPMD,
226 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, 228 MDIO_MMDREG_CTRL1, MDIO_PMAPMD_CTRL1_LBACK_LBN,
227 MDIO_MMDREG_CTRL1); 229 efx->loopback_mode == LOOPBACK_PMAPMD);
228 230 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_PCS,
229 if (efx->loopback_mode == LOOPBACK_PMAPMD) 231 MDIO_MMDREG_CTRL1, MDIO_MMDREG_CTRL1_LBACK_LBN,
230 ctrl2 |= (1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); 232 efx->loopback_mode == LOOPBACK_PCS);
231 else 233 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_PHYXS,
232 ctrl2 &= ~(1 << MDIO_PMAPMD_CTRL1_LBACK_LBN); 234 MDIO_MMDREG_CTRL1, MDIO_MMDREG_CTRL1_LBACK_LBN,
235 efx->loopback_mode == LOOPBACK_NETWORK);
236}
233 237
234 if (ctrl1 != ctrl2) 238static void mdio_clause45_set_mmd_lpower(struct efx_nic *efx,
235 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, 239 int lpower, int mmd)
236 MDIO_MMDREG_CTRL1, ctrl2); 240{
241 int phy = efx->mii.phy_id;
242 int stat = mdio_clause45_read(efx, phy, mmd, MDIO_MMDREG_STAT1);
237 243
238 /* Handle (with debouncing) PCS loopback */ 244 EFX_TRACE(efx, "Setting low power mode for MMD %d to %d\n",
239 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS, 245 mmd, lpower);
240 MDIO_MMDREG_CTRL1);
241 if (efx->loopback_mode == LOOPBACK_PCS)
242 ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
243 else
244 ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN);
245 246
246 if (ctrl1 != ctrl2) 247 if (stat & (1 << MDIO_MMDREG_STAT1_LPABLE_LBN)) {
247 mdio_clause45_write(efx, phy_id, MDIO_MMD_PCS, 248 mdio_clause45_set_flag(efx, phy, mmd, MDIO_MMDREG_CTRL1,
248 MDIO_MMDREG_CTRL1, ctrl2); 249 MDIO_MMDREG_CTRL1_LPOWER_LBN, lpower);
250 }
251}
249 252
250 /* Handle (with debouncing) PHYXS network loopback */ 253void mdio_clause45_set_mmds_lpower(struct efx_nic *efx,
251 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, 254 int low_power, unsigned int mmd_mask)
252 MDIO_MMDREG_CTRL1); 255{
253 if (efx->loopback_mode == LOOPBACK_NETWORK) 256 int mmd = 0;
254 ctrl2 |= (1 << MDIO_MMDREG_CTRL1_LBACK_LBN); 257 mmd_mask &= ~MDIO_MMDREG_DEVS_AN;
255 else 258 while (mmd_mask) {
256 ctrl2 &= ~(1 << MDIO_MMDREG_CTRL1_LBACK_LBN); 259 if (mmd_mask & 1)
260 mdio_clause45_set_mmd_lpower(efx, low_power, mmd);
261 mmd_mask = (mmd_mask >> 1);
262 mmd++;
263 }
264}
257 265
258 if (ctrl1 != ctrl2) 266static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp)
259 mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, 267{
260 MDIO_MMDREG_CTRL1, ctrl2); 268 int phy_id = efx->mii.phy_id;
269 u32 result = 0;
270 int reg;
271
272 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, addr);
273 if (reg & ADVERTISE_10HALF)
274 result |= ADVERTISED_10baseT_Half;
275 if (reg & ADVERTISE_10FULL)
276 result |= ADVERTISED_10baseT_Full;
277 if (reg & ADVERTISE_100HALF)
278 result |= ADVERTISED_100baseT_Half;
279 if (reg & ADVERTISE_100FULL)
280 result |= ADVERTISED_100baseT_Full;
281 if (reg & LPA_RESV)
282 result |= xnp;
283
284 return result;
261} 285}
262 286
263/** 287/**
@@ -266,95 +290,290 @@ void mdio_clause45_phy_reconfigure(struct efx_nic *efx)
266 * @ecmd: Buffer for settings 290 * @ecmd: Buffer for settings
267 * 291 *
268 * On return the 'port', 'speed', 'supported' and 'advertising' fields of 292 * On return the 'port', 'speed', 'supported' and 'advertising' fields of
269 * ecmd have been filled out based on the PMA type. 293 * ecmd have been filled out.
270 */ 294 */
271void mdio_clause45_get_settings(struct efx_nic *efx, 295void mdio_clause45_get_settings(struct efx_nic *efx,
272 struct ethtool_cmd *ecmd) 296 struct ethtool_cmd *ecmd)
273{ 297{
274 int pma_type; 298 mdio_clause45_get_settings_ext(efx, ecmd, 0, 0);
299}
275 300
276 /* If no PMA is present we are presumably talking something XAUI-ish 301/**
277 * like CX4. Which we report as FIBRE (see below) */ 302 * mdio_clause45_get_settings_ext - Read (some of) the PHY settings over MDIO.
278 if ((efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)) == 0) { 303 * @efx: Efx NIC
279 ecmd->speed = SPEED_10000; 304 * @ecmd: Buffer for settings
280 ecmd->port = PORT_FIBRE; 305 * @xnp: Advertised Extended Next Page state
281 ecmd->supported = SUPPORTED_FIBRE; 306 * @xnp_lpa: Link Partner's advertised XNP state
282 ecmd->advertising = ADVERTISED_FIBRE; 307 *
283 return; 308 * On return the 'port', 'speed', 'supported' and 'advertising' fields of
284 } 309 * ecmd have been filled out.
310 */
311void mdio_clause45_get_settings_ext(struct efx_nic *efx,
312 struct ethtool_cmd *ecmd,
313 u32 xnp, u32 xnp_lpa)
314{
315 int phy_id = efx->mii.phy_id;
316 int reg;
285 317
286 pma_type = mdio_clause45_read(efx, efx->mii.phy_id, 318 ecmd->transceiver = XCVR_INTERNAL;
287 MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL2); 319 ecmd->phy_address = phy_id;
288 pma_type &= MDIO_PMAPMD_CTRL2_TYPE_MASK;
289 320
290 switch (pma_type) { 321 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
291 /* We represent CX4 as fibre in the absence of anything 322 MDIO_MMDREG_CTRL2);
292 better. */ 323 switch (reg & MDIO_PMAPMD_CTRL2_TYPE_MASK) {
293 case MDIO_PMAPMD_CTRL2_10G_CX4:
294 ecmd->speed = SPEED_10000;
295 ecmd->port = PORT_FIBRE;
296 ecmd->supported = SUPPORTED_FIBRE;
297 ecmd->advertising = ADVERTISED_FIBRE;
298 break;
299 /* 10G Base-T */
300 case MDIO_PMAPMD_CTRL2_10G_BT: 324 case MDIO_PMAPMD_CTRL2_10G_BT:
301 ecmd->speed = SPEED_10000;
302 ecmd->port = PORT_TP;
303 ecmd->supported = SUPPORTED_TP | SUPPORTED_10000baseT_Full;
304 ecmd->advertising = (ADVERTISED_FIBRE
305 | ADVERTISED_10000baseT_Full);
306 break;
307 case MDIO_PMAPMD_CTRL2_1G_BT: 325 case MDIO_PMAPMD_CTRL2_1G_BT:
308 ecmd->speed = SPEED_1000;
309 ecmd->port = PORT_TP;
310 ecmd->supported = SUPPORTED_TP | SUPPORTED_1000baseT_Full;
311 ecmd->advertising = (ADVERTISED_FIBRE
312 | ADVERTISED_1000baseT_Full);
313 break;
314 case MDIO_PMAPMD_CTRL2_100_BT: 326 case MDIO_PMAPMD_CTRL2_100_BT:
315 ecmd->speed = SPEED_100;
316 ecmd->port = PORT_TP;
317 ecmd->supported = SUPPORTED_TP | SUPPORTED_100baseT_Full;
318 ecmd->advertising = (ADVERTISED_FIBRE
319 | ADVERTISED_100baseT_Full);
320 break;
321 case MDIO_PMAPMD_CTRL2_10_BT: 327 case MDIO_PMAPMD_CTRL2_10_BT:
322 ecmd->speed = SPEED_10;
323 ecmd->port = PORT_TP; 328 ecmd->port = PORT_TP;
324 ecmd->supported = SUPPORTED_TP | SUPPORTED_10baseT_Full; 329 ecmd->supported = SUPPORTED_TP;
325 ecmd->advertising = ADVERTISED_FIBRE | ADVERTISED_10baseT_Full; 330 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
331 MDIO_MMDREG_SPEED);
332 if (reg & (1 << MDIO_MMDREG_SPEED_10G_LBN))
333 ecmd->supported |= SUPPORTED_10000baseT_Full;
334 if (reg & (1 << MDIO_MMDREG_SPEED_1000M_LBN))
335 ecmd->supported |= (SUPPORTED_1000baseT_Full |
336 SUPPORTED_1000baseT_Half);
337 if (reg & (1 << MDIO_MMDREG_SPEED_100M_LBN))
338 ecmd->supported |= (SUPPORTED_100baseT_Full |
339 SUPPORTED_100baseT_Half);
340 if (reg & (1 << MDIO_MMDREG_SPEED_10M_LBN))
341 ecmd->supported |= (SUPPORTED_10baseT_Full |
342 SUPPORTED_10baseT_Half);
343 ecmd->advertising = ADVERTISED_TP;
326 break; 344 break;
327 /* All the other defined modes are flavours of 345
328 * 10G optical */ 346 /* We represent CX4 as fibre in the absence of anything better */
347 case MDIO_PMAPMD_CTRL2_10G_CX4:
348 /* All the other defined modes are flavours of optical */
329 default: 349 default:
330 ecmd->speed = SPEED_10000;
331 ecmd->port = PORT_FIBRE; 350 ecmd->port = PORT_FIBRE;
332 ecmd->supported = SUPPORTED_FIBRE; 351 ecmd->supported = SUPPORTED_FIBRE;
333 ecmd->advertising = ADVERTISED_FIBRE; 352 ecmd->advertising = ADVERTISED_FIBRE;
334 break; 353 break;
335 } 354 }
355
356 if (efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_AN)) {
357 ecmd->supported |= SUPPORTED_Autoneg;
358 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
359 MDIO_MMDREG_CTRL1);
360 if (reg & BMCR_ANENABLE) {
361 ecmd->autoneg = AUTONEG_ENABLE;
362 ecmd->advertising |=
363 ADVERTISED_Autoneg |
364 mdio_clause45_get_an(efx,
365 MDIO_AN_ADVERTISE, xnp);
366 } else
367 ecmd->autoneg = AUTONEG_DISABLE;
368 } else
369 ecmd->autoneg = AUTONEG_DISABLE;
370
371 if (ecmd->autoneg) {
372 /* If AN is complete, report best common mode,
373 * otherwise report best advertised mode. */
374 u32 common = ecmd->advertising;
375 if (mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
376 MDIO_MMDREG_STAT1) &
377 (1 << MDIO_AN_STATUS_AN_DONE_LBN)) {
378 common &= mdio_clause45_get_an(efx, MDIO_AN_LPA,
379 xnp_lpa);
380 }
381 if (common & ADVERTISED_10000baseT_Full) {
382 ecmd->speed = SPEED_10000;
383 ecmd->duplex = DUPLEX_FULL;
384 } else if (common & (ADVERTISED_1000baseT_Full |
385 ADVERTISED_1000baseT_Half)) {
386 ecmd->speed = SPEED_1000;
387 ecmd->duplex = !!(common & ADVERTISED_1000baseT_Full);
388 } else if (common & (ADVERTISED_100baseT_Full |
389 ADVERTISED_100baseT_Half)) {
390 ecmd->speed = SPEED_100;
391 ecmd->duplex = !!(common & ADVERTISED_100baseT_Full);
392 } else {
393 ecmd->speed = SPEED_10;
394 ecmd->duplex = !!(common & ADVERTISED_10baseT_Full);
395 }
396 } else {
397 /* Report forced settings */
398 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
399 MDIO_MMDREG_CTRL1);
400 ecmd->speed = (((reg & BMCR_SPEED1000) ? 100 : 1) *
401 ((reg & BMCR_SPEED100) ? 100 : 10));
402 ecmd->duplex = (reg & BMCR_FULLDPLX ||
403 ecmd->speed == SPEED_10000);
404 }
336} 405}
337 406
338/** 407/**
339 * mdio_clause45_set_settings - Set (some of) the PHY settings over MDIO. 408 * mdio_clause45_set_settings - Set (some of) the PHY settings over MDIO.
340 * @efx: Efx NIC 409 * @efx: Efx NIC
341 * @ecmd: New settings 410 * @ecmd: New settings
342 *
343 * Currently this just enforces that we are _not_ changing the
344 * 'port', 'speed', 'supported' or 'advertising' settings as these
345 * cannot be changed on any currently supported PHY.
346 */ 411 */
347int mdio_clause45_set_settings(struct efx_nic *efx, 412int mdio_clause45_set_settings(struct efx_nic *efx,
348 struct ethtool_cmd *ecmd) 413 struct ethtool_cmd *ecmd)
349{ 414{
350 struct ethtool_cmd tmpcmd; 415 int phy_id = efx->mii.phy_id;
351 mdio_clause45_get_settings(efx, &tmpcmd); 416 struct ethtool_cmd prev;
352 /* None of the current PHYs support more than one mode 417 u32 required;
353 * of operation (and only 10GBT ever will), so keep things 418 int ctrl1_bits, reg;
354 * simple for now */ 419
355 if ((ecmd->speed == tmpcmd.speed) && (ecmd->port == tmpcmd.port) && 420 efx->phy_op->get_settings(efx, &prev);
356 (ecmd->supported == tmpcmd.supported) && 421
357 (ecmd->advertising == tmpcmd.advertising)) 422 if (ecmd->advertising == prev.advertising &&
423 ecmd->speed == prev.speed &&
424 ecmd->duplex == prev.duplex &&
425 ecmd->port == prev.port &&
426 ecmd->autoneg == prev.autoneg)
358 return 0; 427 return 0;
359 return -EOPNOTSUPP; 428
429 /* We can only change these settings for -T PHYs */
430 if (prev.port != PORT_TP || ecmd->port != PORT_TP)
431 return -EINVAL;
432
433 /* Check that PHY supports these settings and work out the
434 * basic control bits */
435 if (ecmd->duplex) {
436 switch (ecmd->speed) {
437 case SPEED_10:
438 ctrl1_bits = BMCR_FULLDPLX;
439 required = SUPPORTED_10baseT_Full;
440 break;
441 case SPEED_100:
442 ctrl1_bits = BMCR_SPEED100 | BMCR_FULLDPLX;
443 required = SUPPORTED_100baseT_Full;
444 break;
445 case SPEED_1000:
446 ctrl1_bits = BMCR_SPEED1000 | BMCR_FULLDPLX;
447 required = SUPPORTED_1000baseT_Full;
448 break;
449 case SPEED_10000:
450 ctrl1_bits = (BMCR_SPEED1000 | BMCR_SPEED100 |
451 BMCR_FULLDPLX);
452 required = SUPPORTED_10000baseT_Full;
453 break;
454 default:
455 return -EINVAL;
456 }
457 } else {
458 switch (ecmd->speed) {
459 case SPEED_10:
460 ctrl1_bits = 0;
461 required = SUPPORTED_10baseT_Half;
462 break;
463 case SPEED_100:
464 ctrl1_bits = BMCR_SPEED100;
465 required = SUPPORTED_100baseT_Half;
466 break;
467 case SPEED_1000:
468 ctrl1_bits = BMCR_SPEED1000;
469 required = SUPPORTED_1000baseT_Half;
470 break;
471 default:
472 return -EINVAL;
473 }
474 }
475 if (ecmd->autoneg)
476 required |= SUPPORTED_Autoneg;
477 required |= ecmd->advertising;
478 if (required & ~prev.supported)
479 return -EINVAL;
480
481 /* Set the basic control bits */
482 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
483 MDIO_MMDREG_CTRL1);
484 reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX | 0x003c);
485 reg |= ctrl1_bits;
486 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL1,
487 reg);
488
489 /* Set the AN registers */
490 if (ecmd->autoneg != prev.autoneg ||
491 ecmd->advertising != prev.advertising) {
492 bool xnp = false;
493
494 if (efx->phy_op->set_xnp_advertise)
495 xnp = efx->phy_op->set_xnp_advertise(efx,
496 ecmd->advertising);
497
498 if (ecmd->autoneg) {
499 reg = 0;
500 if (ecmd->advertising & ADVERTISED_10baseT_Half)
501 reg |= ADVERTISE_10HALF;
502 if (ecmd->advertising & ADVERTISED_10baseT_Full)
503 reg |= ADVERTISE_10FULL;
504 if (ecmd->advertising & ADVERTISED_100baseT_Half)
505 reg |= ADVERTISE_100HALF;
506 if (ecmd->advertising & ADVERTISED_100baseT_Full)
507 reg |= ADVERTISE_100FULL;
508 if (xnp)
509 reg |= ADVERTISE_RESV;
510 mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
511 MDIO_AN_ADVERTISE, reg);
512 }
513
514 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
515 MDIO_MMDREG_CTRL1);
516 if (ecmd->autoneg)
517 reg |= BMCR_ANENABLE | BMCR_ANRESTART;
518 else
519 reg &= ~BMCR_ANENABLE;
520 if (xnp)
521 reg |= 1 << MDIO_AN_CTRL_XNP_LBN;
522 else
523 reg &= ~(1 << MDIO_AN_CTRL_XNP_LBN);
524 mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
525 MDIO_MMDREG_CTRL1, reg);
526 }
527
528 return 0;
529}
530
531void mdio_clause45_set_pause(struct efx_nic *efx)
532{
533 int phy_id = efx->mii.phy_id;
534 int reg;
535
536 if (efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_AN)) {
537 /* Set pause capability advertising */
538 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
539 MDIO_AN_ADVERTISE);
540 reg &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
541 reg |= efx_fc_advertise(efx->wanted_fc);
542 mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
543 MDIO_AN_ADVERTISE, reg);
544
545 /* Restart auto-negotiation */
546 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
547 MDIO_MMDREG_CTRL1);
548 if (reg & BMCR_ANENABLE) {
549 reg |= BMCR_ANRESTART;
550 mdio_clause45_write(efx, phy_id, MDIO_MMD_AN,
551 MDIO_MMDREG_CTRL1, reg);
552 }
553 }
554}
555
556enum efx_fc_type mdio_clause45_get_pause(struct efx_nic *efx)
557{
558 int phy_id = efx->mii.phy_id;
559 int lpa;
560
561 if (!(efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_AN)))
562 return efx->wanted_fc;
563 lpa = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, MDIO_AN_LPA);
564 return efx_fc_resolve(efx->wanted_fc, lpa);
565}
566
567void mdio_clause45_set_flag(struct efx_nic *efx, u8 prt, u8 dev,
568 u16 addr, int bit, bool sense)
569{
570 int old_val = mdio_clause45_read(efx, prt, dev, addr);
571 int new_val;
572
573 if (sense)
574 new_val = old_val | (1 << bit);
575 else
576 new_val = old_val & ~(1 << bit);
577 if (old_val != new_val)
578 mdio_clause45_write(efx, prt, dev, addr, new_val);
360} 579}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 19c42eaf7fb4..09bf801d0569 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -33,6 +33,8 @@
33#define MDIO_MMD_TC (6) 33#define MDIO_MMD_TC (6)
34/* Auto negotiation */ 34/* Auto negotiation */
35#define MDIO_MMD_AN (7) 35#define MDIO_MMD_AN (7)
36/* Clause 22 extension */
37#define MDIO_MMD_C22EXT 29
36 38
37/* Generic register locations */ 39/* Generic register locations */
38#define MDIO_MMDREG_CTRL1 (0) 40#define MDIO_MMDREG_CTRL1 (0)
@@ -54,6 +56,9 @@
54/* Loopback bit for WIS, PCS, PHYSX and DTEXS */ 56/* Loopback bit for WIS, PCS, PHYSX and DTEXS */
55#define MDIO_MMDREG_CTRL1_LBACK_LBN (14) 57#define MDIO_MMDREG_CTRL1_LBACK_LBN (14)
56#define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1) 58#define MDIO_MMDREG_CTRL1_LBACK_WIDTH (1)
59/* Low power */
60#define MDIO_MMDREG_CTRL1_LPOWER_LBN (11)
61#define MDIO_MMDREG_CTRL1_LPOWER_WIDTH (1)
57 62
58/* Bits in MMDREG_STAT1 */ 63/* Bits in MMDREG_STAT1 */
59#define MDIO_MMDREG_STAT1_FAULT_LBN (7) 64#define MDIO_MMDREG_STAT1_FAULT_LBN (7)
@@ -70,14 +75,26 @@
70#define MDIO_ID_MODEL(_id32) ((_id32 >> 4) & 0x3f) 75#define MDIO_ID_MODEL(_id32) ((_id32 >> 4) & 0x3f)
71#define MDIO_ID_OUI(_id32) (_id32 >> 10) 76#define MDIO_ID_OUI(_id32) (_id32 >> 10)
72 77
73/* Bits in MMDREG_DEVS0. Someone thoughtfully layed things out 78/* Bits in MMDREG_DEVS0/1. Someone thoughtfully layed things out
74 * so the 'bit present' bit number of an MMD is the number of 79 * so the 'bit present' bit number of an MMD is the number of
75 * that MMD */ 80 * that MMD */
76#define DEV_PRESENT_BIT(_b) (1 << _b) 81#define DEV_PRESENT_BIT(_b) (1 << _b)
77 82
78#define MDIO_MMDREG_DEVS0_PHYXS DEV_PRESENT_BIT(MDIO_MMD_PHYXS) 83#define MDIO_MMDREG_DEVS_PHYXS DEV_PRESENT_BIT(MDIO_MMD_PHYXS)
79#define MDIO_MMDREG_DEVS0_PCS DEV_PRESENT_BIT(MDIO_MMD_PCS) 84#define MDIO_MMDREG_DEVS_PCS DEV_PRESENT_BIT(MDIO_MMD_PCS)
80#define MDIO_MMDREG_DEVS0_PMAPMD DEV_PRESENT_BIT(MDIO_MMD_PMAPMD) 85#define MDIO_MMDREG_DEVS_PMAPMD DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)
86#define MDIO_MMDREG_DEVS_AN DEV_PRESENT_BIT(MDIO_MMD_AN)
87#define MDIO_MMDREG_DEVS_C22EXT DEV_PRESENT_BIT(MDIO_MMD_C22EXT)
88
89/* Bits in MMDREG_SPEED */
90#define MDIO_MMDREG_SPEED_10G_LBN 0
91#define MDIO_MMDREG_SPEED_10G_WIDTH 1
92#define MDIO_MMDREG_SPEED_1000M_LBN 4
93#define MDIO_MMDREG_SPEED_1000M_WIDTH 1
94#define MDIO_MMDREG_SPEED_100M_LBN 5
95#define MDIO_MMDREG_SPEED_100M_WIDTH 1
96#define MDIO_MMDREG_SPEED_10M_LBN 6
97#define MDIO_MMDREG_SPEED_10M_WIDTH 1
81 98
82/* Bits in MMDREG_STAT2 */ 99/* Bits in MMDREG_STAT2 */
83#define MDIO_MMDREG_STAT2_PRESENT_VAL (2) 100#define MDIO_MMDREG_STAT2_PRESENT_VAL (2)
@@ -111,17 +128,34 @@
111#define MDIO_PMAPMD_CTRL2_10_BT (0xf) 128#define MDIO_PMAPMD_CTRL2_10_BT (0xf)
112#define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf) 129#define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf)
113 130
131/* PMA 10GBT registers */
132#define MDIO_PMAPMD_10GBT_TXPWR (131)
133#define MDIO_PMAPMD_10GBT_TXPWR_SHORT_LBN (0)
134#define MDIO_PMAPMD_10GBT_TXPWR_SHORT_WIDTH (1)
135
136/* PHY XGXS Status 2 */
137#define MDIO_PHYXS_STATUS2 (8)
138#define MDIO_PHYXS_STATUS2_RX_FAULT_LBN 10
139
114/* PHY XGXS lane state */ 140/* PHY XGXS lane state */
115#define MDIO_PHYXS_LANE_STATE (0x18) 141#define MDIO_PHYXS_LANE_STATE (0x18)
116#define MDIO_PHYXS_LANE_ALIGNED_LBN (12) 142#define MDIO_PHYXS_LANE_ALIGNED_LBN (12)
117 143
118/* AN registers */ 144/* AN registers */
145#define MDIO_AN_CTRL_XNP_LBN 13
119#define MDIO_AN_STATUS (1) 146#define MDIO_AN_STATUS (1)
120#define MDIO_AN_STATUS_XNP_LBN (7) 147#define MDIO_AN_STATUS_XNP_LBN (7)
121#define MDIO_AN_STATUS_PAGE_LBN (6) 148#define MDIO_AN_STATUS_PAGE_LBN (6)
122#define MDIO_AN_STATUS_AN_DONE_LBN (5) 149#define MDIO_AN_STATUS_AN_DONE_LBN (5)
123#define MDIO_AN_STATUS_LP_AN_CAP_LBN (0) 150#define MDIO_AN_STATUS_LP_AN_CAP_LBN (0)
124 151
152#define MDIO_AN_ADVERTISE 16
153#define MDIO_AN_ADVERTISE_XNP_LBN 12
154#define MDIO_AN_LPA 19
155#define MDIO_AN_XNP 22
156#define MDIO_AN_LPA_XNP 25
157
158#define MDIO_AN_10GBT_ADVERTISE 32
125#define MDIO_AN_10GBT_STATUS (33) 159#define MDIO_AN_10GBT_STATUS (33)
126#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */ 160#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */
127#define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */ 161#define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */
@@ -240,16 +274,37 @@ extern void mdio_clause45_transmit_disable(struct efx_nic *efx);
240/* Generic part of reconfigure: set/clear loopback bits */ 274/* Generic part of reconfigure: set/clear loopback bits */
241extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx); 275extern void mdio_clause45_phy_reconfigure(struct efx_nic *efx);
242 276
277/* Set the power state of the specified MMDs */
278extern void mdio_clause45_set_mmds_lpower(struct efx_nic *efx,
279 int low_power, unsigned int mmd_mask);
280
243/* Read (some of) the PHY settings over MDIO */ 281/* Read (some of) the PHY settings over MDIO */
244extern void mdio_clause45_get_settings(struct efx_nic *efx, 282extern void mdio_clause45_get_settings(struct efx_nic *efx,
245 struct ethtool_cmd *ecmd); 283 struct ethtool_cmd *ecmd);
246 284
285/* Read (some of) the PHY settings over MDIO */
286extern void
287mdio_clause45_get_settings_ext(struct efx_nic *efx, struct ethtool_cmd *ecmd,
288 u32 xnp, u32 xnp_lpa);
289
247/* Set (some of) the PHY settings over MDIO */ 290/* Set (some of) the PHY settings over MDIO */
248extern int mdio_clause45_set_settings(struct efx_nic *efx, 291extern int mdio_clause45_set_settings(struct efx_nic *efx,
249 struct ethtool_cmd *ecmd); 292 struct ethtool_cmd *ecmd);
250 293
294/* Set pause parameters to be advertised through AN (if available) */
295extern void mdio_clause45_set_pause(struct efx_nic *efx);
296
297/* Get pause parameters from AN if available (otherwise return
298 * requested pause parameters)
299 */
300enum efx_fc_type mdio_clause45_get_pause(struct efx_nic *efx);
301
251/* Wait for specified MMDs to exit reset within a timeout */ 302/* Wait for specified MMDs to exit reset within a timeout */
252extern int mdio_clause45_wait_reset_mmds(struct efx_nic *efx, 303extern int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
253 unsigned int mmd_mask); 304 unsigned int mmd_mask);
254 305
306/* Set or clear flag, debouncing */
307extern void mdio_clause45_set_flag(struct efx_nic *efx, u8 prt, u8 dev,
308 u16 addr, int bit, bool sense);
309
255#endif /* EFX_MDIO_10G_H */ 310#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
new file mode 100644
index 000000000000..665cafb88d6a
--- /dev/null
+++ b/drivers/net/sfc/mtd.c
@@ -0,0 +1,268 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/module.h>
12#include <linux/mtd/mtd.h>
13#include <linux/delay.h>
14
15#define EFX_DRIVER_NAME "sfc_mtd"
16#include "net_driver.h"
17#include "spi.h"
18
19#define EFX_SPI_VERIFY_BUF_LEN 16
20
/* Per-NIC MTD partition state for the Falcon boot-ROM flash region */
struct efx_mtd {
	const struct efx_spi_device *spi;	/* underlying SPI flash device */
	struct mtd_info mtd;			/* handle registered with the MTD core */
	char name[IFNAMSIZ + 20];		/* "<ifname> sfc_flash_bootrom" */
};
26
27/* SPI utilities */
28
29static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
30{
31 const struct efx_spi_device *spi = efx_mtd->spi;
32 struct efx_nic *efx = spi->efx;
33 u8 status;
34 int rc, i;
35
36 /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
37 for (i = 0; i < 40; i++) {
38 __set_current_state(uninterruptible ?
39 TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
40 schedule_timeout(HZ / 10);
41 rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
42 &status, sizeof(status));
43 if (rc)
44 return rc;
45 if (!(status & SPI_STATUS_NRDY))
46 return 0;
47 if (signal_pending(current))
48 return -EINTR;
49 }
50 EFX_ERR(efx, "timed out waiting for %s\n", efx_mtd->name);
51 return -ETIMEDOUT;
52}
53
54static int efx_spi_unlock(const struct efx_spi_device *spi)
55{
56 const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
57 SPI_STATUS_BP0);
58 u8 status;
59 int rc;
60
61 rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL, &status, sizeof(status));
62 if (rc)
63 return rc;
64
65 if (!(status & unlock_mask))
66 return 0; /* already unlocked */
67
68 rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
69 if (rc)
70 return rc;
71 rc = falcon_spi_cmd(spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
72 if (rc)
73 return rc;
74
75 status &= ~unlock_mask;
76 rc = falcon_spi_cmd(spi, SPI_WRSR, -1, &status, NULL, sizeof(status));
77 if (rc)
78 return rc;
79 rc = falcon_spi_wait_write(spi);
80 if (rc)
81 return rc;
82
83 return 0;
84}
85
86static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
87{
88 const struct efx_spi_device *spi = efx_mtd->spi;
89 unsigned pos, block_len;
90 u8 empty[EFX_SPI_VERIFY_BUF_LEN];
91 u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
92 int rc;
93
94 if (len != spi->erase_size)
95 return -EINVAL;
96
97 if (spi->erase_command == 0)
98 return -EOPNOTSUPP;
99
100 rc = efx_spi_unlock(spi);
101 if (rc)
102 return rc;
103 rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
104 if (rc)
105 return rc;
106 rc = falcon_spi_cmd(spi, spi->erase_command, start, NULL, NULL, 0);
107 if (rc)
108 return rc;
109 rc = efx_spi_slow_wait(efx_mtd, false);
110
111 /* Verify the entire region has been wiped */
112 memset(empty, 0xff, sizeof(empty));
113 for (pos = 0; pos < len; pos += block_len) {
114 block_len = min(len - pos, sizeof(buffer));
115 rc = falcon_spi_read(spi, start + pos, block_len, NULL, buffer);
116 if (rc)
117 return rc;
118 if (memcmp(empty, buffer, block_len))
119 return -EIO;
120
121 /* Avoid locking up the system */
122 cond_resched();
123 if (signal_pending(current))
124 return -EINTR;
125 }
126
127 return rc;
128}
129
130/* MTD interface */
131
132static int efx_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
133 size_t *retlen, u8 *buffer)
134{
135 struct efx_mtd *efx_mtd = mtd->priv;
136 const struct efx_spi_device *spi = efx_mtd->spi;
137 struct efx_nic *efx = spi->efx;
138 int rc;
139
140 rc = mutex_lock_interruptible(&efx->spi_lock);
141 if (rc)
142 return rc;
143 rc = falcon_spi_read(spi, FALCON_FLASH_BOOTCODE_START + start,
144 len, retlen, buffer);
145 mutex_unlock(&efx->spi_lock);
146 return rc;
147}
148
149static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
150{
151 struct efx_mtd *efx_mtd = mtd->priv;
152 struct efx_nic *efx = efx_mtd->spi->efx;
153 int rc;
154
155 rc = mutex_lock_interruptible(&efx->spi_lock);
156 if (rc)
157 return rc;
158 rc = efx_spi_erase(efx_mtd, FALCON_FLASH_BOOTCODE_START + erase->addr,
159 erase->len);
160 mutex_unlock(&efx->spi_lock);
161
162 if (rc == 0) {
163 erase->state = MTD_ERASE_DONE;
164 } else {
165 erase->state = MTD_ERASE_FAILED;
166 erase->fail_addr = 0xffffffff;
167 }
168 mtd_erase_callback(erase);
169 return rc;
170}
171
172static int efx_mtd_write(struct mtd_info *mtd, loff_t start,
173 size_t len, size_t *retlen, const u8 *buffer)
174{
175 struct efx_mtd *efx_mtd = mtd->priv;
176 const struct efx_spi_device *spi = efx_mtd->spi;
177 struct efx_nic *efx = spi->efx;
178 int rc;
179
180 rc = mutex_lock_interruptible(&efx->spi_lock);
181 if (rc)
182 return rc;
183 rc = falcon_spi_write(spi, FALCON_FLASH_BOOTCODE_START + start,
184 len, retlen, buffer);
185 mutex_unlock(&efx->spi_lock);
186 return rc;
187}
188
189static void efx_mtd_sync(struct mtd_info *mtd)
190{
191 struct efx_mtd *efx_mtd = mtd->priv;
192 struct efx_nic *efx = efx_mtd->spi->efx;
193 int rc;
194
195 mutex_lock(&efx->spi_lock);
196 rc = efx_spi_slow_wait(efx_mtd, true);
197 mutex_unlock(&efx->spi_lock);
198
199 if (rc)
200 EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc);
201 return;
202}
203
204void efx_mtd_remove(struct efx_nic *efx)
205{
206 if (efx->spi_flash && efx->spi_flash->mtd) {
207 struct efx_mtd *efx_mtd = efx->spi_flash->mtd;
208 int rc;
209
210 for (;;) {
211 rc = del_mtd_device(&efx_mtd->mtd);
212 if (rc != -EBUSY)
213 break;
214 ssleep(1);
215 }
216 WARN_ON(rc);
217 kfree(efx_mtd);
218 }
219}
220
221void efx_mtd_rename(struct efx_nic *efx)
222{
223 if (efx->spi_flash && efx->spi_flash->mtd) {
224 struct efx_mtd *efx_mtd = efx->spi_flash->mtd;
225 snprintf(efx_mtd->name, sizeof(efx_mtd->name),
226 "%s sfc_flash_bootrom", efx->name);
227 }
228}
229
230int efx_mtd_probe(struct efx_nic *efx)
231{
232 struct efx_spi_device *spi = efx->spi_flash;
233 struct efx_mtd *efx_mtd;
234
235 if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START)
236 return -ENODEV;
237
238 efx_mtd = kzalloc(sizeof(*efx_mtd), GFP_KERNEL);
239 if (!efx_mtd)
240 return -ENOMEM;
241
242 efx_mtd->spi = spi;
243 spi->mtd = efx_mtd;
244
245 efx_mtd->mtd.type = MTD_NORFLASH;
246 efx_mtd->mtd.flags = MTD_CAP_NORFLASH;
247 efx_mtd->mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
248 efx_mtd->mtd.erasesize = spi->erase_size;
249 efx_mtd->mtd.writesize = 1;
250 efx_mtd_rename(efx);
251
252 efx_mtd->mtd.owner = THIS_MODULE;
253 efx_mtd->mtd.priv = efx_mtd;
254 efx_mtd->mtd.name = efx_mtd->name;
255 efx_mtd->mtd.erase = efx_mtd_erase;
256 efx_mtd->mtd.read = efx_mtd_read;
257 efx_mtd->mtd.write = efx_mtd_write;
258 efx_mtd->mtd.sync = efx_mtd_sync;
259
260 if (add_mtd_device(&efx_mtd->mtd)) {
261 kfree(efx_mtd);
262 spi->mtd = NULL;
263 /* add_mtd_device() returns 1 if the MTD table is full */
264 return -ENOMEM;
265 }
266
267 return 0;
268}
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index cdb11fad6050..5f255f75754e 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -42,7 +42,7 @@
42#ifndef EFX_DRIVER_NAME 42#ifndef EFX_DRIVER_NAME
43#define EFX_DRIVER_NAME "sfc" 43#define EFX_DRIVER_NAME "sfc"
44#endif 44#endif
45#define EFX_DRIVER_VERSION "2.2" 45#define EFX_DRIVER_VERSION "2.3"
46 46
47#ifdef EFX_ENABLE_DEBUG 47#ifdef EFX_ENABLE_DEBUG
48#define EFX_BUG_ON_PARANOID(x) BUG_ON(x) 48#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -327,6 +327,7 @@ enum efx_rx_alloc_method {
327 * 327 *
328 * @efx: Associated Efx NIC 328 * @efx: Associated Efx NIC
329 * @channel: Channel instance number 329 * @channel: Channel instance number
330 * @name: Name for channel and IRQ
330 * @used_flags: Channel is used by net driver 331 * @used_flags: Channel is used by net driver
331 * @enabled: Channel enabled indicator 332 * @enabled: Channel enabled indicator
332 * @irq: IRQ number (MSI and MSI-X only) 333 * @irq: IRQ number (MSI and MSI-X only)
@@ -357,6 +358,7 @@ enum efx_rx_alloc_method {
357struct efx_channel { 358struct efx_channel {
358 struct efx_nic *efx; 359 struct efx_nic *efx;
359 int channel; 360 int channel;
361 char name[IFNAMSIZ + 6];
360 int used_flags; 362 int used_flags;
361 bool enabled; 363 bool enabled;
362 int irq; 364 int irq;
@@ -414,6 +416,7 @@ struct efx_blinker {
414 * @init_leds: Sets up board LEDs 416 * @init_leds: Sets up board LEDs
415 * @set_fault_led: Turns the fault LED on or off 417 * @set_fault_led: Turns the fault LED on or off
416 * @blink: Starts/stops blinking 418 * @blink: Starts/stops blinking
419 * @monitor: Board-specific health check function
417 * @fini: Cleanup function 420 * @fini: Cleanup function
418 * @blinker: used to blink LEDs in software 421 * @blinker: used to blink LEDs in software
419 * @hwmon_client: I2C client for hardware monitor 422 * @hwmon_client: I2C client for hardware monitor
@@ -428,6 +431,7 @@ struct efx_board {
428 * have a separate init callback that happens later than 431 * have a separate init callback that happens later than
429 * board init. */ 432 * board init. */
430 int (*init_leds)(struct efx_nic *efx); 433 int (*init_leds)(struct efx_nic *efx);
434 int (*monitor) (struct efx_nic *nic);
431 void (*set_fault_led) (struct efx_nic *efx, bool state); 435 void (*set_fault_led) (struct efx_nic *efx, bool state);
432 void (*blink) (struct efx_nic *efx, bool start); 436 void (*blink) (struct efx_nic *efx, bool start);
433 void (*fini) (struct efx_nic *nic); 437 void (*fini) (struct efx_nic *nic);
@@ -449,16 +453,20 @@ enum efx_int_mode {
449 453
450enum phy_type { 454enum phy_type {
451 PHY_TYPE_NONE = 0, 455 PHY_TYPE_NONE = 0,
452 PHY_TYPE_CX4_RTMR = 1, 456 PHY_TYPE_TXC43128 = 1,
453 PHY_TYPE_1G_ALASKA = 2, 457 PHY_TYPE_88E1111 = 2,
454 PHY_TYPE_10XPRESS = 3, 458 PHY_TYPE_SFX7101 = 3,
455 PHY_TYPE_XFP = 4, 459 PHY_TYPE_QT2022C2 = 4,
456 PHY_TYPE_PM8358 = 6, 460 PHY_TYPE_PM8358 = 6,
461 PHY_TYPE_SFT9001A = 8,
462 PHY_TYPE_SFT9001B = 10,
457 PHY_TYPE_MAX /* Insert any new items before this */ 463 PHY_TYPE_MAX /* Insert any new items before this */
458}; 464};
459 465
460#define PHY_ADDR_INVALID 0xff 466#define PHY_ADDR_INVALID 0xff
461 467
468#define EFX_IS10G(efx) ((efx)->link_speed == 10000)
469
462enum nic_state { 470enum nic_state {
463 STATE_INIT = 0, 471 STATE_INIT = 0,
464 STATE_RUNNING = 1, 472 STATE_RUNNING = 1,
@@ -499,6 +507,55 @@ enum efx_fc_type {
499 EFX_FC_AUTO = 4, 507 EFX_FC_AUTO = 4,
500}; 508};
501 509
510/* Supported MAC bit-mask */
511enum efx_mac_type {
512 EFX_GMAC = 1,
513 EFX_XMAC = 2,
514};
515
516static inline unsigned int efx_fc_advertise(enum efx_fc_type wanted_fc)
517{
518 unsigned int adv = 0;
519 if (wanted_fc & EFX_FC_RX)
520 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
521 if (wanted_fc & EFX_FC_TX)
522 adv ^= ADVERTISE_PAUSE_ASYM;
523 return adv;
524}
525
526static inline enum efx_fc_type efx_fc_resolve(enum efx_fc_type wanted_fc,
527 unsigned int lpa)
528{
529 unsigned int adv = efx_fc_advertise(wanted_fc);
530
531 if (!(wanted_fc & EFX_FC_AUTO))
532 return wanted_fc;
533
534 if (adv & lpa & ADVERTISE_PAUSE_CAP)
535 return EFX_FC_RX | EFX_FC_TX;
536 if (adv & lpa & ADVERTISE_PAUSE_ASYM) {
537 if (adv & ADVERTISE_PAUSE_CAP)
538 return EFX_FC_RX;
539 if (lpa & ADVERTISE_PAUSE_CAP)
540 return EFX_FC_TX;
541 }
542 return 0;
543}
544
545/**
546 * struct efx_mac_operations - Efx MAC operations table
547 * @reconfigure: Reconfigure MAC. Serialised by the mac_lock
548 * @update_stats: Update statistics
549 * @irq: Hardware MAC event callback. Serialised by the mac_lock
550 * @poll: Poll for hardware state. Serialised by the mac_lock
551 */
552struct efx_mac_operations {
553 void (*reconfigure) (struct efx_nic *efx);
554 void (*update_stats) (struct efx_nic *efx);
555 void (*irq) (struct efx_nic *efx);
556 void (*poll) (struct efx_nic *efx);
557};
558
502/** 559/**
503 * struct efx_phy_operations - Efx PHY operations table 560 * struct efx_phy_operations - Efx PHY operations table
504 * @init: Initialise PHY 561 * @init: Initialise PHY
@@ -506,17 +563,33 @@ enum efx_fc_type {
506 * @reconfigure: Reconfigure PHY (e.g. for new link parameters) 563 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
507 * @clear_interrupt: Clear down interrupt 564 * @clear_interrupt: Clear down interrupt
508 * @blink: Blink LEDs 565 * @blink: Blink LEDs
509 * @check_hw: Check hardware 566 * @poll: Poll for hardware state. Serialised by the mac_lock.
567 * @get_settings: Get ethtool settings. Serialised by the mac_lock.
568 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
569 * @set_xnp_advertise: Set abilities advertised in Extended Next Page
570 * (only needed where AN bit is set in mmds)
571 * @num_tests: Number of PHY-specific tests/results
572 * @test_names: Names of the tests/results
573 * @run_tests: Run tests and record results as appropriate.
574 * Flags are the ethtool tests flags.
510 * @mmds: MMD presence mask 575 * @mmds: MMD presence mask
511 * @loopbacks: Supported loopback modes mask 576 * @loopbacks: Supported loopback modes mask
512 */ 577 */
513struct efx_phy_operations { 578struct efx_phy_operations {
579 enum efx_mac_type macs;
514 int (*init) (struct efx_nic *efx); 580 int (*init) (struct efx_nic *efx);
515 void (*fini) (struct efx_nic *efx); 581 void (*fini) (struct efx_nic *efx);
516 void (*reconfigure) (struct efx_nic *efx); 582 void (*reconfigure) (struct efx_nic *efx);
517 void (*clear_interrupt) (struct efx_nic *efx); 583 void (*clear_interrupt) (struct efx_nic *efx);
518 int (*check_hw) (struct efx_nic *efx); 584 void (*poll) (struct efx_nic *efx);
519 int (*test) (struct efx_nic *efx); 585 void (*get_settings) (struct efx_nic *efx,
586 struct ethtool_cmd *ecmd);
587 int (*set_settings) (struct efx_nic *efx,
588 struct ethtool_cmd *ecmd);
589 bool (*set_xnp_advertise) (struct efx_nic *efx, u32);
590 u32 num_tests;
591 const char *const *test_names;
592 int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
520 int mmds; 593 int mmds;
521 unsigned loopbacks; 594 unsigned loopbacks;
522}; 595};
@@ -525,11 +598,15 @@ struct efx_phy_operations {
525 * @enum efx_phy_mode - PHY operating mode flags 598 * @enum efx_phy_mode - PHY operating mode flags
526 * @PHY_MODE_NORMAL: on and should pass traffic 599 * @PHY_MODE_NORMAL: on and should pass traffic
527 * @PHY_MODE_TX_DISABLED: on with TX disabled 600 * @PHY_MODE_TX_DISABLED: on with TX disabled
601 * @PHY_MODE_LOW_POWER: set to low power through MDIO
602 * @PHY_MODE_OFF: switched off through external control
528 * @PHY_MODE_SPECIAL: on but will not pass traffic 603 * @PHY_MODE_SPECIAL: on but will not pass traffic
529 */ 604 */
530enum efx_phy_mode { 605enum efx_phy_mode {
531 PHY_MODE_NORMAL = 0, 606 PHY_MODE_NORMAL = 0,
532 PHY_MODE_TX_DISABLED = 1, 607 PHY_MODE_TX_DISABLED = 1,
608 PHY_MODE_LOW_POWER = 2,
609 PHY_MODE_OFF = 4,
533 PHY_MODE_SPECIAL = 8, 610 PHY_MODE_SPECIAL = 8,
534}; 611};
535 612
@@ -629,7 +706,7 @@ union efx_multicast_hash {
629 * @legacy_irq: IRQ number 706 * @legacy_irq: IRQ number
630 * @workqueue: Workqueue for port reconfigures and the HW monitor. 707 * @workqueue: Workqueue for port reconfigures and the HW monitor.
631 * Work items do not hold and must not acquire RTNL. 708 * Work items do not hold and must not acquire RTNL.
632 * @reset_workqueue: Workqueue for resets. Work item will acquire RTNL. 709 * @workqueue_name: Name of workqueue
633 * @reset_work: Scheduled reset workitem 710 * @reset_work: Scheduled reset workitem
634 * @monitor_work: Hardware monitor workitem 711 * @monitor_work: Hardware monitor workitem
635 * @membase_phys: Memory BAR value as physical address 712 * @membase_phys: Memory BAR value as physical address
@@ -644,6 +721,7 @@ union efx_multicast_hash {
644 * @rx_queue: RX DMA queues 721 * @rx_queue: RX DMA queues
645 * @channel: Channels 722 * @channel: Channels
646 * @n_rx_queues: Number of RX queues 723 * @n_rx_queues: Number of RX queues
724 * @n_channels: Number of channels in use
647 * @rx_buffer_len: RX buffer length 725 * @rx_buffer_len: RX buffer length
648 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 726 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
649 * @irq_status: Interrupt status buffer 727 * @irq_status: Interrupt status buffer
@@ -655,15 +733,16 @@ union efx_multicast_hash {
655 * This field will be %NULL if no flash device is present. 733 * This field will be %NULL if no flash device is present.
656 * @spi_eeprom: SPI EEPROM device 734 * @spi_eeprom: SPI EEPROM device
657 * This field will be %NULL if no EEPROM device is present. 735 * This field will be %NULL if no EEPROM device is present.
736 * @spi_lock: SPI bus lock
658 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count 737 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
659 * @nic_data: Hardware dependant state 738 * @nic_data: Hardware dependant state
660 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 739 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
661 * @port_inhibited, efx_monitor() and efx_reconfigure_port() 740 * @port_inhibited, efx_monitor() and efx_reconfigure_port()
662 * @port_enabled: Port enabled indicator. 741 * @port_enabled: Port enabled indicator.
663 * Serialises efx_stop_all(), efx_start_all() and efx_monitor() and 742 * Serialises efx_stop_all(), efx_start_all(), efx_monitor(),
664 * efx_reconfigure_work with kernel interfaces. Safe to read under any 743 * efx_phy_work(), and efx_mac_work() with kernel interfaces. Safe to read
665 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must 744 * under any one of the rtnl_lock, mac_lock, or netif_tx_lock, but all
666 * be held to modify it. 745 * three must be held to modify it.
667 * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock 746 * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
668 * @port_initialized: Port initialized? 747 * @port_initialized: Port initialized?
669 * @net_dev: Operating system network device. Consider holding the rtnl lock 748 * @net_dev: Operating system network device. Consider holding the rtnl lock
@@ -677,6 +756,7 @@ union efx_multicast_hash {
677 * @stats_lock: Statistics update lock. Serialises statistics fetches 756 * @stats_lock: Statistics update lock. Serialises statistics fetches
678 * @stats_enabled: Temporarily disable statistics fetches. 757 * @stats_enabled: Temporarily disable statistics fetches.
679 * Serialised by @stats_lock 758 * Serialised by @stats_lock
759 * @mac_op: MAC interface
680 * @mac_address: Permanent MAC address 760 * @mac_address: Permanent MAC address
681 * @phy_type: PHY type 761 * @phy_type: PHY type
682 * @phy_lock: PHY access lock 762 * @phy_lock: PHY access lock
@@ -684,13 +764,17 @@ union efx_multicast_hash {
684 * @phy_data: PHY private data (including PHY-specific stats) 764 * @phy_data: PHY private data (including PHY-specific stats)
685 * @mii: PHY interface 765 * @mii: PHY interface
686 * @phy_mode: PHY operating mode. Serialised by @mac_lock. 766 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
767 * @mac_up: MAC link state
687 * @link_up: Link status 768 * @link_up: Link status
688 * @link_options: Link options (MII/GMII format) 769 * @link_fd: Link is full duplex
770 * @link_fc: Actualy flow control flags
771 * @link_speed: Link speed (Mbps)
689 * @n_link_state_changes: Number of times the link has changed state 772 * @n_link_state_changes: Number of times the link has changed state
690 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. 773 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
691 * @multicast_hash: Multicast hash table 774 * @multicast_hash: Multicast hash table
692 * @flow_control: Flow control flags - separate RX/TX so can't use link_options 775 * @wanted_fc: Wanted flow control flags
693 * @reconfigure_work: work item for dealing with PHY events 776 * @phy_work: work item for dealing with PHY events
777 * @mac_work: work item for dealing with MAC events
694 * @loopback_mode: Loopback status 778 * @loopback_mode: Loopback status
695 * @loopback_modes: Supported loopback mode bitmask 779 * @loopback_modes: Supported loopback mode bitmask
696 * @loopback_selftest: Offline self-test private state 780 * @loopback_selftest: Offline self-test private state
@@ -704,7 +788,7 @@ struct efx_nic {
704 const struct efx_nic_type *type; 788 const struct efx_nic_type *type;
705 int legacy_irq; 789 int legacy_irq;
706 struct workqueue_struct *workqueue; 790 struct workqueue_struct *workqueue;
707 struct workqueue_struct *reset_workqueue; 791 char workqueue_name[16];
708 struct work_struct reset_work; 792 struct work_struct reset_work;
709 struct delayed_work monitor_work; 793 struct delayed_work monitor_work;
710 resource_size_t membase_phys; 794 resource_size_t membase_phys;
@@ -723,6 +807,7 @@ struct efx_nic {
723 struct efx_channel channel[EFX_MAX_CHANNELS]; 807 struct efx_channel channel[EFX_MAX_CHANNELS];
724 808
725 int n_rx_queues; 809 int n_rx_queues;
810 int n_channels;
726 unsigned int rx_buffer_len; 811 unsigned int rx_buffer_len;
727 unsigned int rx_buffer_order; 812 unsigned int rx_buffer_order;
728 813
@@ -731,12 +816,14 @@ struct efx_nic {
731 816
732 struct efx_spi_device *spi_flash; 817 struct efx_spi_device *spi_flash;
733 struct efx_spi_device *spi_eeprom; 818 struct efx_spi_device *spi_eeprom;
819 struct mutex spi_lock;
734 820
735 unsigned n_rx_nodesc_drop_cnt; 821 unsigned n_rx_nodesc_drop_cnt;
736 822
737 struct falcon_nic_data *nic_data; 823 struct falcon_nic_data *nic_data;
738 824
739 struct mutex mac_lock; 825 struct mutex mac_lock;
826 struct work_struct mac_work;
740 bool port_enabled; 827 bool port_enabled;
741 bool port_inhibited; 828 bool port_inhibited;
742 829
@@ -752,23 +839,27 @@ struct efx_nic {
752 spinlock_t stats_lock; 839 spinlock_t stats_lock;
753 bool stats_enabled; 840 bool stats_enabled;
754 841
842 struct efx_mac_operations *mac_op;
755 unsigned char mac_address[ETH_ALEN]; 843 unsigned char mac_address[ETH_ALEN];
756 844
757 enum phy_type phy_type; 845 enum phy_type phy_type;
758 spinlock_t phy_lock; 846 spinlock_t phy_lock;
847 struct work_struct phy_work;
759 struct efx_phy_operations *phy_op; 848 struct efx_phy_operations *phy_op;
760 void *phy_data; 849 void *phy_data;
761 struct mii_if_info mii; 850 struct mii_if_info mii;
762 enum efx_phy_mode phy_mode; 851 enum efx_phy_mode phy_mode;
763 852
853 bool mac_up;
764 bool link_up; 854 bool link_up;
765 unsigned int link_options; 855 bool link_fd;
856 enum efx_fc_type link_fc;
857 unsigned int link_speed;
766 unsigned int n_link_state_changes; 858 unsigned int n_link_state_changes;
767 859
768 bool promiscuous; 860 bool promiscuous;
769 union efx_multicast_hash multicast_hash; 861 union efx_multicast_hash multicast_hash;
770 enum efx_fc_type flow_control; 862 enum efx_fc_type wanted_fc;
771 struct work_struct reconfigure_work;
772 863
773 atomic_t rx_reset; 864 atomic_t rx_reset;
774 enum efx_loopback_mode loopback_mode; 865 enum efx_loopback_mode loopback_mode;
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index f746536f4ffa..58c493ef81bb 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc. 3 * Copyright 2007-2008 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -11,9 +11,10 @@
11#define EFX_PHY_H 11#define EFX_PHY_H
12 12
13/**************************************************************************** 13/****************************************************************************
14 * 10Xpress (SFX7101) PHY 14 * 10Xpress (SFX7101 and SFT9001) PHYs
15 */ 15 */
16extern struct efx_phy_operations falcon_tenxpress_phy_ops; 16extern struct efx_phy_operations falcon_sfx7101_phy_ops;
17extern struct efx_phy_operations falcon_sft9001_phy_ops;
17 18
18extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink); 19extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
19extern void tenxpress_crc_err(struct efx_nic *efx); 20extern void tenxpress_crc_err(struct efx_nic *efx);
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 0f805da4ce55..b8ba4bbad889 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -752,7 +752,7 @@ void __efx_rx_packet(struct efx_channel *channel,
752 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 752 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
753 753
754done: 754done:
755 efx->net_dev->last_rx = jiffies; 755 ;
756} 756}
757 757
758void efx_rx_strategy(struct efx_channel *channel) 758void efx_rx_strategy(struct efx_channel *channel)
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 362956e3fe17..dba0d64d50cd 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -26,7 +26,6 @@
26#include "selftest.h" 26#include "selftest.h"
27#include "boards.h" 27#include "boards.h"
28#include "workarounds.h" 28#include "workarounds.h"
29#include "mac.h"
30#include "spi.h" 29#include "spi.h"
31#include "falcon_io.h" 30#include "falcon_io.h"
32#include "mdio_10g.h" 31#include "mdio_10g.h"
@@ -105,9 +104,11 @@ static int efx_test_mii(struct efx_nic *efx, struct efx_self_tests *tests)
105 goto out; 104 goto out;
106 } 105 }
107 106
108 rc = mdio_clause45_check_mmds(efx, efx->phy_op->mmds, 0); 107 if (EFX_IS10G(efx)) {
109 if (rc) 108 rc = mdio_clause45_check_mmds(efx, efx->phy_op->mmds, 0);
110 goto out; 109 if (rc)
110 goto out;
111 }
111 112
112out: 113out:
113 mutex_unlock(&efx->mac_lock); 114 mutex_unlock(&efx->mac_lock);
@@ -246,17 +247,20 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
246 return 0; 247 return 0;
247} 248}
248 249
249static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests) 250static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
251 unsigned flags)
250{ 252{
251 int rc; 253 int rc;
252 254
253 if (!efx->phy_op->test) 255 if (!efx->phy_op->run_tests)
254 return 0; 256 return 0;
255 257
258 EFX_BUG_ON_PARANOID(efx->phy_op->num_tests == 0 ||
259 efx->phy_op->num_tests > EFX_MAX_PHY_TESTS);
260
256 mutex_lock(&efx->mac_lock); 261 mutex_lock(&efx->mac_lock);
257 rc = efx->phy_op->test(efx); 262 rc = efx->phy_op->run_tests(efx, tests->phy, flags);
258 mutex_unlock(&efx->mac_lock); 263 mutex_unlock(&efx->mac_lock);
259 tests->phy = rc ? -1 : 1;
260 return rc; 264 return rc;
261} 265}
262 266
@@ -563,8 +567,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
563 return 0; 567 return 0;
564} 568}
565 569
566static int efx_test_loopbacks(struct efx_nic *efx, struct ethtool_cmd ecmd, 570static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
567 struct efx_self_tests *tests,
568 unsigned int loopback_modes) 571 unsigned int loopback_modes)
569{ 572{
570 enum efx_loopback_mode mode; 573 enum efx_loopback_mode mode;
@@ -593,12 +596,14 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct ethtool_cmd ecmd,
593 efx->loopback_mode = mode; 596 efx->loopback_mode = mode;
594 efx_reconfigure_port(efx); 597 efx_reconfigure_port(efx);
595 598
596 /* Wait for the PHY to signal the link is up */ 599 /* Wait for the PHY to signal the link is up. Interrupts
600 * are enabled for PHY's using LASI, otherwise we poll()
601 * quickly */
597 count = 0; 602 count = 0;
598 do { 603 do {
599 struct efx_channel *channel = &efx->channel[0]; 604 struct efx_channel *channel = &efx->channel[0];
600 605
601 falcon_check_xmac(efx); 606 efx->phy_op->poll(efx);
602 schedule_timeout_uninterruptible(HZ / 10); 607 schedule_timeout_uninterruptible(HZ / 10);
603 if (channel->work_pending) 608 if (channel->work_pending)
604 efx_process_channel_now(channel); 609 efx_process_channel_now(channel);
@@ -606,13 +611,12 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct ethtool_cmd ecmd,
606 flush_workqueue(efx->workqueue); 611 flush_workqueue(efx->workqueue);
607 rmb(); 612 rmb();
608 613
609 /* efx->link_up can be 1 even if the XAUI link is down, 614 /* We need both the phy and xaui links to be ok.
610 * (bug5762). Usually, it's not worth bothering with the 615 * rather than relying on the falcon_xmac irq/poll
611 * difference, but for selftests, we need that extra 616 * regime, just poll xaui directly */
612 * guarantee that the link is really, really, up.
613 */
614 link_up = efx->link_up; 617 link_up = efx->link_up;
615 if (!falcon_xaui_link_ok(efx)) 618 if (link_up && EFX_IS10G(efx) &&
619 !falcon_xaui_link_ok(efx))
616 link_up = false; 620 link_up = false;
617 621
618 } while ((++count < 20) && !link_up); 622 } while ((++count < 20) && !link_up);
@@ -652,47 +656,48 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct ethtool_cmd ecmd,
652 656
653/************************************************************************** 657/**************************************************************************
654 * 658 *
655 * Entry points 659 * Entry point
656 * 660 *
657 *************************************************************************/ 661 *************************************************************************/
658 662
659/* Online (i.e. non-disruptive) testing 663int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
660 * This checks interrupt generation, event delivery and PHY presence. */ 664 unsigned flags)
661int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
662{ 665{
666 enum efx_loopback_mode loopback_mode = efx->loopback_mode;
667 int phy_mode = efx->phy_mode;
668 struct ethtool_cmd ecmd;
663 struct efx_channel *channel; 669 struct efx_channel *channel;
664 int rc, rc2 = 0; 670 int rc_test = 0, rc_reset = 0, rc;
671
672 /* Online (i.e. non-disruptive) testing
673 * This checks interrupt generation, event delivery and PHY presence. */
665 674
666 rc = efx_test_mii(efx, tests); 675 rc = efx_test_mii(efx, tests);
667 if (rc && !rc2) 676 if (rc && !rc_test)
668 rc2 = rc; 677 rc_test = rc;
669 678
670 rc = efx_test_nvram(efx, tests); 679 rc = efx_test_nvram(efx, tests);
671 if (rc && !rc2) 680 if (rc && !rc_test)
672 rc2 = rc; 681 rc_test = rc;
673 682
674 rc = efx_test_interrupts(efx, tests); 683 rc = efx_test_interrupts(efx, tests);
675 if (rc && !rc2) 684 if (rc && !rc_test)
676 rc2 = rc; 685 rc_test = rc;
677 686
678 efx_for_each_channel(channel, efx) { 687 efx_for_each_channel(channel, efx) {
679 rc = efx_test_eventq_irq(channel, tests); 688 rc = efx_test_eventq_irq(channel, tests);
680 if (rc && !rc2) 689 if (rc && !rc_test)
681 rc2 = rc; 690 rc_test = rc;
682 } 691 }
683 692
684 return rc2; 693 if (rc_test)
685} 694 return rc_test;
686 695
687/* Offline (i.e. disruptive) testing 696 if (!(flags & ETH_TEST_FL_OFFLINE))
688 * This checks MAC and PHY loopback on the specified port. */ 697 return efx_test_phy(efx, tests, flags);
689int efx_offline_test(struct efx_nic *efx, 698
690 struct efx_self_tests *tests, unsigned int loopback_modes) 699 /* Offline (i.e. disruptive) testing
691{ 700 * This checks MAC and PHY loopback on the specified port. */
692 enum efx_loopback_mode loopback_mode = efx->loopback_mode;
693 int phy_mode = efx->phy_mode;
694 struct ethtool_cmd ecmd, ecmd_test;
695 int rc, rc2 = 0;
696 701
697 /* force the carrier state off so the kernel doesn't transmit during 702 /* force the carrier state off so the kernel doesn't transmit during
698 * the loopback test, and the watchdog timeout doesn't fire. Also put 703 * the loopback test, and the watchdog timeout doesn't fire. Also put
@@ -700,8 +705,15 @@ int efx_offline_test(struct efx_nic *efx,
700 */ 705 */
701 mutex_lock(&efx->mac_lock); 706 mutex_lock(&efx->mac_lock);
702 efx->port_inhibited = true; 707 efx->port_inhibited = true;
703 if (efx->loopback_modes) 708 if (efx->loopback_modes) {
704 efx->loopback_mode = __ffs(efx->loopback_modes); 709 /* We need the 312 clock from the PHY to test the XMAC
710 * registers, so move into XGMII loopback if available */
711 if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
712 efx->loopback_mode = LOOPBACK_XGMII;
713 else
714 efx->loopback_mode = __ffs(efx->loopback_modes);
715 }
716
705 __efx_reconfigure_port(efx); 717 __efx_reconfigure_port(efx);
706 mutex_unlock(&efx->mac_lock); 718 mutex_unlock(&efx->mac_lock);
707 719
@@ -709,39 +721,34 @@ int efx_offline_test(struct efx_nic *efx,
709 efx_reset_down(efx, &ecmd); 721 efx_reset_down(efx, &ecmd);
710 722
711 rc = efx_test_chip(efx, tests); 723 rc = efx_test_chip(efx, tests);
712 if (rc && !rc2) 724 if (rc && !rc_test)
713 rc2 = rc; 725 rc_test = rc;
714 726
715 /* reset the chip to recover from the register test */ 727 /* reset the chip to recover from the register test */
716 rc = falcon_reset_hw(efx, RESET_TYPE_ALL); 728 rc_reset = falcon_reset_hw(efx, RESET_TYPE_ALL);
717 729
718 /* Modify the saved ecmd so that when efx_reset_up() restores the phy 730 /* Ensure that the phy is powered and out of loopback
719 * state, AN is disabled, and the phy is powered, and out of loopback */ 731 * for the bist and loopback tests */
720 memcpy(&ecmd_test, &ecmd, sizeof(ecmd_test)); 732 efx->phy_mode &= ~PHY_MODE_LOW_POWER;
721 if (ecmd_test.autoneg == AUTONEG_ENABLE) {
722 ecmd_test.autoneg = AUTONEG_DISABLE;
723 ecmd_test.duplex = DUPLEX_FULL;
724 ecmd_test.speed = SPEED_10000;
725 }
726 efx->loopback_mode = LOOPBACK_NONE; 733 efx->loopback_mode = LOOPBACK_NONE;
727 734
728 rc = efx_reset_up(efx, &ecmd_test, rc == 0); 735 rc = efx_reset_up(efx, &ecmd, rc_reset == 0);
729 if (rc) { 736 if (rc && !rc_reset)
737 rc_reset = rc;
738
739 if (rc_reset) {
730 EFX_ERR(efx, "Unable to recover from chip test\n"); 740 EFX_ERR(efx, "Unable to recover from chip test\n");
731 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 741 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
732 return rc; 742 return rc_reset;
733 } 743 }
734 744
735 tests->loopback_speed = ecmd_test.speed; 745 rc = efx_test_phy(efx, tests, flags);
736 tests->loopback_full_duplex = ecmd_test.duplex; 746 if (rc && !rc_test)
737 747 rc_test = rc;
738 rc = efx_test_phy(efx, tests);
739 if (rc && !rc2)
740 rc2 = rc;
741 748
742 rc = efx_test_loopbacks(efx, ecmd_test, tests, loopback_modes); 749 rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
743 if (rc && !rc2) 750 if (rc && !rc_test)
744 rc2 = rc; 751 rc_test = rc;
745 752
746 /* restore the PHY to the previous state */ 753 /* restore the PHY to the previous state */
747 efx->loopback_mode = loopback_mode; 754 efx->loopback_mode = loopback_mode;
@@ -749,6 +756,6 @@ int efx_offline_test(struct efx_nic *efx,
749 efx->port_inhibited = false; 756 efx->port_inhibited = false;
750 efx_ethtool_set_settings(efx->net_dev, &ecmd); 757 efx_ethtool_set_settings(efx->net_dev, &ecmd);
751 758
752 return rc2; 759 return rc_test;
753} 760}
754 761
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index fc15df15d766..39451cf938cf 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -24,6 +24,8 @@ struct efx_loopback_self_tests {
24 int rx_bad; 24 int rx_bad;
25}; 25};
26 26
27#define EFX_MAX_PHY_TESTS 20
28
27/* Efx self test results 29/* Efx self test results
28 * For fields which are not counters, 1 indicates success and -1 30 * For fields which are not counters, 1 indicates success and -1
29 * indicates failure. 31 * indicates failure.
@@ -38,18 +40,14 @@ struct efx_self_tests {
38 int eventq_poll[EFX_MAX_CHANNELS]; 40 int eventq_poll[EFX_MAX_CHANNELS];
39 /* offline tests */ 41 /* offline tests */
40 int registers; 42 int registers;
41 int phy; 43 int phy[EFX_MAX_PHY_TESTS];
42 int loopback_speed;
43 int loopback_full_duplex;
44 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1]; 44 struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
45}; 45};
46 46
47extern void efx_loopback_rx_packet(struct efx_nic *efx, 47extern void efx_loopback_rx_packet(struct efx_nic *efx,
48 const char *buf_ptr, int pkt_len); 48 const char *buf_ptr, int pkt_len);
49extern int efx_online_test(struct efx_nic *efx, 49extern int efx_selftest(struct efx_nic *efx,
50 struct efx_self_tests *tests); 50 struct efx_self_tests *tests,
51extern int efx_offline_test(struct efx_nic *efx, 51 unsigned flags);
52 struct efx_self_tests *tests,
53 unsigned int loopback_modes);
54 52
55#endif /* EFX_SELFTEST_H */ 53#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index fe4e3fd22330..16b80acb9992 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc. 3 * Copyright 2007-2008 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -8,10 +8,21 @@
8 */ 8 */
9 9
10/***************************************************************************** 10/*****************************************************************************
11 * Support for the SFE4001 NIC: driver code for the PCA9539 I/O expander that 11 * Support for the SFE4001 and SFN4111T NICs.
12 * controls the PHY power rails, and for the MAX6647 temp. sensor used to check 12 *
13 * the PHY 13 * The SFE4001 does not power-up fully at reset due to its high power
14 * consumption. We control its power via a PCA9539 I/O expander.
15 * Both boards have a MAX6647 temperature monitor which we expose to
16 * the lm90 driver.
17 *
18 * This also provides minimal support for reflashing the PHY, which is
19 * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
20 * On SFE4001 rev A2 and later this is connected to the 3V3X output of
21 * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3.
22 * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
23 * exclusive with the network device being open.
14 */ 24 */
25
15#include <linux/delay.h> 26#include <linux/delay.h>
16#include "net_driver.h" 27#include "net_driver.h"
17#include "efx.h" 28#include "efx.h"
@@ -21,6 +32,7 @@
21#include "falcon_hwdefs.h" 32#include "falcon_hwdefs.h"
22#include "falcon_io.h" 33#include "falcon_io.h"
23#include "mac.h" 34#include "mac.h"
35#include "workarounds.h"
24 36
25/************************************************************************** 37/**************************************************************************
26 * 38 *
@@ -65,48 +77,9 @@
65#define P1_SPARE_LBN 4 77#define P1_SPARE_LBN 4
66#define P1_SPARE_WIDTH 4 78#define P1_SPARE_WIDTH 4
67 79
68 80/* Temperature Sensor */
69/************************************************************************** 81#define MAX664X_REG_RSL 0x02
70 * 82#define MAX664X_REG_WLHO 0x0B
71 * Temperature Sensor
72 *
73 **************************************************************************/
74#define MAX6647 0x4e
75
76#define RLTS 0x00
77#define RLTE 0x01
78#define RSL 0x02
79#define RCL 0x03
80#define RCRA 0x04
81#define RLHN 0x05
82#define RLLI 0x06
83#define RRHI 0x07
84#define RRLS 0x08
85#define WCRW 0x0a
86#define WLHO 0x0b
87#define WRHA 0x0c
88#define WRLN 0x0e
89#define OSHT 0x0f
90#define REET 0x10
91#define RIET 0x11
92#define RWOE 0x19
93#define RWOI 0x20
94#define HYS 0x21
95#define QUEUE 0x22
96#define MFID 0xfe
97#define REVID 0xff
98
99/* Status bits */
100#define MAX6647_BUSY (1 << 7) /* ADC is converting */
101#define MAX6647_LHIGH (1 << 6) /* Local high temp. alarm */
102#define MAX6647_LLOW (1 << 5) /* Local low temp. alarm */
103#define MAX6647_RHIGH (1 << 4) /* Remote high temp. alarm */
104#define MAX6647_RLOW (1 << 3) /* Remote low temp. alarm */
105#define MAX6647_FAULT (1 << 2) /* DXN/DXP short/open circuit */
106#define MAX6647_EOT (1 << 1) /* Remote junction overtemp. */
107#define MAX6647_IOT (1 << 0) /* Local junction overtemp. */
108
109static const u8 xgphy_max_temperature = 90;
110 83
111static void sfe4001_poweroff(struct efx_nic *efx) 84static void sfe4001_poweroff(struct efx_nic *efx)
112{ 85{
@@ -119,7 +92,7 @@ static void sfe4001_poweroff(struct efx_nic *efx)
119 i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff); 92 i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
120 93
121 /* Clear any over-temperature alert */ 94 /* Clear any over-temperature alert */
122 i2c_smbus_read_byte_data(hwmon_client, RSL); 95 i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
123} 96}
124 97
125static int sfe4001_poweron(struct efx_nic *efx) 98static int sfe4001_poweron(struct efx_nic *efx)
@@ -131,7 +104,7 @@ static int sfe4001_poweron(struct efx_nic *efx)
131 u8 out; 104 u8 out;
132 105
133 /* Clear any previous over-temperature alert */ 106 /* Clear any previous over-temperature alert */
134 rc = i2c_smbus_read_byte_data(hwmon_client, RSL); 107 rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
135 if (rc < 0) 108 if (rc < 0)
136 return rc; 109 return rc;
137 110
@@ -209,10 +182,29 @@ fail_on:
209 return rc; 182 return rc;
210} 183}
211 184
212/* On SFE4001 rev A2 and later, we can control the FLASH_CFG_1 pin 185static int sfn4111t_reset(struct efx_nic *efx)
213 * using the 3V3X output of the IO-expander. Allow the user to set 186{
214 * this when the device is stopped, and keep it stopped then. 187 efx_oword_t reg;
215 */ 188
189 /* GPIO pins are also used for I2C, so block that temporarily */
190 mutex_lock(&efx->i2c_adap.bus_lock);
191
192 falcon_read(efx, &reg, GPIO_CTL_REG_KER);
193 EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true);
194 EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, false);
195 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
196 msleep(1000);
197 EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, true);
198 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, true);
199 EFX_SET_OWORD_FIELD(reg, GPIO3_OUT,
200 !(efx->phy_mode & PHY_MODE_SPECIAL));
201 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
202
203 mutex_unlock(&efx->i2c_adap.bus_lock);
204
205 ssleep(1);
206 return 0;
207}
216 208
217static ssize_t show_phy_flash_cfg(struct device *dev, 209static ssize_t show_phy_flash_cfg(struct device *dev,
218 struct device_attribute *attr, char *buf) 210 struct device_attribute *attr, char *buf)
@@ -241,7 +233,10 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
241 err = -EBUSY; 233 err = -EBUSY;
242 } else { 234 } else {
243 efx->phy_mode = new_mode; 235 efx->phy_mode = new_mode;
244 err = sfe4001_poweron(efx); 236 if (efx->board_info.type == EFX_BOARD_SFE4001)
237 err = sfe4001_poweron(efx);
238 else
239 err = sfn4111t_reset(efx);
245 efx_reconfigure_port(efx); 240 efx_reconfigure_port(efx);
246 } 241 }
247 rtnl_unlock(); 242 rtnl_unlock();
@@ -261,35 +256,62 @@ static void sfe4001_fini(struct efx_nic *efx)
261 i2c_unregister_device(efx->board_info.hwmon_client); 256 i2c_unregister_device(efx->board_info.hwmon_client);
262} 257}
263 258
259static int sfe4001_check_hw(struct efx_nic *efx)
260{
261 s32 status;
262
263 /* If XAUI link is up then do not monitor */
264 if (EFX_WORKAROUND_7884(efx) && efx->mac_up)
265 return 0;
266
267 /* Check the powered status of the PHY. Lack of power implies that
268 * the MAX6647 has shut down power to it, probably due to a temp.
269 * alarm. Reading the power status rather than the MAX6647 status
270 * directly because the later is read-to-clear and would thus
271 * start to power up the PHY again when polled, causing us to blip
272 * the power undesirably.
273 * We know we can read from the IO expander because we did
274 * it during power-on. Assume failure now is bad news. */
275 status = i2c_smbus_read_byte_data(efx->board_info.ioexp_client, P1_IN);
276 if (status >= 0 &&
277 (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
278 return 0;
279
280 /* Use board power control, not PHY power control */
281 sfe4001_poweroff(efx);
282 efx->phy_mode = PHY_MODE_OFF;
283
284 return (status < 0) ? -EIO : -ERANGE;
285}
286
287static struct i2c_board_info sfe4001_hwmon_info = {
288 I2C_BOARD_INFO("max6647", 0x4e),
289 .irq = -1,
290};
291
264/* This board uses an I2C expander to provider power to the PHY, which needs to 292/* This board uses an I2C expander to provider power to the PHY, which needs to
265 * be turned on before the PHY can be used. 293 * be turned on before the PHY can be used.
266 * Context: Process context, rtnl lock held 294 * Context: Process context, rtnl lock held
267 */ 295 */
268int sfe4001_init(struct efx_nic *efx) 296int sfe4001_init(struct efx_nic *efx)
269{ 297{
270 struct i2c_client *hwmon_client;
271 int rc; 298 int rc;
272 299
273 hwmon_client = i2c_new_dummy(&efx->i2c_adap, MAX6647); 300#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
274 if (!hwmon_client) 301 efx->board_info.hwmon_client =
302 i2c_new_device(&efx->i2c_adap, &sfe4001_hwmon_info);
303#else
304 efx->board_info.hwmon_client =
305 i2c_new_dummy(&efx->i2c_adap, sfe4001_hwmon_info.addr);
306#endif
307 if (!efx->board_info.hwmon_client)
275 return -EIO; 308 return -EIO;
276 efx->board_info.hwmon_client = hwmon_client;
277 309
278 /* Set DSP over-temperature alert threshold */ 310 /* Raise board/PHY high limit from 85 to 90 degrees Celsius */
279 EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature); 311 rc = i2c_smbus_write_byte_data(efx->board_info.hwmon_client,
280 rc = i2c_smbus_write_byte_data(hwmon_client, WLHO, 312 MAX664X_REG_WLHO, 90);
281 xgphy_max_temperature);
282 if (rc) 313 if (rc)
283 goto fail_ioexp; 314 goto fail_hwmon;
284
285 /* Read it back and verify */
286 rc = i2c_smbus_read_byte_data(hwmon_client, RLHN);
287 if (rc < 0)
288 goto fail_ioexp;
289 if (rc != xgphy_max_temperature) {
290 rc = -EFAULT;
291 goto fail_ioexp;
292 }
293 315
294 efx->board_info.ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539); 316 efx->board_info.ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
295 if (!efx->board_info.ioexp_client) { 317 if (!efx->board_info.ioexp_client) {
@@ -301,6 +323,7 @@ int sfe4001_init(struct efx_nic *efx)
301 * blink code. */ 323 * blink code. */
302 efx->board_info.blink = tenxpress_phy_blink; 324 efx->board_info.blink = tenxpress_phy_blink;
303 325
326 efx->board_info.monitor = sfe4001_check_hw;
304 efx->board_info.fini = sfe4001_fini; 327 efx->board_info.fini = sfe4001_fini;
305 328
306 rc = sfe4001_poweron(efx); 329 rc = sfe4001_poweron(efx);
@@ -319,6 +342,64 @@ fail_on:
319fail_ioexp: 342fail_ioexp:
320 i2c_unregister_device(efx->board_info.ioexp_client); 343 i2c_unregister_device(efx->board_info.ioexp_client);
321fail_hwmon: 344fail_hwmon:
322 i2c_unregister_device(hwmon_client); 345 i2c_unregister_device(efx->board_info.hwmon_client);
346 return rc;
347}
348
349static int sfn4111t_check_hw(struct efx_nic *efx)
350{
351 s32 status;
352
353 /* If XAUI link is up then do not monitor */
354 if (EFX_WORKAROUND_7884(efx) && efx->mac_up)
355 return 0;
356
357 /* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
358 status = i2c_smbus_read_byte_data(efx->board_info.hwmon_client,
359 MAX664X_REG_RSL);
360 if (status < 0)
361 return -EIO;
362 if (status & 0x57)
363 return -ERANGE;
364 return 0;
365}
366
367static void sfn4111t_fini(struct efx_nic *efx)
368{
369 EFX_INFO(efx, "%s\n", __func__);
370
371 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
372 i2c_unregister_device(efx->board_info.hwmon_client);
373}
374
375static struct i2c_board_info sfn4111t_hwmon_info = {
376 I2C_BOARD_INFO("max6647", 0x4e),
377 .irq = -1,
378};
379
380int sfn4111t_init(struct efx_nic *efx)
381{
382 int rc;
383
384 efx->board_info.hwmon_client =
385 i2c_new_device(&efx->i2c_adap, &sfn4111t_hwmon_info);
386 if (!efx->board_info.hwmon_client)
387 return -EIO;
388
389 efx->board_info.blink = tenxpress_phy_blink;
390 efx->board_info.monitor = sfn4111t_check_hw;
391 efx->board_info.fini = sfn4111t_fini;
392
393 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
394 if (rc)
395 goto fail_hwmon;
396
397 if (efx->phy_mode & PHY_MODE_SPECIAL)
398 sfn4111t_reset(efx);
399
400 return 0;
401
402fail_hwmon:
403 i2c_unregister_device(efx->board_info.hwmon_client);
323 return rc; 404 return rc;
324} 405}
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index feef61942377..1b1ceb411671 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -25,6 +25,7 @@
25#define SPI_WRDI 0x04 /* Reset write enable latch */ 25#define SPI_WRDI 0x04 /* Reset write enable latch */
26#define SPI_RDSR 0x05 /* Read status register */ 26#define SPI_RDSR 0x05 /* Read status register */
27#define SPI_WREN 0x06 /* Set write enable latch */ 27#define SPI_WREN 0x06 /* Set write enable latch */
28#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
28 29
29#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */ 30#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
30#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */ 31#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
@@ -36,6 +37,7 @@
36/** 37/**
37 * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device 38 * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
38 * @efx: The Efx controller that owns this device 39 * @efx: The Efx controller that owns this device
40 * @mtd: MTD state
39 * @device_id: Controller's id for the device 41 * @device_id: Controller's id for the device
40 * @size: Size (in bytes) 42 * @size: Size (in bytes)
41 * @addr_len: Number of address bytes in read/write commands 43 * @addr_len: Number of address bytes in read/write commands
@@ -44,23 +46,51 @@
44 * use bit 3 of the command byte as address bit A8, rather 46 * use bit 3 of the command byte as address bit A8, rather
45 * than having a two-byte address. If this flag is set, then 47 * than having a two-byte address. If this flag is set, then
46 * commands should be munged in this way. 48 * commands should be munged in this way.
49 * @erase_command: Erase command (or 0 if sector erase not needed).
50 * @erase_size: Erase sector size (in bytes)
51 * Erase commands affect sectors with this size and alignment.
52 * This must be a power of two.
47 * @block_size: Write block size (in bytes). 53 * @block_size: Write block size (in bytes).
48 * Write commands are limited to blocks with this size and alignment. 54 * Write commands are limited to blocks with this size and alignment.
49 * @read: Read function for the device
50 * @write: Write function for the device
51 */ 55 */
52struct efx_spi_device { 56struct efx_spi_device {
53 struct efx_nic *efx; 57 struct efx_nic *efx;
58#ifdef CONFIG_SFC_MTD
59 void *mtd;
60#endif
54 int device_id; 61 int device_id;
55 unsigned int size; 62 unsigned int size;
56 unsigned int addr_len; 63 unsigned int addr_len;
57 unsigned int munge_address:1; 64 unsigned int munge_address:1;
65 u8 erase_command;
66 unsigned int erase_size;
58 unsigned int block_size; 67 unsigned int block_size;
59}; 68};
60 69
70int falcon_spi_cmd(const struct efx_spi_device *spi, unsigned int command,
71 int address, const void* in, void *out, size_t len);
72int falcon_spi_wait_write(const struct efx_spi_device *spi);
61int falcon_spi_read(const struct efx_spi_device *spi, loff_t start, 73int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
62 size_t len, size_t *retlen, u8 *buffer); 74 size_t len, size_t *retlen, u8 *buffer);
63int falcon_spi_write(const struct efx_spi_device *spi, loff_t start, 75int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
64 size_t len, size_t *retlen, const u8 *buffer); 76 size_t len, size_t *retlen, const u8 *buffer);
65 77
78/*
79 * SFC4000 flash is partitioned into:
80 * 0-0x400 chip and board config (see falcon_hwdefs.h)
81 * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
82 * 0x8000-end boot code (mapped to PCI expansion ROM)
83 * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
84 * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
85 * 0-0x400 chip and board config
86 * configurable VPD
87 * 0x800-0x1800 boot config
88 * Aside from the chip and board config, all of these are optional and may
89 * be absent or truncated depending on the devices used.
90 */
91#define FALCON_NVCONFIG_END 0x400U
92#define FALCON_FLASH_BOOTCODE_START 0x8000U
93#define EFX_EEPROM_BOOTCONFIG_START 0x800U
94#define EFX_EEPROM_BOOTCONFIG_END 0x1800U
95
66#endif /* EFX_SPI_H */ 96#endif /* EFX_SPI_H */
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index d507c93d666e..b9768760fae7 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -1,6 +1,6 @@
1/**************************************************************************** 1/****************************************************************************
2 * Driver for Solarflare 802.3an compliant PHY 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007 Solarflare Communications Inc. 3 * Copyright 2007-2008 Solarflare Communications Inc.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published 6 * under the terms of the GNU General Public License version 2 as published
@@ -10,45 +10,76 @@
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/seq_file.h> 11#include <linux/seq_file.h>
12#include "efx.h" 12#include "efx.h"
13#include "gmii.h"
14#include "mdio_10g.h" 13#include "mdio_10g.h"
15#include "falcon.h" 14#include "falcon.h"
16#include "phy.h" 15#include "phy.h"
17#include "falcon_hwdefs.h" 16#include "falcon_hwdefs.h"
18#include "boards.h" 17#include "boards.h"
19#include "mac.h" 18#include "workarounds.h"
19#include "selftest.h"
20 20
21/* We expect these MMDs to be in the package */ 21/* We expect these MMDs to be in the package. SFT9001 also has a
22/* AN not here as mdio_check_mmds() requires STAT2 support */ 22 * clause 22 extension MMD, but since it doesn't have all the generic
23#define TENXPRESS_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PMAPMD | \ 23 * MMD registers it is pointless to include it here.
24 MDIO_MMDREG_DEVS0_PCS | \ 24 */
25 MDIO_MMDREG_DEVS0_PHYXS) 25#define TENXPRESS_REQUIRED_DEVS (MDIO_MMDREG_DEVS_PMAPMD | \
26 26 MDIO_MMDREG_DEVS_PCS | \
27#define TENXPRESS_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \ 27 MDIO_MMDREG_DEVS_PHYXS | \
28 (1 << LOOPBACK_PCS) | \ 28 MDIO_MMDREG_DEVS_AN)
29 (1 << LOOPBACK_PMAPMD) | \ 29
30 (1 << LOOPBACK_NETWORK)) 30#define SFX7101_LOOPBACKS ((1 << LOOPBACK_PHYXS) | \
31 (1 << LOOPBACK_PCS) | \
32 (1 << LOOPBACK_PMAPMD) | \
33 (1 << LOOPBACK_NETWORK))
34
35#define SFT9001_LOOPBACKS ((1 << LOOPBACK_GPHY) | \
36 (1 << LOOPBACK_PHYXS) | \
37 (1 << LOOPBACK_PCS) | \
38 (1 << LOOPBACK_PMAPMD) | \
39 (1 << LOOPBACK_NETWORK))
31 40
32/* We complain if we fail to see the link partner as 10G capable this many 41/* We complain if we fail to see the link partner as 10G capable this many
33 * times in a row (must be > 1 as sampling the autoneg. registers is racy) 42 * times in a row (must be > 1 as sampling the autoneg. registers is racy)
34 */ 43 */
35#define MAX_BAD_LP_TRIES (5) 44#define MAX_BAD_LP_TRIES (5)
36 45
46/* LASI Control */
47#define PMA_PMD_LASI_CTRL 36866
48#define PMA_PMD_LASI_STATUS 36869
49#define PMA_PMD_LS_ALARM_LBN 0
50#define PMA_PMD_LS_ALARM_WIDTH 1
51#define PMA_PMD_TX_ALARM_LBN 1
52#define PMA_PMD_TX_ALARM_WIDTH 1
53#define PMA_PMD_RX_ALARM_LBN 2
54#define PMA_PMD_RX_ALARM_WIDTH 1
55#define PMA_PMD_AN_ALARM_LBN 3
56#define PMA_PMD_AN_ALARM_WIDTH 1
57
37/* Extended control register */ 58/* Extended control register */
38#define PMA_PMD_XCONTROL_REG 0xc000 59#define PMA_PMD_XCONTROL_REG 49152
39#define PMA_PMD_LNPGA_POWERDOWN_LBN 8 60#define PMA_PMD_EXT_GMII_EN_LBN 1
40#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1 61#define PMA_PMD_EXT_GMII_EN_WIDTH 1
62#define PMA_PMD_EXT_CLK_OUT_LBN 2
63#define PMA_PMD_EXT_CLK_OUT_WIDTH 1
64#define PMA_PMD_LNPGA_POWERDOWN_LBN 8 /* SFX7101 only */
65#define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
66#define PMA_PMD_EXT_CLK312_LBN 8 /* SFT9001 only */
67#define PMA_PMD_EXT_CLK312_WIDTH 1
68#define PMA_PMD_EXT_LPOWER_LBN 12
69#define PMA_PMD_EXT_LPOWER_WIDTH 1
70#define PMA_PMD_EXT_SSR_LBN 15
71#define PMA_PMD_EXT_SSR_WIDTH 1
41 72
42/* extended status register */ 73/* extended status register */
43#define PMA_PMD_XSTATUS_REG 0xc001 74#define PMA_PMD_XSTATUS_REG 49153
44#define PMA_PMD_XSTAT_FLP_LBN (12) 75#define PMA_PMD_XSTAT_FLP_LBN (12)
45 76
46/* LED control register */ 77/* LED control register */
47#define PMA_PMD_LED_CTRL_REG (0xc007) 78#define PMA_PMD_LED_CTRL_REG 49159
48#define PMA_PMA_LED_ACTIVITY_LBN (3) 79#define PMA_PMA_LED_ACTIVITY_LBN (3)
49 80
50/* LED function override register */ 81/* LED function override register */
51#define PMA_PMD_LED_OVERR_REG (0xc009) 82#define PMA_PMD_LED_OVERR_REG 49161
52/* Bit positions for different LEDs (there are more but not wired on SFE4001)*/ 83/* Bit positions for different LEDs (there are more but not wired on SFE4001)*/
53#define PMA_PMD_LED_LINK_LBN (0) 84#define PMA_PMD_LED_LINK_LBN (0)
54#define PMA_PMD_LED_SPEED_LBN (2) 85#define PMA_PMD_LED_SPEED_LBN (2)
@@ -59,41 +90,99 @@
59#define PMA_PMD_LED_ON (1) 90#define PMA_PMD_LED_ON (1)
60#define PMA_PMD_LED_OFF (2) 91#define PMA_PMD_LED_OFF (2)
61#define PMA_PMD_LED_FLASH (3) 92#define PMA_PMD_LED_FLASH (3)
93#define PMA_PMD_LED_MASK 3
62/* All LEDs under hardware control */ 94/* All LEDs under hardware control */
63#define PMA_PMD_LED_FULL_AUTO (0) 95#define PMA_PMD_LED_FULL_AUTO (0)
64/* Green and Amber under hardware control, Red off */ 96/* Green and Amber under hardware control, Red off */
65#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) 97#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
66 98
67 99#define PMA_PMD_SPEED_ENABLE_REG 49192
68/* Special Software reset register */ 100#define PMA_PMD_100TX_ADV_LBN 1
69#define PMA_PMD_EXT_CTRL_REG 49152 101#define PMA_PMD_100TX_ADV_WIDTH 1
70#define PMA_PMD_EXT_SSR_LBN 15 102#define PMA_PMD_1000T_ADV_LBN 2
71 103#define PMA_PMD_1000T_ADV_WIDTH 1
72/* Misc register defines */ 104#define PMA_PMD_10000T_ADV_LBN 3
73#define PCS_CLOCK_CTRL_REG 0xd801 105#define PMA_PMD_10000T_ADV_WIDTH 1
106#define PMA_PMD_SPEED_LBN 4
107#define PMA_PMD_SPEED_WIDTH 4
108
109/* Cable diagnostics - SFT9001 only */
110#define PMA_PMD_CDIAG_CTRL_REG 49213
111#define CDIAG_CTRL_IMMED_LBN 15
112#define CDIAG_CTRL_BRK_LINK_LBN 12
113#define CDIAG_CTRL_IN_PROG_LBN 11
114#define CDIAG_CTRL_LEN_UNIT_LBN 10
115#define CDIAG_CTRL_LEN_METRES 1
116#define PMA_PMD_CDIAG_RES_REG 49174
117#define CDIAG_RES_A_LBN 12
118#define CDIAG_RES_B_LBN 8
119#define CDIAG_RES_C_LBN 4
120#define CDIAG_RES_D_LBN 0
121#define CDIAG_RES_WIDTH 4
122#define CDIAG_RES_OPEN 2
123#define CDIAG_RES_OK 1
124#define CDIAG_RES_INVALID 0
125/* Set of 4 registers for pairs A-D */
126#define PMA_PMD_CDIAG_LEN_REG 49175
127
128/* Serdes control registers - SFT9001 only */
129#define PMA_PMD_CSERDES_CTRL_REG 64258
130/* Set the 156.25 MHz output to 312.5 MHz to drive Falcon's XMAC */
131#define PMA_PMD_CSERDES_DEFAULT 0x000f
132
133/* Misc register defines - SFX7101 only */
134#define PCS_CLOCK_CTRL_REG 55297
74#define PLL312_RST_N_LBN 2 135#define PLL312_RST_N_LBN 2
75 136
76#define PCS_SOFT_RST2_REG 0xd806 137#define PCS_SOFT_RST2_REG 55302
77#define SERDES_RST_N_LBN 13 138#define SERDES_RST_N_LBN 13
78#define XGXS_RST_N_LBN 12 139#define XGXS_RST_N_LBN 12
79 140
80#define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */ 141#define PCS_TEST_SELECT_REG 55303 /* PRM 10.5.8 */
81#define CLK312_EN_LBN 3 142#define CLK312_EN_LBN 3
82 143
83/* PHYXS registers */ 144/* PHYXS registers */
145#define PHYXS_XCONTROL_REG 49152
146#define PHYXS_RESET_LBN 15
147#define PHYXS_RESET_WIDTH 1
148
84#define PHYXS_TEST1 (49162) 149#define PHYXS_TEST1 (49162)
85#define LOOPBACK_NEAR_LBN (8) 150#define LOOPBACK_NEAR_LBN (8)
86#define LOOPBACK_NEAR_WIDTH (1) 151#define LOOPBACK_NEAR_WIDTH (1)
87 152
153#define PCS_10GBASET_STAT1 32
154#define PCS_10GBASET_BLKLK_LBN 0
155#define PCS_10GBASET_BLKLK_WIDTH 1
156
88/* Boot status register */ 157/* Boot status register */
89#define PCS_BOOT_STATUS_REG (0xd000) 158#define PCS_BOOT_STATUS_REG 53248
90#define PCS_BOOT_FATAL_ERR_LBN (0) 159#define PCS_BOOT_FATAL_ERR_LBN (0)
91#define PCS_BOOT_PROGRESS_LBN (1) 160#define PCS_BOOT_PROGRESS_LBN (1)
92#define PCS_BOOT_PROGRESS_WIDTH (2) 161#define PCS_BOOT_PROGRESS_WIDTH (2)
93#define PCS_BOOT_COMPLETE_LBN (3) 162#define PCS_BOOT_COMPLETE_LBN (3)
163
94#define PCS_BOOT_MAX_DELAY (100) 164#define PCS_BOOT_MAX_DELAY (100)
95#define PCS_BOOT_POLL_DELAY (10) 165#define PCS_BOOT_POLL_DELAY (10)
96 166
167/* 100M/1G PHY registers */
168#define GPHY_XCONTROL_REG 49152
169#define GPHY_ISOLATE_LBN 10
170#define GPHY_ISOLATE_WIDTH 1
171#define GPHY_DUPLEX_LBN 8
172#define GPHY_DUPLEX_WIDTH 1
173#define GPHY_LOOPBACK_NEAR_LBN 14
174#define GPHY_LOOPBACK_NEAR_WIDTH 1
175
176#define C22EXT_STATUS_REG 49153
177#define C22EXT_STATUS_LINK_LBN 2
178#define C22EXT_STATUS_LINK_WIDTH 1
179
180#define C22EXT_MSTSLV_REG 49162
181#define C22EXT_MSTSLV_1000_HD_LBN 10
182#define C22EXT_MSTSLV_1000_HD_WIDTH 1
183#define C22EXT_MSTSLV_1000_FD_LBN 11
184#define C22EXT_MSTSLV_1000_FD_WIDTH 1
185
97/* Time to wait between powering down the LNPGA and turning off the power 186/* Time to wait between powering down the LNPGA and turning off the power
98 * rails */ 187 * rails */
99#define LNPGA_PDOWN_WAIT (HZ / 5) 188#define LNPGA_PDOWN_WAIT (HZ / 5)
@@ -117,6 +206,38 @@ void tenxpress_crc_err(struct efx_nic *efx)
117 atomic_inc(&phy_data->bad_crc_count); 206 atomic_inc(&phy_data->bad_crc_count);
118} 207}
119 208
209static ssize_t show_phy_short_reach(struct device *dev,
210 struct device_attribute *attr, char *buf)
211{
212 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
213 int reg;
214
215 reg = mdio_clause45_read(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
216 MDIO_PMAPMD_10GBT_TXPWR);
217 return sprintf(buf, "%d\n",
218 !!(reg & (1 << MDIO_PMAPMD_10GBT_TXPWR_SHORT_LBN)));
219}
220
221static ssize_t set_phy_short_reach(struct device *dev,
222 struct device_attribute *attr,
223 const char *buf, size_t count)
224{
225 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
226
227 rtnl_lock();
228 mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
229 MDIO_PMAPMD_10GBT_TXPWR,
230 MDIO_PMAPMD_10GBT_TXPWR_SHORT_LBN,
231 count != 0 && *buf != '0');
232 efx_reconfigure_port(efx);
233 rtnl_unlock();
234
235 return count;
236}
237
238static DEVICE_ATTR(phy_short_reach, 0644, show_phy_short_reach,
239 set_phy_short_reach);
240
120/* Check that the C166 has booted successfully */ 241/* Check that the C166 has booted successfully */
121static int tenxpress_phy_check(struct efx_nic *efx) 242static int tenxpress_phy_check(struct efx_nic *efx)
122{ 243{
@@ -148,27 +269,42 @@ static int tenxpress_phy_check(struct efx_nic *efx)
148 269
149static int tenxpress_init(struct efx_nic *efx) 270static int tenxpress_init(struct efx_nic *efx)
150{ 271{
151 int rc, reg; 272 int phy_id = efx->mii.phy_id;
273 int reg;
274 int rc;
152 275
153 /* Turn on the clock */ 276 if (efx->phy_type == PHY_TYPE_SFX7101) {
154 reg = (1 << CLK312_EN_LBN); 277 /* Enable 312.5 MHz clock */
155 mdio_clause45_write(efx, efx->mii.phy_id, 278 mdio_clause45_write(efx, phy_id,
156 MDIO_MMD_PCS, PCS_TEST_SELECT_REG, reg); 279 MDIO_MMD_PCS, PCS_TEST_SELECT_REG,
280 1 << CLK312_EN_LBN);
281 } else {
282 /* Enable 312.5 MHz clock and GMII */
283 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
284 PMA_PMD_XCONTROL_REG);
285 reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) |
286 (1 << PMA_PMD_EXT_CLK_OUT_LBN) |
287 (1 << PMA_PMD_EXT_CLK312_LBN));
288 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
289 PMA_PMD_XCONTROL_REG, reg);
290 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
291 GPHY_XCONTROL_REG, GPHY_ISOLATE_LBN,
292 false);
293 }
157 294
158 rc = tenxpress_phy_check(efx); 295 rc = tenxpress_phy_check(efx);
159 if (rc < 0) 296 if (rc < 0)
160 return rc; 297 return rc;
161 298
162 /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */ 299 /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
163 reg = mdio_clause45_read(efx, efx->mii.phy_id, 300 if (efx->phy_type == PHY_TYPE_SFX7101) {
164 MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG); 301 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_PMAPMD,
165 reg |= (1 << PMA_PMA_LED_ACTIVITY_LBN); 302 PMA_PMD_LED_CTRL_REG,
166 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, 303 PMA_PMA_LED_ACTIVITY_LBN,
167 PMA_PMD_LED_CTRL_REG, reg); 304 true);
168 305 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
169 reg = PMA_PMD_LED_DEFAULT; 306 PMA_PMD_LED_OVERR_REG, PMA_PMD_LED_DEFAULT);
170 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, 307 }
171 PMA_PMD_LED_OVERR_REG, reg);
172 308
173 return rc; 309 return rc;
174} 310}
@@ -184,22 +320,43 @@ static int tenxpress_phy_init(struct efx_nic *efx)
184 efx->phy_data = phy_data; 320 efx->phy_data = phy_data;
185 phy_data->phy_mode = efx->phy_mode; 321 phy_data->phy_mode = efx->phy_mode;
186 322
187 rc = mdio_clause45_wait_reset_mmds(efx, 323 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
188 TENXPRESS_REQUIRED_DEVS); 324 if (efx->phy_type == PHY_TYPE_SFT9001A) {
189 if (rc < 0) 325 int reg;
190 goto fail; 326 reg = mdio_clause45_read(efx, efx->mii.phy_id,
327 MDIO_MMD_PMAPMD,
328 PMA_PMD_XCONTROL_REG);
329 reg |= (1 << PMA_PMD_EXT_SSR_LBN);
330 mdio_clause45_write(efx, efx->mii.phy_id,
331 MDIO_MMD_PMAPMD,
332 PMA_PMD_XCONTROL_REG, reg);
333 mdelay(200);
334 }
191 335
192 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 336 rc = mdio_clause45_wait_reset_mmds(efx,
193 if (rc < 0) 337 TENXPRESS_REQUIRED_DEVS);
194 goto fail; 338 if (rc < 0)
339 goto fail;
340
341 rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
342 if (rc < 0)
343 goto fail;
344 }
195 345
196 rc = tenxpress_init(efx); 346 rc = tenxpress_init(efx);
197 if (rc < 0) 347 if (rc < 0)
198 goto fail; 348 goto fail;
199 349
350 if (efx->phy_type == PHY_TYPE_SFT9001B) {
351 rc = device_create_file(&efx->pci_dev->dev,
352 &dev_attr_phy_short_reach);
353 if (rc)
354 goto fail;
355 }
356
200 schedule_timeout_uninterruptible(HZ / 5); /* 200ms */ 357 schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
201 358
202 /* Let XGXS and SerDes out of reset and resets 10XPress */ 359 /* Let XGXS and SerDes out of reset */
203 falcon_reset_xaui(efx); 360 falcon_reset_xaui(efx);
204 361
205 return 0; 362 return 0;
@@ -210,21 +367,24 @@ static int tenxpress_phy_init(struct efx_nic *efx)
210 return rc; 367 return rc;
211} 368}
212 369
370/* Perform a "special software reset" on the PHY. The caller is
371 * responsible for saving and restoring the PHY hardware registers
372 * properly, and masking/unmasking LASI */
213static int tenxpress_special_reset(struct efx_nic *efx) 373static int tenxpress_special_reset(struct efx_nic *efx)
214{ 374{
215 int rc, reg; 375 int rc, reg;
216 376
217 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so 377 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
218 * a special software reset can glitch the XGMAC sufficiently for stats 378 * a special software reset can glitch the XGMAC sufficiently for stats
219 * requests to fail. Since we don't ofen special_reset, just lock. */ 379 * requests to fail. Since we don't often special_reset, just lock. */
220 spin_lock(&efx->stats_lock); 380 spin_lock(&efx->stats_lock);
221 381
222 /* Initiate reset */ 382 /* Initiate reset */
223 reg = mdio_clause45_read(efx, efx->mii.phy_id, 383 reg = mdio_clause45_read(efx, efx->mii.phy_id,
224 MDIO_MMD_PMAPMD, PMA_PMD_EXT_CTRL_REG); 384 MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
225 reg |= (1 << PMA_PMD_EXT_SSR_LBN); 385 reg |= (1 << PMA_PMD_EXT_SSR_LBN);
226 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, 386 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
227 PMA_PMD_EXT_CTRL_REG, reg); 387 PMA_PMD_XCONTROL_REG, reg);
228 388
229 mdelay(200); 389 mdelay(200);
230 390
@@ -239,174 +399,257 @@ static int tenxpress_special_reset(struct efx_nic *efx)
239 if (rc < 0) 399 if (rc < 0)
240 goto unlock; 400 goto unlock;
241 401
402 /* Wait for the XGXS state machine to churn */
403 mdelay(10);
242unlock: 404unlock:
243 spin_unlock(&efx->stats_lock); 405 spin_unlock(&efx->stats_lock);
244 return rc; 406 return rc;
245} 407}
246 408
247static void tenxpress_set_bad_lp(struct efx_nic *efx, bool bad_lp) 409static void sfx7101_check_bad_lp(struct efx_nic *efx, bool link_ok)
248{ 410{
249 struct tenxpress_phy_data *pd = efx->phy_data; 411 struct tenxpress_phy_data *pd = efx->phy_data;
412 int phy_id = efx->mii.phy_id;
413 bool bad_lp;
250 int reg; 414 int reg;
251 415
416 if (link_ok) {
417 bad_lp = false;
418 } else {
419 /* Check that AN has started but not completed. */
420 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
421 MDIO_AN_STATUS);
422 if (!(reg & (1 << MDIO_AN_STATUS_LP_AN_CAP_LBN)))
423 return; /* LP status is unknown */
424 bad_lp = !(reg & (1 << MDIO_AN_STATUS_AN_DONE_LBN));
425 if (bad_lp)
426 pd->bad_lp_tries++;
427 }
428
252 /* Nothing to do if all is well and was previously so. */ 429 /* Nothing to do if all is well and was previously so. */
253 if (!(bad_lp || pd->bad_lp_tries)) 430 if (!pd->bad_lp_tries)
254 return; 431 return;
255 432
256 reg = mdio_clause45_read(efx, efx->mii.phy_id, 433 /* Use the RX (red) LED as an error indicator once we've seen AN
257 MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG); 434 * failure several times in a row, and also log a message. */
435 if (!bad_lp || pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
436 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
437 PMA_PMD_LED_OVERR_REG);
438 reg &= ~(PMA_PMD_LED_MASK << PMA_PMD_LED_RX_LBN);
439 if (!bad_lp) {
440 reg |= PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN;
441 } else {
442 reg |= PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN;
443 EFX_ERR(efx, "appears to be plugged into a port"
444 " that is not 10GBASE-T capable. The PHY"
445 " supports 10GBASE-T ONLY, so no link can"
446 " be established\n");
447 }
448 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
449 PMA_PMD_LED_OVERR_REG, reg);
450 pd->bad_lp_tries = bad_lp;
451 }
452}
258 453
259 if (bad_lp) 454static bool sfx7101_link_ok(struct efx_nic *efx)
260 pd->bad_lp_tries++; 455{
261 else 456 return mdio_clause45_links_ok(efx,
262 pd->bad_lp_tries = 0; 457 MDIO_MMDREG_DEVS_PMAPMD |
263 458 MDIO_MMDREG_DEVS_PCS |
264 if (pd->bad_lp_tries == MAX_BAD_LP_TRIES) { 459 MDIO_MMDREG_DEVS_PHYXS);
265 pd->bad_lp_tries = 0; /* Restart count */ 460}
266 reg &= ~(PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN); 461
267 reg |= (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN); 462static bool sft9001_link_ok(struct efx_nic *efx, struct ethtool_cmd *ecmd)
268 EFX_ERR(efx, "This NIC appears to be plugged into" 463{
269 " a port that is not 10GBASE-T capable.\n" 464 int phy_id = efx->mii.phy_id;
270 " This PHY is 10GBASE-T ONLY, so no link can" 465 u32 reg;
271 " be established.\n"); 466
467 if (efx_phy_mode_disabled(efx->phy_mode))
468 return false;
469 else if (efx->loopback_mode == LOOPBACK_GPHY)
470 return true;
471 else if (efx->loopback_mode)
472 return mdio_clause45_links_ok(efx,
473 MDIO_MMDREG_DEVS_PMAPMD |
474 MDIO_MMDREG_DEVS_PHYXS);
475
476 /* We must use the same definition of link state as LASI,
477 * otherwise we can miss a link state transition
478 */
479 if (ecmd->speed == 10000) {
480 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PCS,
481 PCS_10GBASET_STAT1);
482 return reg & (1 << PCS_10GBASET_BLKLK_LBN);
272 } else { 483 } else {
273 reg |= (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN); 484 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
485 C22EXT_STATUS_REG);
486 return reg & (1 << C22EXT_STATUS_LINK_LBN);
274 } 487 }
275 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
276 PMA_PMD_LED_OVERR_REG, reg);
277} 488}
278 489
279/* Check link status and return a boolean OK value. If the link is NOT 490static void tenxpress_ext_loopback(struct efx_nic *efx)
280 * OK we have a quick rummage round to see if we appear to be plugged
281 * into a non-10GBT port and if so warn the user that they won't get
282 * link any time soon as we are 10GBT only, unless caller specified
283 * not to do this check (it isn't useful in loopback) */
284static bool tenxpress_link_ok(struct efx_nic *efx, bool check_lp)
285{ 491{
286 bool ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS); 492 int phy_id = efx->mii.phy_id;
287
288 if (ok) {
289 tenxpress_set_bad_lp(efx, false);
290 } else if (check_lp) {
291 /* Are we plugged into the wrong sort of link? */
292 bool bad_lp = false;
293 int phy_id = efx->mii.phy_id;
294 int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
295 MDIO_AN_STATUS);
296 int xphy_stat = mdio_clause45_read(efx, phy_id,
297 MDIO_MMD_PMAPMD,
298 PMA_PMD_XSTATUS_REG);
299 /* Are we plugged into anything that sends FLPs? If
300 * not we can't distinguish between not being plugged
301 * in and being plugged into a non-AN antique. The FLP
302 * bit has the advantage of not clearing when autoneg
303 * restarts. */
304 if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) {
305 tenxpress_set_bad_lp(efx, false);
306 return ok;
307 }
308 493
309 /* If it can do 10GBT it must be XNP capable */ 494 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_PHYXS,
310 bad_lp = !(an_stat & (1 << MDIO_AN_STATUS_XNP_LBN)); 495 PHYXS_TEST1, LOOPBACK_NEAR_LBN,
311 if (!bad_lp && (an_stat & (1 << MDIO_AN_STATUS_PAGE_LBN))) { 496 efx->loopback_mode == LOOPBACK_PHYXS);
312 bad_lp = !(mdio_clause45_read(efx, phy_id, 497 if (efx->phy_type != PHY_TYPE_SFX7101)
313 MDIO_MMD_AN, MDIO_AN_10GBT_STATUS) & 498 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
314 (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN)); 499 GPHY_XCONTROL_REG,
315 } 500 GPHY_LOOPBACK_NEAR_LBN,
316 tenxpress_set_bad_lp(efx, bad_lp); 501 efx->loopback_mode == LOOPBACK_GPHY);
317 }
318 return ok;
319} 502}
320 503
321static void tenxpress_phyxs_loopback(struct efx_nic *efx) 504static void tenxpress_low_power(struct efx_nic *efx)
322{ 505{
323 int phy_id = efx->mii.phy_id; 506 int phy_id = efx->mii.phy_id;
324 int ctrl1, ctrl2;
325 507
326 ctrl1 = ctrl2 = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, 508 if (efx->phy_type == PHY_TYPE_SFX7101)
327 PHYXS_TEST1); 509 mdio_clause45_set_mmds_lpower(
328 if (efx->loopback_mode == LOOPBACK_PHYXS) 510 efx, !!(efx->phy_mode & PHY_MODE_LOW_POWER),
329 ctrl2 |= (1 << LOOPBACK_NEAR_LBN); 511 TENXPRESS_REQUIRED_DEVS);
330 else 512 else
331 ctrl2 &= ~(1 << LOOPBACK_NEAR_LBN); 513 mdio_clause45_set_flag(
332 if (ctrl1 != ctrl2) 514 efx, phy_id, MDIO_MMD_PMAPMD,
333 mdio_clause45_write(efx, phy_id, MDIO_MMD_PHYXS, 515 PMA_PMD_XCONTROL_REG, PMA_PMD_EXT_LPOWER_LBN,
334 PHYXS_TEST1, ctrl2); 516 !!(efx->phy_mode & PHY_MODE_LOW_POWER));
335} 517}
336 518
337static void tenxpress_phy_reconfigure(struct efx_nic *efx) 519static void tenxpress_phy_reconfigure(struct efx_nic *efx)
338{ 520{
339 struct tenxpress_phy_data *phy_data = efx->phy_data; 521 struct tenxpress_phy_data *phy_data = efx->phy_data;
340 bool loop_change = LOOPBACK_OUT_OF(phy_data, efx, 522 struct ethtool_cmd ecmd;
341 TENXPRESS_LOOPBACKS); 523 bool phy_mode_change, loop_reset, loop_toggle, loopback;
342 524
343 if (efx->phy_mode & PHY_MODE_SPECIAL) { 525 if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
344 phy_data->phy_mode = efx->phy_mode; 526 phy_data->phy_mode = efx->phy_mode;
345 return; 527 return;
346 } 528 }
347 529
348 /* When coming out of transmit disable, coming out of low power 530 tenxpress_low_power(efx);
349 * mode, or moving out of any PHY internal loopback mode, 531
350 * perform a special software reset */ 532 phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
351 if ((efx->phy_mode == PHY_MODE_NORMAL && 533 phy_data->phy_mode != PHY_MODE_NORMAL);
352 phy_data->phy_mode != PHY_MODE_NORMAL) || 534 loopback = LOOPBACK_MASK(efx) & efx->phy_op->loopbacks;
353 loop_change) { 535 loop_toggle = LOOPBACK_CHANGED(phy_data, efx, efx->phy_op->loopbacks);
354 tenxpress_special_reset(efx); 536 loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) ||
355 falcon_reset_xaui(efx); 537 LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
538
539 if (loop_reset || loop_toggle || loopback || phy_mode_change) {
540 int rc;
541
542 efx->phy_op->get_settings(efx, &ecmd);
543
544 if (loop_reset || phy_mode_change) {
545 tenxpress_special_reset(efx);
546
547 /* Reset XAUI if we were in 10G, and are staying
548 * in 10G. If we're moving into and out of 10G
549 * then xaui will be reset anyway */
550 if (EFX_IS10G(efx))
551 falcon_reset_xaui(efx);
552 }
553
554 if (efx->phy_type != PHY_TYPE_SFX7101) {
555 /* Only change autoneg once, on coming out or
556 * going into loopback */
557 if (loop_toggle)
558 ecmd.autoneg = !loopback;
559 if (loopback) {
560 ecmd.duplex = DUPLEX_FULL;
561 if (efx->loopback_mode == LOOPBACK_GPHY)
562 ecmd.speed = SPEED_1000;
563 else
564 ecmd.speed = SPEED_10000;
565 }
566 }
567
568 rc = efx->phy_op->set_settings(efx, &ecmd);
569 WARN_ON(rc);
356 } 570 }
357 571
358 mdio_clause45_transmit_disable(efx); 572 mdio_clause45_transmit_disable(efx);
359 mdio_clause45_phy_reconfigure(efx); 573 mdio_clause45_phy_reconfigure(efx);
360 tenxpress_phyxs_loopback(efx); 574 tenxpress_ext_loopback(efx);
361 575
362 phy_data->loopback_mode = efx->loopback_mode; 576 phy_data->loopback_mode = efx->loopback_mode;
363 phy_data->phy_mode = efx->phy_mode; 577 phy_data->phy_mode = efx->phy_mode;
364 efx->link_up = tenxpress_link_ok(efx, false);
365 efx->link_options = GM_LPA_10000FULL;
366}
367 578
368static void tenxpress_phy_clear_interrupt(struct efx_nic *efx) 579 if (efx->phy_type == PHY_TYPE_SFX7101) {
369{ 580 efx->link_speed = 10000;
370 /* Nothing done here - LASI interrupts aren't reliable so poll */ 581 efx->link_fd = true;
582 efx->link_up = sfx7101_link_ok(efx);
583 } else {
584 efx->phy_op->get_settings(efx, &ecmd);
585 efx->link_speed = ecmd.speed;
586 efx->link_fd = ecmd.duplex == DUPLEX_FULL;
587 efx->link_up = sft9001_link_ok(efx, &ecmd);
588 }
589 efx->link_fc = mdio_clause45_get_pause(efx);
371} 590}
372 591
373
374/* Poll PHY for interrupt */ 592/* Poll PHY for interrupt */
375static int tenxpress_phy_check_hw(struct efx_nic *efx) 593static void tenxpress_phy_poll(struct efx_nic *efx)
376{ 594{
377 struct tenxpress_phy_data *phy_data = efx->phy_data; 595 struct tenxpress_phy_data *phy_data = efx->phy_data;
378 bool link_ok; 596 bool change = false, link_ok;
379 597 unsigned link_fc;
380 link_ok = tenxpress_link_ok(efx, true); 598
599 if (efx->phy_type == PHY_TYPE_SFX7101) {
600 link_ok = sfx7101_link_ok(efx);
601 if (link_ok != efx->link_up) {
602 change = true;
603 } else {
604 link_fc = mdio_clause45_get_pause(efx);
605 if (link_fc != efx->link_fc)
606 change = true;
607 }
608 sfx7101_check_bad_lp(efx, link_ok);
609 } else if (efx->loopback_mode) {
610 bool link_ok = sft9001_link_ok(efx, NULL);
611 if (link_ok != efx->link_up)
612 change = true;
613 } else {
614 u32 status = mdio_clause45_read(efx, efx->mii.phy_id,
615 MDIO_MMD_PMAPMD,
616 PMA_PMD_LASI_STATUS);
617 if (status & (1 << PMA_PMD_LS_ALARM_LBN))
618 change = true;
619 }
381 620
382 if (link_ok != efx->link_up) 621 if (change)
383 falcon_xmac_sim_phy_event(efx); 622 falcon_sim_phy_event(efx);
384 623
385 if (phy_data->phy_mode != PHY_MODE_NORMAL) 624 if (phy_data->phy_mode != PHY_MODE_NORMAL)
386 return 0; 625 return;
387 626
388 if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) { 627 if (EFX_WORKAROUND_10750(efx) &&
628 atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
389 EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n"); 629 EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n");
390 falcon_reset_xaui(efx); 630 falcon_reset_xaui(efx);
391 atomic_set(&phy_data->bad_crc_count, 0); 631 atomic_set(&phy_data->bad_crc_count, 0);
392 } 632 }
393
394 return 0;
395} 633}
396 634
397static void tenxpress_phy_fini(struct efx_nic *efx) 635static void tenxpress_phy_fini(struct efx_nic *efx)
398{ 636{
399 int reg; 637 int reg;
400 638
401 /* Power down the LNPGA */ 639 if (efx->phy_type == PHY_TYPE_SFT9001B) {
402 reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN); 640 device_remove_file(&efx->pci_dev->dev,
403 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, 641 &dev_attr_phy_short_reach);
404 PMA_PMD_XCONTROL_REG, reg); 642 } else {
405 643 /* Power down the LNPGA */
406 /* Waiting here ensures that the board fini, which can turn off the 644 reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
407 * power to the PHY, won't get run until the LNPGA powerdown has been 645 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
408 * given long enough to complete. */ 646 PMA_PMD_XCONTROL_REG, reg);
409 schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */ 647
648 /* Waiting here ensures that the board fini, which can turn
649 * off the power to the PHY, won't get run until the LNPGA
650 * powerdown has been given long enough to complete. */
651 schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
652 }
410 653
411 kfree(efx->phy_data); 654 kfree(efx->phy_data);
412 efx->phy_data = NULL; 655 efx->phy_data = NULL;
@@ -430,19 +673,236 @@ void tenxpress_phy_blink(struct efx_nic *efx, bool blink)
430 PMA_PMD_LED_OVERR_REG, reg); 673 PMA_PMD_LED_OVERR_REG, reg);
431} 674}
432 675
433static int tenxpress_phy_test(struct efx_nic *efx) 676static const char *const sfx7101_test_names[] = {
677 "bist"
678};
679
680static int
681sfx7101_run_tests(struct efx_nic *efx, int *results, unsigned flags)
434{ 682{
683 int rc;
684
685 if (!(flags & ETH_TEST_FL_OFFLINE))
686 return 0;
687
435 /* BIST is automatically run after a special software reset */ 688 /* BIST is automatically run after a special software reset */
436 return tenxpress_special_reset(efx); 689 rc = tenxpress_special_reset(efx);
690 results[0] = rc ? -1 : 1;
691 return rc;
437} 692}
438 693
439struct efx_phy_operations falcon_tenxpress_phy_ops = { 694static const char *const sft9001_test_names[] = {
695 "bist",
696 "cable.pairA.status",
697 "cable.pairB.status",
698 "cable.pairC.status",
699 "cable.pairD.status",
700 "cable.pairA.length",
701 "cable.pairB.length",
702 "cable.pairC.length",
703 "cable.pairD.length",
704};
705
706static int sft9001_run_tests(struct efx_nic *efx, int *results, unsigned flags)
707{
708 struct ethtool_cmd ecmd;
709 int phy_id = efx->mii.phy_id;
710 int rc = 0, rc2, i, res_reg;
711
712 if (!(flags & ETH_TEST_FL_OFFLINE))
713 return 0;
714
715 efx->phy_op->get_settings(efx, &ecmd);
716
717 /* Initialise cable diagnostic results to unknown failure */
718 for (i = 1; i < 9; ++i)
719 results[i] = -1;
720
721 /* Run cable diagnostics; wait up to 5 seconds for them to complete.
722 * A cable fault is not a self-test failure, but a timeout is. */
723 mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
724 PMA_PMD_CDIAG_CTRL_REG,
725 (1 << CDIAG_CTRL_IMMED_LBN) |
726 (1 << CDIAG_CTRL_BRK_LINK_LBN) |
727 (CDIAG_CTRL_LEN_METRES << CDIAG_CTRL_LEN_UNIT_LBN));
728 i = 0;
729 while (mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
730 PMA_PMD_CDIAG_CTRL_REG) &
731 (1 << CDIAG_CTRL_IN_PROG_LBN)) {
732 if (++i == 50) {
733 rc = -ETIMEDOUT;
734 goto reset;
735 }
736 msleep(100);
737 }
738 res_reg = mdio_clause45_read(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
739 PMA_PMD_CDIAG_RES_REG);
740 for (i = 0; i < 4; i++) {
741 int pair_res =
742 (res_reg >> (CDIAG_RES_A_LBN - i * CDIAG_RES_WIDTH))
743 & ((1 << CDIAG_RES_WIDTH) - 1);
744 int len_reg = mdio_clause45_read(efx, efx->mii.phy_id,
745 MDIO_MMD_PMAPMD,
746 PMA_PMD_CDIAG_LEN_REG + i);
747 if (pair_res == CDIAG_RES_OK)
748 results[1 + i] = 1;
749 else if (pair_res == CDIAG_RES_INVALID)
750 results[1 + i] = -1;
751 else
752 results[1 + i] = -pair_res;
753 if (pair_res != CDIAG_RES_INVALID &&
754 pair_res != CDIAG_RES_OPEN &&
755 len_reg != 0xffff)
756 results[5 + i] = len_reg;
757 }
758
759 /* We must reset to exit cable diagnostic mode. The BIST will
760 * also run when we do this. */
761reset:
762 rc2 = tenxpress_special_reset(efx);
763 results[0] = rc2 ? -1 : 1;
764 if (!rc)
765 rc = rc2;
766
767 rc2 = efx->phy_op->set_settings(efx, &ecmd);
768 if (!rc)
769 rc = rc2;
770
771 return rc;
772}
773
774static u32 tenxpress_get_xnp_lpa(struct efx_nic *efx)
775{
776 int phy = efx->mii.phy_id;
777 u32 lpa = 0;
778 int reg;
779
780 if (efx->phy_type != PHY_TYPE_SFX7101) {
781 reg = mdio_clause45_read(efx, phy, MDIO_MMD_C22EXT,
782 C22EXT_MSTSLV_REG);
783 if (reg & (1 << C22EXT_MSTSLV_1000_HD_LBN))
784 lpa |= ADVERTISED_1000baseT_Half;
785 if (reg & (1 << C22EXT_MSTSLV_1000_FD_LBN))
786 lpa |= ADVERTISED_1000baseT_Full;
787 }
788 reg = mdio_clause45_read(efx, phy, MDIO_MMD_AN, MDIO_AN_10GBT_STATUS);
789 if (reg & (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN))
790 lpa |= ADVERTISED_10000baseT_Full;
791 return lpa;
792}
793
794static void sfx7101_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
795{
796 mdio_clause45_get_settings_ext(efx, ecmd, ADVERTISED_10000baseT_Full,
797 tenxpress_get_xnp_lpa(efx));
798 ecmd->supported |= SUPPORTED_10000baseT_Full;
799 ecmd->advertising |= ADVERTISED_10000baseT_Full;
800}
801
802static void sft9001_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
803{
804 int phy_id = efx->mii.phy_id;
805 u32 xnp_adv = 0;
806 int reg;
807
808 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
809 PMA_PMD_SPEED_ENABLE_REG);
810 if (EFX_WORKAROUND_13204(efx) && (reg & (1 << PMA_PMD_100TX_ADV_LBN)))
811 xnp_adv |= ADVERTISED_100baseT_Full;
812 if (reg & (1 << PMA_PMD_1000T_ADV_LBN))
813 xnp_adv |= ADVERTISED_1000baseT_Full;
814 if (reg & (1 << PMA_PMD_10000T_ADV_LBN))
815 xnp_adv |= ADVERTISED_10000baseT_Full;
816
817 mdio_clause45_get_settings_ext(efx, ecmd, xnp_adv,
818 tenxpress_get_xnp_lpa(efx));
819
820 ecmd->supported |= (SUPPORTED_100baseT_Half |
821 SUPPORTED_100baseT_Full |
822 SUPPORTED_1000baseT_Full);
823
824 /* Use the vendor defined C22ext register for duplex settings */
825 if (ecmd->speed != SPEED_10000 && !ecmd->autoneg) {
826 reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
827 GPHY_XCONTROL_REG);
828 ecmd->duplex = (reg & (1 << GPHY_DUPLEX_LBN) ?
829 DUPLEX_FULL : DUPLEX_HALF);
830 }
831}
832
833static int sft9001_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
834{
835 int phy_id = efx->mii.phy_id;
836 int rc;
837
838 rc = mdio_clause45_set_settings(efx, ecmd);
839 if (rc)
840 return rc;
841
842 if (ecmd->speed != SPEED_10000 && !ecmd->autoneg)
843 mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
844 GPHY_XCONTROL_REG, GPHY_DUPLEX_LBN,
845 ecmd->duplex == DUPLEX_FULL);
846
847 return rc;
848}
849
850static bool sft9001_set_xnp_advertise(struct efx_nic *efx, u32 advertising)
851{
852 int phy = efx->mii.phy_id;
853 int reg = mdio_clause45_read(efx, phy, MDIO_MMD_PMAPMD,
854 PMA_PMD_SPEED_ENABLE_REG);
855 bool enabled;
856
857 reg &= ~((1 << 2) | (1 << 3));
858 if (EFX_WORKAROUND_13204(efx) &&
859 (advertising & ADVERTISED_100baseT_Full))
860 reg |= 1 << PMA_PMD_100TX_ADV_LBN;
861 if (advertising & ADVERTISED_1000baseT_Full)
862 reg |= 1 << PMA_PMD_1000T_ADV_LBN;
863 if (advertising & ADVERTISED_10000baseT_Full)
864 reg |= 1 << PMA_PMD_10000T_ADV_LBN;
865 mdio_clause45_write(efx, phy, MDIO_MMD_PMAPMD,
866 PMA_PMD_SPEED_ENABLE_REG, reg);
867
868 enabled = (advertising &
869 (ADVERTISED_1000baseT_Half |
870 ADVERTISED_1000baseT_Full |
871 ADVERTISED_10000baseT_Full));
872 if (EFX_WORKAROUND_13204(efx))
873 enabled |= (advertising & ADVERTISED_100baseT_Full);
874 return enabled;
875}
876
877struct efx_phy_operations falcon_sfx7101_phy_ops = {
878 .macs = EFX_XMAC,
879 .init = tenxpress_phy_init,
880 .reconfigure = tenxpress_phy_reconfigure,
881 .poll = tenxpress_phy_poll,
882 .fini = tenxpress_phy_fini,
883 .clear_interrupt = efx_port_dummy_op_void,
884 .get_settings = sfx7101_get_settings,
885 .set_settings = mdio_clause45_set_settings,
886 .num_tests = ARRAY_SIZE(sfx7101_test_names),
887 .test_names = sfx7101_test_names,
888 .run_tests = sfx7101_run_tests,
889 .mmds = TENXPRESS_REQUIRED_DEVS,
890 .loopbacks = SFX7101_LOOPBACKS,
891};
892
893struct efx_phy_operations falcon_sft9001_phy_ops = {
894 .macs = EFX_GMAC | EFX_XMAC,
440 .init = tenxpress_phy_init, 895 .init = tenxpress_phy_init,
441 .reconfigure = tenxpress_phy_reconfigure, 896 .reconfigure = tenxpress_phy_reconfigure,
442 .check_hw = tenxpress_phy_check_hw, 897 .poll = tenxpress_phy_poll,
443 .fini = tenxpress_phy_fini, 898 .fini = tenxpress_phy_fini,
444 .clear_interrupt = tenxpress_phy_clear_interrupt, 899 .clear_interrupt = efx_port_dummy_op_void,
445 .test = tenxpress_phy_test, 900 .get_settings = sft9001_get_settings,
901 .set_settings = sft9001_set_settings,
902 .set_xnp_advertise = sft9001_set_xnp_advertise,
903 .num_tests = ARRAY_SIZE(sft9001_test_names),
904 .test_names = sft9001_test_names,
905 .run_tests = sft9001_run_tests,
446 .mmds = TENXPRESS_REQUIRED_DEVS, 906 .mmds = TENXPRESS_REQUIRED_DEVS,
447 .loopbacks = TENXPRESS_LOOPBACKS, 907 .loopbacks = SFT9001_LOOPBACKS,
448}; 908};
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index fa7b49d69288..82e03e1d7371 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -17,15 +17,20 @@
17 17
18#define EFX_WORKAROUND_ALWAYS(efx) 1 18#define EFX_WORKAROUND_ALWAYS(efx) 1
19#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) 19#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
20#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx)
21#define EFX_WORKAROUND_SFX7101(efx) ((efx)->phy_type == PHY_TYPE_SFX7101)
22#define EFX_WORKAROUND_SFT9001A(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A)
20 23
21/* XAUI resets if link not detected */ 24/* XAUI resets if link not detected */
22#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS 25#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
23/* RX PCIe double split performance issue */ 26/* RX PCIe double split performance issue */
24#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS 27#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
28/* Bit-bashed I2C reads cause performance drop */
29#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
25/* TX pkt parser problem with <= 16 byte TXes */ 30/* TX pkt parser problem with <= 16 byte TXes */
26#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS 31#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
27/* Low rate CRC errors require XAUI reset */ 32/* Low rate CRC errors require XAUI reset */
28#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS 33#define EFX_WORKAROUND_10750 EFX_WORKAROUND_SFX7101
29/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor 34/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
30 * or a PCIe error (bug 11028) */ 35 * or a PCIe error (bug 11028) */
31#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS 36#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
@@ -49,4 +54,9 @@
49/* Leak overlength packets rather than free */ 54/* Leak overlength packets rather than free */
50#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A 55#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
51 56
57/* Need to send XNP pages for 100BaseT */
58#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001A
59/* Need to keep AN enabled */
60#define EFX_WORKAROUND_13963 EFX_WORKAROUND_SFT9001A
61
52#endif /* EFX_WORKAROUNDS_H */ 62#endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
index 276151df3a70..2d50b6ecf5f9 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/xfp_phy.c
@@ -7,22 +7,21 @@
7 * by the Free Software Foundation, incorporated herein by reference. 7 * by the Free Software Foundation, incorporated herein by reference.
8 */ 8 */
9/* 9/*
10 * Driver for XFP optical PHYs (plus some support specific to the Quake 2032) 10 * Driver for XFP optical PHYs (plus some support specific to the Quake 2022/32)
11 * See www.amcc.com for details (search for qt2032) 11 * See www.amcc.com for details (search for qt2032)
12 */ 12 */
13 13
14#include <linux/timer.h> 14#include <linux/timer.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include "efx.h" 16#include "efx.h"
17#include "gmii.h"
18#include "mdio_10g.h" 17#include "mdio_10g.h"
19#include "xenpack.h" 18#include "xenpack.h"
20#include "phy.h" 19#include "phy.h"
21#include "mac.h" 20#include "falcon.h"
22 21
23#define XFP_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PCS | \ 22#define XFP_REQUIRED_DEVS (MDIO_MMDREG_DEVS_PCS | \
24 MDIO_MMDREG_DEVS0_PMAPMD | \ 23 MDIO_MMDREG_DEVS_PMAPMD | \
25 MDIO_MMDREG_DEVS0_PHYXS) 24 MDIO_MMDREG_DEVS_PHYXS)
26 25
27#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \ 26#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \
28 (1 << LOOPBACK_PMAPMD) | \ 27 (1 << LOOPBACK_PMAPMD) | \
@@ -65,7 +64,7 @@ static int xfp_reset_phy(struct efx_nic *efx)
65 /* Check that all the MMDs we expect are present and responding. We 64 /* Check that all the MMDs we expect are present and responding. We
66 * expect faults on some if the link is down, but not on the PHY XS */ 65 * expect faults on some if the link is down, but not on the PHY XS */
67 rc = mdio_clause45_check_mmds(efx, XFP_REQUIRED_DEVS, 66 rc = mdio_clause45_check_mmds(efx, XFP_REQUIRED_DEVS,
68 MDIO_MMDREG_DEVS0_PHYXS); 67 MDIO_MMDREG_DEVS_PHYXS);
69 if (rc < 0) 68 if (rc < 0)
70 goto fail; 69 goto fail;
71 70
@@ -120,15 +119,12 @@ static int xfp_link_ok(struct efx_nic *efx)
120 return mdio_clause45_links_ok(efx, XFP_REQUIRED_DEVS); 119 return mdio_clause45_links_ok(efx, XFP_REQUIRED_DEVS);
121} 120}
122 121
123static int xfp_phy_check_hw(struct efx_nic *efx) 122static void xfp_phy_poll(struct efx_nic *efx)
124{ 123{
125 int rc = 0;
126 int link_up = xfp_link_ok(efx); 124 int link_up = xfp_link_ok(efx);
127 /* Simulate a PHY event if link state has changed */ 125 /* Simulate a PHY event if link state has changed */
128 if (link_up != efx->link_up) 126 if (link_up != efx->link_up)
129 falcon_xmac_sim_phy_event(efx); 127 falcon_sim_phy_event(efx);
130
131 return rc;
132} 128}
133 129
134static void xfp_phy_reconfigure(struct efx_nic *efx) 130static void xfp_phy_reconfigure(struct efx_nic *efx)
@@ -145,7 +141,9 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
145 141
146 phy_data->phy_mode = efx->phy_mode; 142 phy_data->phy_mode = efx->phy_mode;
147 efx->link_up = xfp_link_ok(efx); 143 efx->link_up = xfp_link_ok(efx);
148 efx->link_options = GM_LPA_10000FULL; 144 efx->link_speed = 10000;
145 efx->link_fd = true;
146 efx->link_fc = efx->wanted_fc;
149} 147}
150 148
151 149
@@ -160,11 +158,14 @@ static void xfp_phy_fini(struct efx_nic *efx)
160} 158}
161 159
162struct efx_phy_operations falcon_xfp_phy_ops = { 160struct efx_phy_operations falcon_xfp_phy_ops = {
161 .macs = EFX_XMAC,
163 .init = xfp_phy_init, 162 .init = xfp_phy_init,
164 .reconfigure = xfp_phy_reconfigure, 163 .reconfigure = xfp_phy_reconfigure,
165 .check_hw = xfp_phy_check_hw, 164 .poll = xfp_phy_poll,
166 .fini = xfp_phy_fini, 165 .fini = xfp_phy_fini,
167 .clear_interrupt = xfp_phy_clear_interrupt, 166 .clear_interrupt = xfp_phy_clear_interrupt,
167 .get_settings = mdio_clause45_get_settings,
168 .set_settings = mdio_clause45_set_settings,
168 .mmds = XFP_REQUIRED_DEVS, 169 .mmds = XFP_REQUIRED_DEVS,
169 .loopbacks = XFP_LOOPBACKS, 170 .loopbacks = XFP_LOOPBACKS,
170}; 171};
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 6261201403cd..97d68560067d 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -377,7 +377,6 @@ memory_squeeze:
377 skb_put(skb, len); 377 skb_put(skb, len);
378 skb->protocol = eth_type_trans(skb, dev); 378 skb->protocol = eth_type_trans(skb, dev);
379 netif_rx(skb); 379 netif_rx(skb);
380 dev->last_rx = jiffies;
381 dev->stats.rx_packets++; 380 dev->stats.rx_packets++;
382 dev->stats.rx_bytes += len; 381 dev->stats.rx_bytes += len;
383 } else { 382 } else {
@@ -657,7 +656,7 @@ static void timeout(struct net_device *dev)
657 656
658static void sgiseeq_set_multicast(struct net_device *dev) 657static void sgiseeq_set_multicast(struct net_device *dev)
659{ 658{
660 struct sgiseeq_private *sp = (struct sgiseeq_private *) dev->priv; 659 struct sgiseeq_private *sp = netdev_priv(dev);
661 unsigned char oldmode = sp->mode; 660 unsigned char oldmode = sp->mode;
662 661
663 if(dev->flags & IFF_PROMISC) 662 if(dev->flags & IFF_PROMISC)
@@ -719,7 +718,6 @@ static int __init sgiseeq_probe(struct platform_device *pdev)
719 struct sgiseeq_private *sp; 718 struct sgiseeq_private *sp;
720 struct net_device *dev; 719 struct net_device *dev;
721 int err; 720 int err;
722 DECLARE_MAC_BUF(mac);
723 721
724 dev = alloc_etherdev(sizeof (struct sgiseeq_private)); 722 dev = alloc_etherdev(sizeof (struct sgiseeq_private));
725 if (!dev) { 723 if (!dev) {
@@ -793,8 +791,7 @@ static int __init sgiseeq_probe(struct platform_device *pdev)
793 goto err_out_free_page; 791 goto err_out_free_page;
794 } 792 }
795 793
796 printk(KERN_INFO "%s: %s %s\n", 794 printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);
797 dev->name, sgiseeqstr, print_mac(mac, dev->dev_addr));
798 795
799 return 0; 796 return 0;
800 797
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 59f242a67714..7f8e514eb5e9 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -540,7 +540,6 @@ static int sh_eth_rx(struct net_device *ndev)
540 skb_put(skb, pkt_len); 540 skb_put(skb, pkt_len);
541 skb->protocol = eth_type_trans(skb, ndev); 541 skb->protocol = eth_type_trans(skb, ndev);
542 netif_rx(skb); 542 netif_rx(skb);
543 ndev->last_rx = jiffies;
544 mdp->stats.rx_packets++; 543 mdp->stats.rx_packets++;
545 mdp->stats.rx_bytes += pkt_len; 544 mdp->stats.rx_bytes += pkt_len;
546 } 545 }
@@ -800,7 +799,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
800 char phy_id[BUS_ID_SIZE]; 799 char phy_id[BUS_ID_SIZE];
801 struct phy_device *phydev = NULL; 800 struct phy_device *phydev = NULL;
802 801
803 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, 802 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
804 mdp->mii_bus->id , mdp->phy_id); 803 mdp->mii_bus->id , mdp->phy_id);
805 804
806 mdp->link = PHY_DOWN; 805 mdp->link = PHY_DOWN;
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index e6e3bf58a569..83cc3c5f7946 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -627,7 +627,6 @@ static int sis190_rx_interrupt(struct net_device *dev,
627 627
628 sis190_rx_skb(skb); 628 sis190_rx_skb(skb);
629 629
630 dev->last_rx = jiffies;
631 stats->rx_packets++; 630 stats->rx_packets++;
632 stats->rx_bytes += pkt_size; 631 stats->rx_bytes += pkt_size;
633 if ((status & BCAST) == MCAST) 632 if ((status & BCAST) == MCAST)
@@ -1791,7 +1790,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1791 struct net_device *dev; 1790 struct net_device *dev;
1792 void __iomem *ioaddr; 1791 void __iomem *ioaddr;
1793 int rc; 1792 int rc;
1794 DECLARE_MAC_BUF(mac);
1795 1793
1796 if (!printed_version) { 1794 if (!printed_version) {
1797 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n"); 1795 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
@@ -1841,10 +1839,9 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1841 if (rc < 0) 1839 if (rc < 0)
1842 goto err_remove_mii; 1840 goto err_remove_mii;
1843 1841
1844 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), " 1842 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), %pM\n",
1845 "%s\n",
1846 pci_name(pdev), sis_chip_info[ent->driver_data].name, 1843 pci_name(pdev), sis_chip_info[ent->driver_data].name,
1847 ioaddr, dev->irq, print_mac(mac, dev->dev_addr)); 1844 ioaddr, dev->irq, dev->dev_addr);
1848 1845
1849 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name, 1846 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
1850 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII"); 1847 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 8e8337e8b072..4acd41a093ad 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -381,6 +381,21 @@ static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
381 return 0; 381 return 0;
382} 382}
383 383
384static const struct net_device_ops sis900_netdev_ops = {
385 .ndo_open = sis900_open,
386 .ndo_stop = sis900_close,
387 .ndo_start_xmit = sis900_start_xmit,
388 .ndo_set_config = sis900_set_config,
389 .ndo_set_multicast_list = set_rx_mode,
390 .ndo_change_mtu = eth_change_mtu,
391 .ndo_validate_addr = eth_validate_addr,
392 .ndo_do_ioctl = mii_ioctl,
393 .ndo_tx_timeout = sis900_tx_timeout,
394#ifdef CONFIG_NET_POLL_CONTROLLER
395 .ndo_poll_controller = sis900_poll,
396#endif
397};
398
384/** 399/**
385 * sis900_probe - Probe for sis900 device 400 * sis900_probe - Probe for sis900 device
386 * @pci_dev: the sis900 pci device 401 * @pci_dev: the sis900 pci device
@@ -404,7 +419,6 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
404 int i, ret; 419 int i, ret;
405 const char *card_name = card_names[pci_id->driver_data]; 420 const char *card_name = card_names[pci_id->driver_data];
406 const char *dev_name = pci_name(pci_dev); 421 const char *dev_name = pci_name(pci_dev);
407 DECLARE_MAC_BUF(mac);
408 422
409/* when built into the kernel, we only print version if device is found */ 423/* when built into the kernel, we only print version if device is found */
410#ifndef MODULE 424#ifndef MODULE
@@ -437,7 +451,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
437 if (ret) 451 if (ret)
438 goto err_out; 452 goto err_out;
439 453
440 sis_priv = net_dev->priv; 454 sis_priv = netdev_priv(net_dev);
441 net_dev->base_addr = ioaddr; 455 net_dev->base_addr = ioaddr;
442 net_dev->irq = pci_dev->irq; 456 net_dev->irq = pci_dev->irq;
443 sis_priv->pci_dev = pci_dev; 457 sis_priv->pci_dev = pci_dev;
@@ -462,20 +476,10 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
462 sis_priv->rx_ring_dma = ring_dma; 476 sis_priv->rx_ring_dma = ring_dma;
463 477
464 /* The SiS900-specific entries in the device structure. */ 478 /* The SiS900-specific entries in the device structure. */
465 net_dev->open = &sis900_open; 479 net_dev->netdev_ops = &sis900_netdev_ops;
466 net_dev->hard_start_xmit = &sis900_start_xmit;
467 net_dev->stop = &sis900_close;
468 net_dev->set_config = &sis900_set_config;
469 net_dev->set_multicast_list = &set_rx_mode;
470 net_dev->do_ioctl = &mii_ioctl;
471 net_dev->tx_timeout = sis900_tx_timeout;
472 net_dev->watchdog_timeo = TX_TIMEOUT; 480 net_dev->watchdog_timeo = TX_TIMEOUT;
473 net_dev->ethtool_ops = &sis900_ethtool_ops; 481 net_dev->ethtool_ops = &sis900_ethtool_ops;
474 482
475#ifdef CONFIG_NET_POLL_CONTROLLER
476 net_dev->poll_controller = &sis900_poll;
477#endif
478
479 if (sis900_debug > 0) 483 if (sis900_debug > 0)
480 sis_priv->msg_enable = sis900_debug; 484 sis_priv->msg_enable = sis900_debug;
481 else 485 else
@@ -534,9 +538,9 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
534 goto err_unmap_rx; 538 goto err_unmap_rx;
535 539
536 /* print some information about our NIC */ 540 /* print some information about our NIC */
537 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %s\n", 541 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n",
538 net_dev->name, card_name, ioaddr, net_dev->irq, 542 net_dev->name, card_name, ioaddr, net_dev->irq,
539 print_mac(mac, net_dev->dev_addr)); 543 net_dev->dev_addr);
540 544
541 /* Detect Wake on Lan support */ 545 /* Detect Wake on Lan support */
542 ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27; 546 ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27;
@@ -570,7 +574,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
570 574
571static int __devinit sis900_mii_probe(struct net_device * net_dev) 575static int __devinit sis900_mii_probe(struct net_device * net_dev)
572{ 576{
573 struct sis900_private * sis_priv = net_dev->priv; 577 struct sis900_private *sis_priv = netdev_priv(net_dev);
574 const char *dev_name = pci_name(sis_priv->pci_dev); 578 const char *dev_name = pci_name(sis_priv->pci_dev);
575 u16 poll_bit = MII_STAT_LINK, status = 0; 579 u16 poll_bit = MII_STAT_LINK, status = 0;
576 unsigned long timeout = jiffies + 5 * HZ; 580 unsigned long timeout = jiffies + 5 * HZ;
@@ -698,7 +702,7 @@ static int __devinit sis900_mii_probe(struct net_device * net_dev)
698 702
699static u16 sis900_default_phy(struct net_device * net_dev) 703static u16 sis900_default_phy(struct net_device * net_dev)
700{ 704{
701 struct sis900_private * sis_priv = net_dev->priv; 705 struct sis900_private *sis_priv = netdev_priv(net_dev);
702 struct mii_phy *phy = NULL, *phy_home = NULL, 706 struct mii_phy *phy = NULL, *phy_home = NULL,
703 *default_phy = NULL, *phy_lan = NULL; 707 *default_phy = NULL, *phy_lan = NULL;
704 u16 status; 708 u16 status;
@@ -999,7 +1003,7 @@ static void sis900_poll(struct net_device *dev)
999static int 1003static int
1000sis900_open(struct net_device *net_dev) 1004sis900_open(struct net_device *net_dev)
1001{ 1005{
1002 struct sis900_private *sis_priv = net_dev->priv; 1006 struct sis900_private *sis_priv = netdev_priv(net_dev);
1003 long ioaddr = net_dev->base_addr; 1007 long ioaddr = net_dev->base_addr;
1004 int ret; 1008 int ret;
1005 1009
@@ -1055,7 +1059,7 @@ sis900_open(struct net_device *net_dev)
1055static void 1059static void
1056sis900_init_rxfilter (struct net_device * net_dev) 1060sis900_init_rxfilter (struct net_device * net_dev)
1057{ 1061{
1058 struct sis900_private *sis_priv = net_dev->priv; 1062 struct sis900_private *sis_priv = netdev_priv(net_dev);
1059 long ioaddr = net_dev->base_addr; 1063 long ioaddr = net_dev->base_addr;
1060 u32 rfcrSave; 1064 u32 rfcrSave;
1061 u32 i; 1065 u32 i;
@@ -1093,7 +1097,7 @@ sis900_init_rxfilter (struct net_device * net_dev)
1093static void 1097static void
1094sis900_init_tx_ring(struct net_device *net_dev) 1098sis900_init_tx_ring(struct net_device *net_dev)
1095{ 1099{
1096 struct sis900_private *sis_priv = net_dev->priv; 1100 struct sis900_private *sis_priv = netdev_priv(net_dev);
1097 long ioaddr = net_dev->base_addr; 1101 long ioaddr = net_dev->base_addr;
1098 int i; 1102 int i;
1099 1103
@@ -1127,7 +1131,7 @@ sis900_init_tx_ring(struct net_device *net_dev)
1127static void 1131static void
1128sis900_init_rx_ring(struct net_device *net_dev) 1132sis900_init_rx_ring(struct net_device *net_dev)
1129{ 1133{
1130 struct sis900_private *sis_priv = net_dev->priv; 1134 struct sis900_private *sis_priv = netdev_priv(net_dev);
1131 long ioaddr = net_dev->base_addr; 1135 long ioaddr = net_dev->base_addr;
1132 int i; 1136 int i;
1133 1137
@@ -1198,7 +1202,7 @@ sis900_init_rx_ring(struct net_device *net_dev)
1198 1202
1199static void sis630_set_eq(struct net_device *net_dev, u8 revision) 1203static void sis630_set_eq(struct net_device *net_dev, u8 revision)
1200{ 1204{
1201 struct sis900_private *sis_priv = net_dev->priv; 1205 struct sis900_private *sis_priv = netdev_priv(net_dev);
1202 u16 reg14h, eq_value=0, max_value=0, min_value=0; 1206 u16 reg14h, eq_value=0, max_value=0, min_value=0;
1203 int i, maxcount=10; 1207 int i, maxcount=10;
1204 1208
@@ -1271,13 +1275,13 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision)
1271static void sis900_timer(unsigned long data) 1275static void sis900_timer(unsigned long data)
1272{ 1276{
1273 struct net_device *net_dev = (struct net_device *)data; 1277 struct net_device *net_dev = (struct net_device *)data;
1274 struct sis900_private *sis_priv = net_dev->priv; 1278 struct sis900_private *sis_priv = netdev_priv(net_dev);
1275 struct mii_phy *mii_phy = sis_priv->mii; 1279 struct mii_phy *mii_phy = sis_priv->mii;
1276 static const int next_tick = 5*HZ; 1280 static const int next_tick = 5*HZ;
1277 u16 status; 1281 u16 status;
1278 1282
1279 if (!sis_priv->autong_complete){ 1283 if (!sis_priv->autong_complete){
1280 int speed, duplex = 0; 1284 int uninitialized_var(speed), duplex = 0;
1281 1285
1282 sis900_read_mode(net_dev, &speed, &duplex); 1286 sis900_read_mode(net_dev, &speed, &duplex);
1283 if (duplex){ 1287 if (duplex){
@@ -1341,7 +1345,7 @@ static void sis900_timer(unsigned long data)
1341 1345
1342static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy) 1346static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy)
1343{ 1347{
1344 struct sis900_private *sis_priv = net_dev->priv; 1348 struct sis900_private *sis_priv = netdev_priv(net_dev);
1345 long ioaddr = net_dev->base_addr; 1349 long ioaddr = net_dev->base_addr;
1346 int speed, duplex; 1350 int speed, duplex;
1347 1351
@@ -1420,7 +1424,7 @@ static void sis900_set_mode (long ioaddr, int speed, int duplex)
1420 1424
1421static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr) 1425static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr)
1422{ 1426{
1423 struct sis900_private *sis_priv = net_dev->priv; 1427 struct sis900_private *sis_priv = netdev_priv(net_dev);
1424 int i = 0; 1428 int i = 0;
1425 u32 status; 1429 u32 status;
1426 1430
@@ -1455,7 +1459,7 @@ static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr)
1455 1459
1456static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex) 1460static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex)
1457{ 1461{
1458 struct sis900_private *sis_priv = net_dev->priv; 1462 struct sis900_private *sis_priv = netdev_priv(net_dev);
1459 struct mii_phy *phy = sis_priv->mii; 1463 struct mii_phy *phy = sis_priv->mii;
1460 int phy_addr = sis_priv->cur_phy; 1464 int phy_addr = sis_priv->cur_phy;
1461 u32 status; 1465 u32 status;
@@ -1510,7 +1514,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
1510 1514
1511static void sis900_tx_timeout(struct net_device *net_dev) 1515static void sis900_tx_timeout(struct net_device *net_dev)
1512{ 1516{
1513 struct sis900_private *sis_priv = net_dev->priv; 1517 struct sis900_private *sis_priv = netdev_priv(net_dev);
1514 long ioaddr = net_dev->base_addr; 1518 long ioaddr = net_dev->base_addr;
1515 unsigned long flags; 1519 unsigned long flags;
1516 int i; 1520 int i;
@@ -1569,7 +1573,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
1569static int 1573static int
1570sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) 1574sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1571{ 1575{
1572 struct sis900_private *sis_priv = net_dev->priv; 1576 struct sis900_private *sis_priv = netdev_priv(net_dev);
1573 long ioaddr = net_dev->base_addr; 1577 long ioaddr = net_dev->base_addr;
1574 unsigned int entry; 1578 unsigned int entry;
1575 unsigned long flags; 1579 unsigned long flags;
@@ -1638,7 +1642,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1638static irqreturn_t sis900_interrupt(int irq, void *dev_instance) 1642static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1639{ 1643{
1640 struct net_device *net_dev = dev_instance; 1644 struct net_device *net_dev = dev_instance;
1641 struct sis900_private *sis_priv = net_dev->priv; 1645 struct sis900_private *sis_priv = netdev_priv(net_dev);
1642 int boguscnt = max_interrupt_work; 1646 int boguscnt = max_interrupt_work;
1643 long ioaddr = net_dev->base_addr; 1647 long ioaddr = net_dev->base_addr;
1644 u32 status; 1648 u32 status;
@@ -1700,7 +1704,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1700 1704
1701static int sis900_rx(struct net_device *net_dev) 1705static int sis900_rx(struct net_device *net_dev)
1702{ 1706{
1703 struct sis900_private *sis_priv = net_dev->priv; 1707 struct sis900_private *sis_priv = netdev_priv(net_dev);
1704 long ioaddr = net_dev->base_addr; 1708 long ioaddr = net_dev->base_addr;
1705 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC; 1709 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
1706 u32 rx_status = sis_priv->rx_ring[entry].cmdsts; 1710 u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
@@ -1789,7 +1793,6 @@ static int sis900_rx(struct net_device *net_dev)
1789 /* some network statistics */ 1793 /* some network statistics */
1790 if ((rx_status & BCAST) == MCAST) 1794 if ((rx_status & BCAST) == MCAST)
1791 net_dev->stats.multicast++; 1795 net_dev->stats.multicast++;
1792 net_dev->last_rx = jiffies;
1793 net_dev->stats.rx_bytes += rx_size; 1796 net_dev->stats.rx_bytes += rx_size;
1794 net_dev->stats.rx_packets++; 1797 net_dev->stats.rx_packets++;
1795 sis_priv->dirty_rx++; 1798 sis_priv->dirty_rx++;
@@ -1850,7 +1853,7 @@ refill_rx_ring:
1850 1853
1851static void sis900_finish_xmit (struct net_device *net_dev) 1854static void sis900_finish_xmit (struct net_device *net_dev)
1852{ 1855{
1853 struct sis900_private *sis_priv = net_dev->priv; 1856 struct sis900_private *sis_priv = netdev_priv(net_dev);
1854 1857
1855 for (; sis_priv->dirty_tx != sis_priv->cur_tx; sis_priv->dirty_tx++) { 1858 for (; sis_priv->dirty_tx != sis_priv->cur_tx; sis_priv->dirty_tx++) {
1856 struct sk_buff *skb; 1859 struct sk_buff *skb;
@@ -1919,7 +1922,7 @@ static void sis900_finish_xmit (struct net_device *net_dev)
1919static int sis900_close(struct net_device *net_dev) 1922static int sis900_close(struct net_device *net_dev)
1920{ 1923{
1921 long ioaddr = net_dev->base_addr; 1924 long ioaddr = net_dev->base_addr;
1922 struct sis900_private *sis_priv = net_dev->priv; 1925 struct sis900_private *sis_priv = netdev_priv(net_dev);
1923 struct sk_buff *skb; 1926 struct sk_buff *skb;
1924 int i; 1927 int i;
1925 1928
@@ -1974,7 +1977,7 @@ static int sis900_close(struct net_device *net_dev)
1974static void sis900_get_drvinfo(struct net_device *net_dev, 1977static void sis900_get_drvinfo(struct net_device *net_dev,
1975 struct ethtool_drvinfo *info) 1978 struct ethtool_drvinfo *info)
1976{ 1979{
1977 struct sis900_private *sis_priv = net_dev->priv; 1980 struct sis900_private *sis_priv = netdev_priv(net_dev);
1978 1981
1979 strcpy (info->driver, SIS900_MODULE_NAME); 1982 strcpy (info->driver, SIS900_MODULE_NAME);
1980 strcpy (info->version, SIS900_DRV_VERSION); 1983 strcpy (info->version, SIS900_DRV_VERSION);
@@ -1983,26 +1986,26 @@ static void sis900_get_drvinfo(struct net_device *net_dev,
1983 1986
1984static u32 sis900_get_msglevel(struct net_device *net_dev) 1987static u32 sis900_get_msglevel(struct net_device *net_dev)
1985{ 1988{
1986 struct sis900_private *sis_priv = net_dev->priv; 1989 struct sis900_private *sis_priv = netdev_priv(net_dev);
1987 return sis_priv->msg_enable; 1990 return sis_priv->msg_enable;
1988} 1991}
1989 1992
1990static void sis900_set_msglevel(struct net_device *net_dev, u32 value) 1993static void sis900_set_msglevel(struct net_device *net_dev, u32 value)
1991{ 1994{
1992 struct sis900_private *sis_priv = net_dev->priv; 1995 struct sis900_private *sis_priv = netdev_priv(net_dev);
1993 sis_priv->msg_enable = value; 1996 sis_priv->msg_enable = value;
1994} 1997}
1995 1998
1996static u32 sis900_get_link(struct net_device *net_dev) 1999static u32 sis900_get_link(struct net_device *net_dev)
1997{ 2000{
1998 struct sis900_private *sis_priv = net_dev->priv; 2001 struct sis900_private *sis_priv = netdev_priv(net_dev);
1999 return mii_link_ok(&sis_priv->mii_info); 2002 return mii_link_ok(&sis_priv->mii_info);
2000} 2003}
2001 2004
2002static int sis900_get_settings(struct net_device *net_dev, 2005static int sis900_get_settings(struct net_device *net_dev,
2003 struct ethtool_cmd *cmd) 2006 struct ethtool_cmd *cmd)
2004{ 2007{
2005 struct sis900_private *sis_priv = net_dev->priv; 2008 struct sis900_private *sis_priv = netdev_priv(net_dev);
2006 spin_lock_irq(&sis_priv->lock); 2009 spin_lock_irq(&sis_priv->lock);
2007 mii_ethtool_gset(&sis_priv->mii_info, cmd); 2010 mii_ethtool_gset(&sis_priv->mii_info, cmd);
2008 spin_unlock_irq(&sis_priv->lock); 2011 spin_unlock_irq(&sis_priv->lock);
@@ -2012,7 +2015,7 @@ static int sis900_get_settings(struct net_device *net_dev,
2012static int sis900_set_settings(struct net_device *net_dev, 2015static int sis900_set_settings(struct net_device *net_dev,
2013 struct ethtool_cmd *cmd) 2016 struct ethtool_cmd *cmd)
2014{ 2017{
2015 struct sis900_private *sis_priv = net_dev->priv; 2018 struct sis900_private *sis_priv = netdev_priv(net_dev);
2016 int rt; 2019 int rt;
2017 spin_lock_irq(&sis_priv->lock); 2020 spin_lock_irq(&sis_priv->lock);
2018 rt = mii_ethtool_sset(&sis_priv->mii_info, cmd); 2021 rt = mii_ethtool_sset(&sis_priv->mii_info, cmd);
@@ -2022,7 +2025,7 @@ static int sis900_set_settings(struct net_device *net_dev,
2022 2025
2023static int sis900_nway_reset(struct net_device *net_dev) 2026static int sis900_nway_reset(struct net_device *net_dev)
2024{ 2027{
2025 struct sis900_private *sis_priv = net_dev->priv; 2028 struct sis900_private *sis_priv = netdev_priv(net_dev);
2026 return mii_nway_restart(&sis_priv->mii_info); 2029 return mii_nway_restart(&sis_priv->mii_info);
2027} 2030}
2028 2031
@@ -2039,7 +2042,7 @@ static int sis900_nway_reset(struct net_device *net_dev)
2039 2042
2040static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) 2043static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2041{ 2044{
2042 struct sis900_private *sis_priv = net_dev->priv; 2045 struct sis900_private *sis_priv = netdev_priv(net_dev);
2043 long pmctrl_addr = net_dev->base_addr + pmctrl; 2046 long pmctrl_addr = net_dev->base_addr + pmctrl;
2044 u32 cfgpmcsr = 0, pmctrl_bits = 0; 2047 u32 cfgpmcsr = 0, pmctrl_bits = 0;
2045 2048
@@ -2110,7 +2113,7 @@ static const struct ethtool_ops sis900_ethtool_ops = {
2110 2113
2111static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) 2114static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
2112{ 2115{
2113 struct sis900_private *sis_priv = net_dev->priv; 2116 struct sis900_private *sis_priv = netdev_priv(net_dev);
2114 struct mii_ioctl_data *data = if_mii(rq); 2117 struct mii_ioctl_data *data = if_mii(rq);
2115 2118
2116 switch(cmd) { 2119 switch(cmd) {
@@ -2144,7 +2147,7 @@ static int mii_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
2144 2147
2145static int sis900_set_config(struct net_device *dev, struct ifmap *map) 2148static int sis900_set_config(struct net_device *dev, struct ifmap *map)
2146{ 2149{
2147 struct sis900_private *sis_priv = dev->priv; 2150 struct sis900_private *sis_priv = netdev_priv(dev);
2148 struct mii_phy *mii_phy = sis_priv->mii; 2151 struct mii_phy *mii_phy = sis_priv->mii;
2149 2152
2150 u16 status; 2153 u16 status;
@@ -2267,7 +2270,7 @@ static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
2267static void set_rx_mode(struct net_device *net_dev) 2270static void set_rx_mode(struct net_device *net_dev)
2268{ 2271{
2269 long ioaddr = net_dev->base_addr; 2272 long ioaddr = net_dev->base_addr;
2270 struct sis900_private * sis_priv = net_dev->priv; 2273 struct sis900_private *sis_priv = netdev_priv(net_dev);
2271 u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */ 2274 u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */
2272 int i, table_entries; 2275 int i, table_entries;
2273 u32 rx_mode; 2276 u32 rx_mode;
@@ -2342,7 +2345,7 @@ static void set_rx_mode(struct net_device *net_dev)
2342 2345
2343static void sis900_reset(struct net_device *net_dev) 2346static void sis900_reset(struct net_device *net_dev)
2344{ 2347{
2345 struct sis900_private * sis_priv = net_dev->priv; 2348 struct sis900_private *sis_priv = netdev_priv(net_dev);
2346 long ioaddr = net_dev->base_addr; 2349 long ioaddr = net_dev->base_addr;
2347 int i = 0; 2350 int i = 0;
2348 u32 status = TxRCMP | RxRCMP; 2351 u32 status = TxRCMP | RxRCMP;
@@ -2375,7 +2378,7 @@ static void sis900_reset(struct net_device *net_dev)
2375static void __devexit sis900_remove(struct pci_dev *pci_dev) 2378static void __devexit sis900_remove(struct pci_dev *pci_dev)
2376{ 2379{
2377 struct net_device *net_dev = pci_get_drvdata(pci_dev); 2380 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2378 struct sis900_private * sis_priv = net_dev->priv; 2381 struct sis900_private *sis_priv = netdev_priv(net_dev);
2379 struct mii_phy *phy = NULL; 2382 struct mii_phy *phy = NULL;
2380 2383
2381 while (sis_priv->first_mii) { 2384 while (sis_priv->first_mii) {
@@ -2419,7 +2422,7 @@ static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
2419static int sis900_resume(struct pci_dev *pci_dev) 2422static int sis900_resume(struct pci_dev *pci_dev)
2420{ 2423{
2421 struct net_device *net_dev = pci_get_drvdata(pci_dev); 2424 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2422 struct sis900_private *sis_priv = net_dev->priv; 2425 struct sis900_private *sis_priv = netdev_priv(net_dev);
2423 long ioaddr = net_dev->base_addr; 2426 long ioaddr = net_dev->base_addr;
2424 2427
2425 if(!netif_running(net_dev)) 2428 if(!netif_running(net_dev))
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index a2b092bb3626..607efeaf0bc5 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -168,6 +168,17 @@ static int num_boards; /* total number of adapters configured */
168#define PRINTK(s, args...) 168#define PRINTK(s, args...)
169#endif // DRIVERDEBUG 169#endif // DRIVERDEBUG
170 170
171static const struct net_device_ops skfp_netdev_ops = {
172 .ndo_open = skfp_open,
173 .ndo_stop = skfp_close,
174 .ndo_start_xmit = skfp_send_pkt,
175 .ndo_get_stats = skfp_ctl_get_stats,
176 .ndo_change_mtu = fddi_change_mtu,
177 .ndo_set_multicast_list = skfp_ctl_set_multicast_list,
178 .ndo_set_mac_address = skfp_ctl_set_mac_address,
179 .ndo_do_ioctl = skfp_ioctl,
180};
181
171/* 182/*
172 * ================= 183 * =================
173 * = skfp_init_one = 184 * = skfp_init_one =
@@ -253,13 +264,7 @@ static int skfp_init_one(struct pci_dev *pdev,
253 } 264 }
254 265
255 dev->irq = pdev->irq; 266 dev->irq = pdev->irq;
256 dev->get_stats = &skfp_ctl_get_stats; 267 dev->netdev_ops = &skfp_netdev_ops;
257 dev->open = &skfp_open;
258 dev->stop = &skfp_close;
259 dev->hard_start_xmit = &skfp_send_pkt;
260 dev->set_multicast_list = &skfp_ctl_set_multicast_list;
261 dev->set_mac_address = &skfp_ctl_set_mac_address;
262 dev->do_ioctl = &skfp_ioctl;
263 268
264 SET_NETDEV_DEV(dev, &pdev->dev); 269 SET_NETDEV_DEV(dev, &pdev->dev);
265 270
@@ -612,7 +617,7 @@ static int skfp_close(struct net_device *dev)
612 * Interrupts are disabled, then reenabled at the adapter. 617 * Interrupts are disabled, then reenabled at the adapter.
613 */ 618 */
614 619
615irqreturn_t skfp_interrupt(int irq, void *dev_id) 620static irqreturn_t skfp_interrupt(int irq, void *dev_id)
616{ 621{
617 struct net_device *dev = dev_id; 622 struct net_device *dev = dev_id;
618 struct s_smc *smc; /* private board structure pointer */ 623 struct s_smc *smc; /* private board structure pointer */
@@ -679,7 +684,7 @@ irqreturn_t skfp_interrupt(int irq, void *dev_id)
679 * independent. 684 * independent.
680 * 685 *
681 */ 686 */
682struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev) 687static struct net_device_stats *skfp_ctl_get_stats(struct net_device *dev)
683{ 688{
684 struct s_smc *bp = netdev_priv(dev); 689 struct s_smc *bp = netdev_priv(dev);
685 690
@@ -1224,7 +1229,7 @@ static void send_queued_packets(struct s_smc *smc)
1224 * Verify if the source address is set. Insert it if necessary. 1229 * Verify if the source address is set. Insert it if necessary.
1225 * 1230 *
1226 ************************/ 1231 ************************/
1227void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr) 1232static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
1228{ 1233{
1229 unsigned char SRBit; 1234 unsigned char SRBit;
1230 1235
@@ -1680,7 +1685,6 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1680 skb->protocol = fddi_type_trans(skb, bp->dev); 1685 skb->protocol = fddi_type_trans(skb, bp->dev);
1681 1686
1682 netif_rx(skb); 1687 netif_rx(skb);
1683 bp->dev->last_rx = jiffies;
1684 1688
1685 HWM_RX_CHECK(smc, RX_LOW_WATERMARK); 1689 HWM_RX_CHECK(smc, RX_LOW_WATERMARK);
1686 return; 1690 return;
@@ -1939,7 +1943,6 @@ int mac_drv_rx_init(struct s_smc *smc, int len, int fc,
1939 1943
1940 // deliver frame to system 1944 // deliver frame to system
1941 skb->protocol = fddi_type_trans(skb, smc->os.dev); 1945 skb->protocol = fddi_type_trans(skb, smc->os.dev);
1942 skb->dev->last_rx = jiffies;
1943 netif_rx(skb); 1946 netif_rx(skb);
1944 1947
1945 return (0); 1948 return (0);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 43f4c730be42..c9dbb06f8c94 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -104,6 +104,7 @@ static void yukon_get_stats(struct skge_port *skge, u64 *data);
104static void yukon_init(struct skge_hw *hw, int port); 104static void yukon_init(struct skge_hw *hw, int port);
105static void genesis_mac_init(struct skge_hw *hw, int port); 105static void genesis_mac_init(struct skge_hw *hw, int port);
106static void genesis_link_up(struct skge_port *skge); 106static void genesis_link_up(struct skge_port *skge);
107static void skge_set_multicast(struct net_device *dev);
107 108
108/* Avoid conditionals by using array */ 109/* Avoid conditionals by using array */
109static const int txqaddr[] = { Q_XA1, Q_XA2 }; 110static const int txqaddr[] = { Q_XA1, Q_XA2 };
@@ -149,24 +150,6 @@ static u32 wol_supported(const struct skge_hw *hw)
149 return WAKE_MAGIC | WAKE_PHY; 150 return WAKE_MAGIC | WAKE_PHY;
150} 151}
151 152
152static u32 pci_wake_enabled(struct pci_dev *dev)
153{
154 int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
155 u16 value;
156
157 /* If device doesn't support PM Capabilities, but request is to disable
158 * wake events, it's a nop; otherwise fail */
159 if (!pm)
160 return 0;
161
162 pci_read_config_word(dev, pm + PCI_PM_PMC, &value);
163
164 value &= PCI_PM_CAP_PME_MASK;
165 value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */
166
167 return value != 0;
168}
169
170static void skge_wol_init(struct skge_port *skge) 153static void skge_wol_init(struct skge_port *skge)
171{ 154{
172 struct skge_hw *hw = skge->hw; 155 struct skge_hw *hw = skge->hw;
@@ -254,10 +237,14 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
254 struct skge_port *skge = netdev_priv(dev); 237 struct skge_port *skge = netdev_priv(dev);
255 struct skge_hw *hw = skge->hw; 238 struct skge_hw *hw = skge->hw;
256 239
257 if (wol->wolopts & ~wol_supported(hw)) 240 if ((wol->wolopts & ~wol_supported(hw))
241 || !device_can_wakeup(&hw->pdev->dev))
258 return -EOPNOTSUPP; 242 return -EOPNOTSUPP;
259 243
260 skge->wol = wol->wolopts; 244 skge->wol = wol->wolopts;
245
246 device_set_wakeup_enable(&hw->pdev->dev, skge->wol);
247
261 return 0; 248 return 0;
262} 249}
263 250
@@ -2477,7 +2464,7 @@ static void skge_phy_reset(struct skge_port *skge)
2477 } 2464 }
2478 spin_unlock_bh(&hw->phy_lock); 2465 spin_unlock_bh(&hw->phy_lock);
2479 2466
2480 dev->set_multicast_list(dev); 2467 skge_set_multicast(dev);
2481} 2468}
2482 2469
2483/* Basic MII support */ 2470/* Basic MII support */
@@ -3045,6 +3032,18 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
3045 (status & GMR_FS_RX_OK) == 0; 3032 (status & GMR_FS_RX_OK) == 0;
3046} 3033}
3047 3034
3035static void skge_set_multicast(struct net_device *dev)
3036{
3037 struct skge_port *skge = netdev_priv(dev);
3038 struct skge_hw *hw = skge->hw;
3039
3040 if (hw->chip_id == CHIP_ID_GENESIS)
3041 genesis_set_multicast(dev);
3042 else
3043 yukon_set_multicast(dev);
3044
3045}
3046
3048 3047
3049/* Get receive buffer from descriptor. 3048/* Get receive buffer from descriptor.
3050 * Handles copy of small buffers and reallocation failures 3049 * Handles copy of small buffers and reallocation failures
@@ -3200,7 +3199,6 @@ static int skge_poll(struct napi_struct *napi, int to_do)
3200 3199
3201 skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); 3200 skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
3202 if (likely(skb)) { 3201 if (likely(skb)) {
3203 dev->last_rx = jiffies;
3204 netif_receive_skb(skb); 3202 netif_receive_skb(skb);
3205 3203
3206 ++work_done; 3204 ++work_done;
@@ -3216,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
3216 unsigned long flags; 3214 unsigned long flags;
3217 3215
3218 spin_lock_irqsave(&hw->hw_lock, flags); 3216 spin_lock_irqsave(&hw->hw_lock, flags);
3219 __netif_rx_complete(dev, napi); 3217 __netif_rx_complete(napi);
3220 hw->intr_mask |= napimask[skge->port]; 3218 hw->intr_mask |= napimask[skge->port];
3221 skge_write32(hw, B0_IMSK, hw->intr_mask); 3219 skge_write32(hw, B0_IMSK, hw->intr_mask);
3222 skge_read32(hw, B0_IMSK); 3220 skge_read32(hw, B0_IMSK);
@@ -3379,7 +3377,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
3379 if (status & (IS_XA1_F|IS_R1_F)) { 3377 if (status & (IS_XA1_F|IS_R1_F)) {
3380 struct skge_port *skge = netdev_priv(hw->dev[0]); 3378 struct skge_port *skge = netdev_priv(hw->dev[0]);
3381 hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); 3379 hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
3382 netif_rx_schedule(hw->dev[0], &skge->napi); 3380 netif_rx_schedule(&skge->napi);
3383 } 3381 }
3384 3382
3385 if (status & IS_PA_TO_TX1) 3383 if (status & IS_PA_TO_TX1)
@@ -3399,7 +3397,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
3399 3397
3400 if (status & (IS_XA2_F|IS_R2_F)) { 3398 if (status & (IS_XA2_F|IS_R2_F)) {
3401 hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); 3399 hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
3402 netif_rx_schedule(hw->dev[1], &skge->napi); 3400 netif_rx_schedule(&skge->napi);
3403 } 3401 }
3404 3402
3405 if (status & IS_PA_TO_RX2) { 3403 if (status & IS_PA_TO_RX2) {
@@ -3730,7 +3728,7 @@ static int skge_device_event(struct notifier_block *unused,
3730 struct skge_port *skge; 3728 struct skge_port *skge;
3731 struct dentry *d; 3729 struct dentry *d;
3732 3730
3733 if (dev->open != &skge_up || !skge_debug) 3731 if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug)
3734 goto done; 3732 goto done;
3735 3733
3736 skge = netdev_priv(dev); 3734 skge = netdev_priv(dev);
@@ -3804,6 +3802,23 @@ static __exit void skge_debug_cleanup(void)
3804#define skge_debug_cleanup() 3802#define skge_debug_cleanup()
3805#endif 3803#endif
3806 3804
3805static const struct net_device_ops skge_netdev_ops = {
3806 .ndo_open = skge_up,
3807 .ndo_stop = skge_down,
3808 .ndo_start_xmit = skge_xmit_frame,
3809 .ndo_do_ioctl = skge_ioctl,
3810 .ndo_get_stats = skge_get_stats,
3811 .ndo_tx_timeout = skge_tx_timeout,
3812 .ndo_change_mtu = skge_change_mtu,
3813 .ndo_validate_addr = eth_validate_addr,
3814 .ndo_set_multicast_list = skge_set_multicast,
3815 .ndo_set_mac_address = skge_set_mac_address,
3816#ifdef CONFIG_NET_POLL_CONTROLLER
3817 .ndo_poll_controller = skge_netpoll,
3818#endif
3819};
3820
3821
3807/* Initialize network device */ 3822/* Initialize network device */
3808static struct net_device *skge_devinit(struct skge_hw *hw, int port, 3823static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3809 int highmem) 3824 int highmem)
@@ -3817,24 +3832,9 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3817 } 3832 }
3818 3833
3819 SET_NETDEV_DEV(dev, &hw->pdev->dev); 3834 SET_NETDEV_DEV(dev, &hw->pdev->dev);
3820 dev->open = skge_up; 3835 dev->netdev_ops = &skge_netdev_ops;
3821 dev->stop = skge_down; 3836 dev->ethtool_ops = &skge_ethtool_ops;
3822 dev->do_ioctl = skge_ioctl;
3823 dev->hard_start_xmit = skge_xmit_frame;
3824 dev->get_stats = skge_get_stats;
3825 if (hw->chip_id == CHIP_ID_GENESIS)
3826 dev->set_multicast_list = genesis_set_multicast;
3827 else
3828 dev->set_multicast_list = yukon_set_multicast;
3829
3830 dev->set_mac_address = skge_set_mac_address;
3831 dev->change_mtu = skge_change_mtu;
3832 SET_ETHTOOL_OPS(dev, &skge_ethtool_ops);
3833 dev->tx_timeout = skge_tx_timeout;
3834 dev->watchdog_timeo = TX_WATCHDOG; 3837 dev->watchdog_timeo = TX_WATCHDOG;
3835#ifdef CONFIG_NET_POLL_CONTROLLER
3836 dev->poll_controller = skge_netpoll;
3837#endif
3838 dev->irq = hw->pdev->irq; 3838 dev->irq = hw->pdev->irq;
3839 3839
3840 if (highmem) 3840 if (highmem)
@@ -3856,7 +3856,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3856 skge->speed = -1; 3856 skge->speed = -1;
3857 skge->advertising = skge_supported_modes(hw); 3857 skge->advertising = skge_supported_modes(hw);
3858 3858
3859 if (pci_wake_enabled(hw->pdev)) 3859 if (device_may_wakeup(&hw->pdev->dev))
3860 skge->wol = wol_supported(hw) & WAKE_MAGIC; 3860 skge->wol = wol_supported(hw) & WAKE_MAGIC;
3861 3861
3862 hw->dev[port] = dev; 3862 hw->dev[port] = dev;
@@ -3885,11 +3885,10 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3885static void __devinit skge_show_addr(struct net_device *dev) 3885static void __devinit skge_show_addr(struct net_device *dev)
3886{ 3886{
3887 const struct skge_port *skge = netdev_priv(dev); 3887 const struct skge_port *skge = netdev_priv(dev);
3888 DECLARE_MAC_BUF(mac);
3889 3888
3890 if (netif_msg_probe(skge)) 3889 if (netif_msg_probe(skge))
3891 printk(KERN_INFO PFX "%s: addr %s\n", 3890 printk(KERN_INFO PFX "%s: addr %pM\n",
3892 dev->name, print_mac(mac, dev->dev_addr)); 3891 dev->name, dev->dev_addr);
3893} 3892}
3894 3893
3895static int __devinit skge_probe(struct pci_dev *pdev, 3894static int __devinit skge_probe(struct pci_dev *pdev,
@@ -4082,8 +4081,8 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
4082 } 4081 }
4083 4082
4084 skge_write32(hw, B0_IMSK, 0); 4083 skge_write32(hw, B0_IMSK, 0);
4085 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); 4084
4086 pci_set_power_state(pdev, pci_choose_state(pdev, state)); 4085 pci_prepare_to_sleep(pdev);
4087 4086
4088 return 0; 4087 return 0;
4089} 4088}
@@ -4096,7 +4095,7 @@ static int skge_resume(struct pci_dev *pdev)
4096 if (!hw) 4095 if (!hw)
4097 return 0; 4096 return 0;
4098 4097
4099 err = pci_set_power_state(pdev, PCI_D0); 4098 err = pci_back_from_sleep(pdev);
4100 if (err) 4099 if (err)
4101 goto out; 4100 goto out;
4102 4101
@@ -4104,8 +4103,6 @@ static int skge_resume(struct pci_dev *pdev)
4104 if (err) 4103 if (err)
4105 goto out; 4104 goto out;
4106 4105
4107 pci_enable_wake(pdev, PCI_D0, 0);
4108
4109 err = skge_reset(hw); 4106 err = skge_reset(hw);
4110 if (err) 4107 if (err)
4111 goto out; 4108 goto out;
@@ -4146,8 +4143,8 @@ static void skge_shutdown(struct pci_dev *pdev)
4146 wol |= skge->wol; 4143 wol |= skge->wol;
4147 } 4144 }
4148 4145
4149 pci_enable_wake(pdev, PCI_D3hot, wol); 4146 if (pci_enable_wake(pdev, PCI_D3cold, wol))
4150 pci_enable_wake(pdev, PCI_D3cold, wol); 4147 pci_enable_wake(pdev, PCI_D3hot, wol);
4151 4148
4152 pci_disable_device(pdev); 4149 pci_disable_device(pdev);
4153 pci_set_power_state(pdev, PCI_D3hot); 4150 pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 3813d15e2df7..3668e81e474d 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -3979,7 +3979,7 @@ static int sky2_device_event(struct notifier_block *unused,
3979 struct net_device *dev = ptr; 3979 struct net_device *dev = ptr;
3980 struct sky2_port *sky2 = netdev_priv(dev); 3980 struct sky2_port *sky2 = netdev_priv(dev);
3981 3981
3982 if (dev->open != sky2_up || !sky2_debug) 3982 if (dev->netdev_ops->ndo_open != sky2_up || !sky2_debug)
3983 return NOTIFY_DONE; 3983 return NOTIFY_DONE;
3984 3984
3985 switch(event) { 3985 switch(event) {
@@ -4041,6 +4041,41 @@ static __exit void sky2_debug_cleanup(void)
4041#define sky2_debug_cleanup() 4041#define sky2_debug_cleanup()
4042#endif 4042#endif
4043 4043
4044/* Two copies of network device operations to handle special case of
4045 not allowing netpoll on second port */
4046static const struct net_device_ops sky2_netdev_ops[2] = {
4047 {
4048 .ndo_open = sky2_up,
4049 .ndo_stop = sky2_down,
4050 .ndo_start_xmit = sky2_xmit_frame,
4051 .ndo_do_ioctl = sky2_ioctl,
4052 .ndo_validate_addr = eth_validate_addr,
4053 .ndo_set_mac_address = sky2_set_mac_address,
4054 .ndo_set_multicast_list = sky2_set_multicast,
4055 .ndo_change_mtu = sky2_change_mtu,
4056 .ndo_tx_timeout = sky2_tx_timeout,
4057#ifdef SKY2_VLAN_TAG_USED
4058 .ndo_vlan_rx_register = sky2_vlan_rx_register,
4059#endif
4060#ifdef CONFIG_NET_POLL_CONTROLLER
4061 .ndo_poll_controller = sky2_netpoll,
4062#endif
4063 },
4064 {
4065 .ndo_open = sky2_up,
4066 .ndo_stop = sky2_down,
4067 .ndo_start_xmit = sky2_xmit_frame,
4068 .ndo_do_ioctl = sky2_ioctl,
4069 .ndo_validate_addr = eth_validate_addr,
4070 .ndo_set_mac_address = sky2_set_mac_address,
4071 .ndo_set_multicast_list = sky2_set_multicast,
4072 .ndo_change_mtu = sky2_change_mtu,
4073 .ndo_tx_timeout = sky2_tx_timeout,
4074#ifdef SKY2_VLAN_TAG_USED
4075 .ndo_vlan_rx_register = sky2_vlan_rx_register,
4076#endif
4077 },
4078};
4044 4079
4045/* Initialize network device */ 4080/* Initialize network device */
4046static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, 4081static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
@@ -4057,20 +4092,9 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4057 4092
4058 SET_NETDEV_DEV(dev, &hw->pdev->dev); 4093 SET_NETDEV_DEV(dev, &hw->pdev->dev);
4059 dev->irq = hw->pdev->irq; 4094 dev->irq = hw->pdev->irq;
4060 dev->open = sky2_up;
4061 dev->stop = sky2_down;
4062 dev->do_ioctl = sky2_ioctl;
4063 dev->hard_start_xmit = sky2_xmit_frame;
4064 dev->set_multicast_list = sky2_set_multicast;
4065 dev->set_mac_address = sky2_set_mac_address;
4066 dev->change_mtu = sky2_change_mtu;
4067 SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops); 4095 SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
4068 dev->tx_timeout = sky2_tx_timeout;
4069 dev->watchdog_timeo = TX_WATCHDOG; 4096 dev->watchdog_timeo = TX_WATCHDOG;
4070#ifdef CONFIG_NET_POLL_CONTROLLER 4097 dev->netdev_ops = &sky2_netdev_ops[port];
4071 if (port == 0)
4072 dev->poll_controller = sky2_netpoll;
4073#endif
4074 4098
4075 sky2 = netdev_priv(dev); 4099 sky2 = netdev_priv(dev);
4076 sky2->netdev = dev; 4100 sky2->netdev = dev;
@@ -4104,7 +4128,6 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4104 if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && 4128 if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
4105 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) { 4129 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
4106 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 4130 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
4107 dev->vlan_rx_register = sky2_vlan_rx_register;
4108 } 4131 }
4109#endif 4132#endif
4110 4133
@@ -4118,11 +4141,10 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4118static void __devinit sky2_show_addr(struct net_device *dev) 4141static void __devinit sky2_show_addr(struct net_device *dev)
4119{ 4142{
4120 const struct sky2_port *sky2 = netdev_priv(dev); 4143 const struct sky2_port *sky2 = netdev_priv(dev);
4121 DECLARE_MAC_BUF(mac);
4122 4144
4123 if (netif_msg_probe(sky2)) 4145 if (netif_msg_probe(sky2))
4124 printk(KERN_INFO PFX "%s: addr %s\n", 4146 printk(KERN_INFO PFX "%s: addr %pM\n",
4125 dev->name, print_mac(mac, dev->dev_addr)); 4147 dev->name, dev->dev_addr);
4126} 4148}
4127 4149
4128/* Handle software interrupt used during MSI test */ 4150/* Handle software interrupt used during MSI test */
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 1d58991d395b..8e1c0baf6958 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -365,7 +365,6 @@ static void sl_bump(struct slip *sl)
365 skb_reset_mac_header(skb); 365 skb_reset_mac_header(skb);
366 skb->protocol = htons(ETH_P_IP); 366 skb->protocol = htons(ETH_P_IP);
367 netif_rx(skb); 367 netif_rx(skb);
368 sl->dev->last_rx = jiffies;
369 sl->rx_packets++; 368 sl->rx_packets++;
370} 369}
371 370
@@ -402,7 +401,7 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
402 * if we did not request it before write operation. 401 * if we did not request it before write operation.
403 * 14 Oct 1994 Dmitry Gorodchanin. 402 * 14 Oct 1994 Dmitry Gorodchanin.
404 */ 403 */
405 sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); 404 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
406 actual = sl->tty->ops->write(sl->tty, sl->xbuff, count); 405 actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
407#ifdef SL_CHECK_TRANSMIT 406#ifdef SL_CHECK_TRANSMIT
408 sl->dev->trans_start = jiffies; 407 sl->dev->trans_start = jiffies;
@@ -432,7 +431,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
432 /* Now serial buffer is almost free & we can start 431 /* Now serial buffer is almost free & we can start
433 * transmission of another packet */ 432 * transmission of another packet */
434 sl->tx_packets++; 433 sl->tx_packets++;
435 tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 434 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
436 sl_unlock(sl); 435 sl_unlock(sl);
437 return; 436 return;
438 } 437 }
@@ -465,7 +464,7 @@ static void sl_tx_timeout(struct net_device *dev)
465 (tty_chars_in_buffer(sl->tty) || sl->xleft) ? 464 (tty_chars_in_buffer(sl->tty) || sl->xleft) ?
466 "bad line quality" : "driver error"); 465 "bad line quality" : "driver error");
467 sl->xleft = 0; 466 sl->xleft = 0;
468 sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 467 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
469 sl_unlock(sl); 468 sl_unlock(sl);
470#endif 469#endif
471 } 470 }
@@ -515,10 +514,9 @@ sl_close(struct net_device *dev)
515 struct slip *sl = netdev_priv(dev); 514 struct slip *sl = netdev_priv(dev);
516 515
517 spin_lock_bh(&sl->lock); 516 spin_lock_bh(&sl->lock);
518 if (sl->tty) { 517 if (sl->tty)
519 /* TTY discipline is running. */ 518 /* TTY discipline is running. */
520 sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 519 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
521 }
522 netif_stop_queue(dev); 520 netif_stop_queue(dev);
523 sl->rcount = 0; 521 sl->rcount = 0;
524 sl->xleft = 0; 522 sl->xleft = 0;
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index d6abb68e6e2f..404b80e5ba11 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -182,6 +182,22 @@ static char *smc_mca_adapter_names[] __initdata = {
182 182
183static int ultra_found = 0; 183static int ultra_found = 0;
184 184
185
186static const struct net_device_ops ultramca_netdev_ops = {
187 .ndo_open = ultramca_open,
188 .ndo_stop = ultramca_close_card,
189
190 .ndo_start_xmit = ei_start_xmit,
191 .ndo_tx_timeout = ei_tx_timeout,
192 .ndo_get_stats = ei_get_stats,
193 .ndo_set_multicast_list = ei_set_multicast_list,
194 .ndo_validate_addr = eth_validate_addr,
195 .ndo_change_mtu = eth_change_mtu,
196#ifdef CONFIG_NET_POLL_CONTROLLER
197 .ndo_poll_controller = ei_poll,
198#endif
199};
200
185static int __init ultramca_probe(struct device *gen_dev) 201static int __init ultramca_probe(struct device *gen_dev)
186{ 202{
187 unsigned short ioaddr; 203 unsigned short ioaddr;
@@ -196,7 +212,6 @@ static int __init ultramca_probe(struct device *gen_dev)
196 int tirq = 0; 212 int tirq = 0;
197 int base_addr = ultra_io[ultra_found]; 213 int base_addr = ultra_io[ultra_found];
198 int irq = ultra_irq[ultra_found]; 214 int irq = ultra_irq[ultra_found];
199 DECLARE_MAC_BUF(mac);
200 215
201 if (base_addr || irq) { 216 if (base_addr || irq) {
202 printk(KERN_INFO "Probing for SMC MCA adapter"); 217 printk(KERN_INFO "Probing for SMC MCA adapter");
@@ -334,8 +349,8 @@ static int __init ultramca_probe(struct device *gen_dev)
334 for (i = 0; i < 6; i++) 349 for (i = 0; i < 6; i++)
335 dev->dev_addr[i] = inb(ioaddr + 8 + i); 350 dev->dev_addr[i] = inb(ioaddr + 8 + i);
336 351
337 printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x, %s", 352 printk(KERN_INFO "smc_mca[%d]: Parameters: %#3x, %pM",
338 slot + 1, ioaddr, print_mac(mac, dev->dev_addr)); 353 slot + 1, ioaddr, dev->dev_addr);
339 354
340 /* Switch from the station address to the alternate register set 355 /* Switch from the station address to the alternate register set
341 * and read the useful registers there. 356 * and read the useful registers there.
@@ -385,11 +400,7 @@ static int __init ultramca_probe(struct device *gen_dev)
385 400
386 ei_status.priv = slot; 401 ei_status.priv = slot;
387 402
388 dev->open = &ultramca_open; 403 dev->netdev_ops = &ultramca_netdev_ops;
389 dev->stop = &ultramca_close_card;
390#ifdef CONFIG_NET_POLL_CONTROLLER
391 dev->poll_controller = ei_poll;
392#endif
393 404
394 NS8390_init(dev, 0); 405 NS8390_init(dev, 0);
395 406
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 00d6cf1af484..b3866089a206 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -187,6 +187,21 @@ out:
187} 187}
188#endif 188#endif
189 189
190static const struct net_device_ops ultra_netdev_ops = {
191 .ndo_open = ultra_open,
192 .ndo_stop = ultra_close_card,
193
194 .ndo_start_xmit = ei_start_xmit,
195 .ndo_tx_timeout = ei_tx_timeout,
196 .ndo_get_stats = ei_get_stats,
197 .ndo_set_multicast_list = ei_set_multicast_list,
198 .ndo_validate_addr = eth_validate_addr,
199 .ndo_change_mtu = eth_change_mtu,
200#ifdef CONFIG_NET_POLL_CONTROLLER
201 .ndo_poll_controller = ei_poll,
202#endif
203};
204
190static int __init ultra_probe1(struct net_device *dev, int ioaddr) 205static int __init ultra_probe1(struct net_device *dev, int ioaddr)
191{ 206{
192 int i, retval; 207 int i, retval;
@@ -198,7 +213,6 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
198 unsigned char num_pages, irqreg, addr, piomode; 213 unsigned char num_pages, irqreg, addr, piomode;
199 unsigned char idreg = inb(ioaddr + 7); 214 unsigned char idreg = inb(ioaddr + 7);
200 unsigned char reg4 = inb(ioaddr + 4) & 0x7f; 215 unsigned char reg4 = inb(ioaddr + 4) & 0x7f;
201 DECLARE_MAC_BUF(mac);
202 216
203 if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME)) 217 if (!request_region(ioaddr, ULTRA_IO_EXTENT, DRV_NAME))
204 return -EBUSY; 218 return -EBUSY;
@@ -228,8 +242,8 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
228 for (i = 0; i < 6; i++) 242 for (i = 0; i < 6; i++)
229 dev->dev_addr[i] = inb(ioaddr + 8 + i); 243 dev->dev_addr[i] = inb(ioaddr + 8 + i);
230 244
231 printk("%s: %s at %#3x, %s", dev->name, model_name, 245 printk("%s: %s at %#3x, %pM", dev->name, model_name,
232 ioaddr, print_mac(mac, dev->dev_addr)); 246 ioaddr, dev->dev_addr);
233 247
234 /* Switch from the station address to the alternate register set and 248 /* Switch from the station address to the alternate register set and
235 read the useful registers there. */ 249 read the useful registers there. */
@@ -301,11 +315,8 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
301 ei_status.get_8390_hdr = &ultra_get_8390_hdr; 315 ei_status.get_8390_hdr = &ultra_get_8390_hdr;
302 } 316 }
303 ei_status.reset_8390 = &ultra_reset_8390; 317 ei_status.reset_8390 = &ultra_reset_8390;
304 dev->open = &ultra_open; 318
305 dev->stop = &ultra_close_card; 319 dev->netdev_ops = &ultra_netdev_ops;
306#ifdef CONFIG_NET_POLL_CONTROLLER
307 dev->poll_controller = ei_poll;
308#endif
309 NS8390_init(dev, 0); 320 NS8390_init(dev, 0);
310 321
311 retval = register_netdev(dev); 322 retval = register_netdev(dev);
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c
index a5a91ace28cc..cb6c097a2e0a 100644
--- a/drivers/net/smc-ultra32.c
+++ b/drivers/net/smc-ultra32.c
@@ -163,7 +163,6 @@ static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
163 unsigned char idreg; 163 unsigned char idreg;
164 unsigned char reg4; 164 unsigned char reg4;
165 const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"}; 165 const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"};
166 DECLARE_MAC_BUF(mac);
167 166
168 if (!request_region(ioaddr, ULTRA32_IO_EXTENT, DRV_NAME)) 167 if (!request_region(ioaddr, ULTRA32_IO_EXTENT, DRV_NAME))
169 return -EBUSY; 168 return -EBUSY;
@@ -207,8 +206,8 @@ static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
207 for (i = 0; i < 6; i++) 206 for (i = 0; i < 6; i++)
208 dev->dev_addr[i] = inb(ioaddr + 8 + i); 207 dev->dev_addr[i] = inb(ioaddr + 8 + i);
209 208
210 printk("%s: %s at 0x%X, %s", 209 printk("%s: %s at 0x%X, %pM",
211 dev->name, model_name, ioaddr, print_mac(mac, dev->dev_addr)); 210 dev->name, model_name, ioaddr, dev->dev_addr);
212 211
213 /* Switch from the station address to the alternate register set and 212 /* Switch from the station address to the alternate register set and
214 read the useful registers there. */ 213 read the useful registers there. */
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 9a16a79b67d0..bf3aa2a1effe 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -439,7 +439,6 @@ static inline void smc911x_rcv(struct net_device *dev)
439 439
440 DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name); 440 DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
441 PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64); 441 PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
442 dev->last_rx = jiffies;
443 skb->protocol = eth_type_trans(skb, dev); 442 skb->protocol = eth_type_trans(skb, dev);
444 netif_rx(skb); 443 netif_rx(skb);
445 dev->stats.rx_packets++; 444 dev->stats.rx_packets++;
@@ -1231,7 +1230,6 @@ smc911x_rx_dma_irq(int dma, void *data)
1231 BUG_ON(skb == NULL); 1230 BUG_ON(skb == NULL);
1232 lp->current_rx_skb = NULL; 1231 lp->current_rx_skb = NULL;
1233 PRINT_PKT(skb->data, skb->len); 1232 PRINT_PKT(skb->data, skb->len);
1234 dev->last_rx = jiffies;
1235 skb->protocol = eth_type_trans(skb, dev); 1233 skb->protocol = eth_type_trans(skb, dev);
1236 dev->stats.rx_packets++; 1234 dev->stats.rx_packets++;
1237 dev->stats.rx_bytes += skb->len; 1235 dev->stats.rx_bytes += skb->len;
@@ -2050,9 +2048,6 @@ err_out:
2050 */ 2048 */
2051static int __devinit smc911x_drv_probe(struct platform_device *pdev) 2049static int __devinit smc911x_drv_probe(struct platform_device *pdev)
2052{ 2050{
2053#ifdef SMC_DYNAMIC_BUS_CONFIG
2054 struct smc911x_platdata *pd = pdev->dev.platform_data;
2055#endif
2056 struct net_device *ndev; 2051 struct net_device *ndev;
2057 struct resource *res; 2052 struct resource *res;
2058 struct smc911x_local *lp; 2053 struct smc911x_local *lp;
@@ -2087,11 +2082,14 @@ static int __devinit smc911x_drv_probe(struct platform_device *pdev)
2087 lp = netdev_priv(ndev); 2082 lp = netdev_priv(ndev);
2088 lp->netdev = ndev; 2083 lp->netdev = ndev;
2089#ifdef SMC_DYNAMIC_BUS_CONFIG 2084#ifdef SMC_DYNAMIC_BUS_CONFIG
2090 if (!pd) { 2085 {
2091 ret = -EINVAL; 2086 struct smc911x_platdata *pd = pdev->dev.platform_data;
2092 goto release_both; 2087 if (!pd) {
2088 ret = -EINVAL;
2089 goto release_both;
2090 }
2091 memcpy(&lp->cfg, pd, sizeof(lp->cfg));
2093 } 2092 }
2094 memcpy(&lp->cfg, pd, sizeof(lp->cfg));
2095#endif 2093#endif
2096 2094
2097 addr = ioremap(res->start, SMC911X_IO_EXTENT); 2095 addr = ioremap(res->start, SMC911X_IO_EXTENT);
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index de67744c4a2a..18d653bbd4e0 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -764,7 +764,7 @@ out:
764 . interrupt, so an auto-detect routine can detect it, and find the IRQ, 764 . interrupt, so an auto-detect routine can detect it, and find the IRQ,
765 ------------------------------------------------------------------------ 765 ------------------------------------------------------------------------
766*/ 766*/
767int __init smc_findirq( int ioaddr ) 767static int __init smc_findirq(int ioaddr)
768{ 768{
769#ifndef NO_AUTOPROBE 769#ifndef NO_AUTOPROBE
770 int timeout = 20; 770 int timeout = 20;
@@ -876,8 +876,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
876 word memory_info_register; 876 word memory_info_register;
877 word memory_cfg_register; 877 word memory_cfg_register;
878 878
879 DECLARE_MAC_BUF(mac);
880
881 /* Grab the region so that no one else tries to probe our ioports. */ 879 /* Grab the region so that no one else tries to probe our ioports. */
882 if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME)) 880 if (!request_region(ioaddr, SMC_IO_EXTENT, DRV_NAME))
883 return -EBUSY; 881 return -EBUSY;
@@ -1033,10 +1031,10 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
1033 /* 1031 /*
1034 . Print the Ethernet address 1032 . Print the Ethernet address
1035 */ 1033 */
1036 printk("ADDR: %s\n", print_mac(mac, dev->dev_addr)); 1034 printk("ADDR: %pM\n", dev->dev_addr);
1037 1035
1038 /* set the private data to zero by default */ 1036 /* set the private data to zero by default */
1039 memset(dev->priv, 0, sizeof(struct smc_local)); 1037 memset(netdev_priv(dev), 0, sizeof(struct smc_local));
1040 1038
1041 /* Grab the IRQ */ 1039 /* Grab the IRQ */
1042 retval = request_irq(dev->irq, &smc_interrupt, 0, DRV_NAME, dev); 1040 retval = request_irq(dev->irq, &smc_interrupt, 0, DRV_NAME, dev);
@@ -1110,7 +1108,7 @@ static int smc_open(struct net_device *dev)
1110 int i; /* used to set hw ethernet address */ 1108 int i; /* used to set hw ethernet address */
1111 1109
1112 /* clear out all the junk that was put here before... */ 1110 /* clear out all the junk that was put here before... */
1113 memset(dev->priv, 0, sizeof(struct smc_local)); 1111 memset(netdev_priv(dev), 0, sizeof(struct smc_local));
1114 1112
1115 /* reset the hardware */ 1113 /* reset the hardware */
1116 1114
@@ -1166,7 +1164,7 @@ static void smc_timeout(struct net_device *dev)
1166 smc_enable( dev->base_addr ); 1164 smc_enable( dev->base_addr );
1167 dev->trans_start = jiffies; 1165 dev->trans_start = jiffies;
1168 /* clear anything saved */ 1166 /* clear anything saved */
1169 ((struct smc_local *)dev->priv)->saved_skb = NULL; 1167 ((struct smc_local *)netdev_priv(dev))->saved_skb = NULL;
1170 netif_wake_queue(dev); 1168 netif_wake_queue(dev);
1171} 1169}
1172 1170
@@ -1272,7 +1270,6 @@ static void smc_rcv(struct net_device *dev)
1272 1270
1273 skb->protocol = eth_type_trans(skb, dev ); 1271 skb->protocol = eth_type_trans(skb, dev );
1274 netif_rx(skb); 1272 netif_rx(skb);
1275 dev->last_rx = jiffies;
1276 dev->stats.rx_packets++; 1273 dev->stats.rx_packets++;
1277 dev->stats.rx_bytes += packet_length; 1274 dev->stats.rx_bytes += packet_length;
1278 } else { 1275 } else {
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 35c56abf4113..b215a8d85e62 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -90,33 +90,6 @@ static const char version[] =
90 90
91#include "smc91x.h" 91#include "smc91x.h"
92 92
93#ifdef CONFIG_ISA
94/*
95 * the LAN91C111 can be at any of the following port addresses. To change,
96 * for a slightly different card, you can add it to the array. Keep in
97 * mind that the array must end in zero.
98 */
99static unsigned int smc_portlist[] __initdata = {
100 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0,
101 0x300, 0x320, 0x340, 0x360, 0x380, 0x3A0, 0x3C0, 0x3E0, 0
102};
103
104#ifndef SMC_IOADDR
105# define SMC_IOADDR -1
106#endif
107static unsigned long io = SMC_IOADDR;
108module_param(io, ulong, 0400);
109MODULE_PARM_DESC(io, "I/O base address");
110
111#ifndef SMC_IRQ
112# define SMC_IRQ -1
113#endif
114static int irq = SMC_IRQ;
115module_param(irq, int, 0400);
116MODULE_PARM_DESC(irq, "IRQ number");
117
118#endif /* CONFIG_ISA */
119
120#ifndef SMC_NOWAIT 93#ifndef SMC_NOWAIT
121# define SMC_NOWAIT 0 94# define SMC_NOWAIT 0
122#endif 95#endif
@@ -518,7 +491,6 @@ static inline void smc_rcv(struct net_device *dev)
518 491
519 PRINT_PKT(data, packet_len - 4); 492 PRINT_PKT(data, packet_len - 4);
520 493
521 dev->last_rx = jiffies;
522 skb->protocol = eth_type_trans(skb, dev); 494 skb->protocol = eth_type_trans(skb, dev);
523 netif_rx(skb); 495 netif_rx(skb);
524 dev->stats.rx_packets++; 496 dev->stats.rx_packets++;
@@ -1778,7 +1750,6 @@ static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr,
1778 int retval; 1750 int retval;
1779 unsigned int val, revision_register; 1751 unsigned int val, revision_register;
1780 const char *version_string; 1752 const char *version_string;
1781 DECLARE_MAC_BUF(mac);
1782 1753
1783 DBG(2, "%s: %s\n", CARDNAME, __func__); 1754 DBG(2, "%s: %s\n", CARDNAME, __func__);
1784 1755
@@ -1972,8 +1943,8 @@ static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr,
1972 "set using ifconfig\n", dev->name); 1943 "set using ifconfig\n", dev->name);
1973 } else { 1944 } else {
1974 /* Print the Ethernet address */ 1945 /* Print the Ethernet address */
1975 printk("%s: Ethernet addr: %s\n", 1946 printk("%s: Ethernet addr: %pM\n",
1976 dev->name, print_mac(mac, dev->dev_addr)); 1947 dev->name, dev->dev_addr);
1977 } 1948 }
1978 1949
1979 if (lp->phy_type == 0) { 1950 if (lp->phy_type == 0) {
@@ -2316,15 +2287,6 @@ static struct platform_driver smc_driver = {
2316 2287
2317static int __init smc_init(void) 2288static int __init smc_init(void)
2318{ 2289{
2319#ifdef MODULE
2320#ifdef CONFIG_ISA
2321 if (io == -1)
2322 printk(KERN_WARNING
2323 "%s: You shouldn't use auto-probing with insmod!\n",
2324 CARDNAME);
2325#endif
2326#endif
2327
2328 return platform_driver_register(&smc_driver); 2290 return platform_driver_register(&smc_driver);
2329} 2291}
2330 2292
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index a07cc9351c6b..3e7c6a3cbc65 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -87,49 +87,28 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
87#define RPC_LSA_DEFAULT RPC_LED_100_10 87#define RPC_LSA_DEFAULT RPC_LED_100_10
88#define RPC_LSB_DEFAULT RPC_LED_TX_RX 88#define RPC_LSB_DEFAULT RPC_LED_TX_RX
89 89
90# if defined (CONFIG_BFIN561_EZKIT)
91#define SMC_CAN_USE_8BIT 0 90#define SMC_CAN_USE_8BIT 0
92#define SMC_CAN_USE_16BIT 1 91#define SMC_CAN_USE_16BIT 1
92# if defined(CONFIG_BF561)
93#define SMC_CAN_USE_32BIT 1 93#define SMC_CAN_USE_32BIT 1
94#define SMC_IO_SHIFT 0
95#define SMC_NOWAIT 1
96#define SMC_USE_BFIN_DMA 0
97
98
99#define SMC_inw(a, r) readw((a) + (r))
100#define SMC_outw(v, a, r) writew(v, (a) + (r))
101#define SMC_inl(a, r) readl((a) + (r))
102#define SMC_outl(v, a, r) writel(v, (a) + (r))
103#define SMC_outsl(a, r, p, l) outsl((unsigned long *)((a) + (r)), p, l)
104#define SMC_insl(a, r, p, l) insl ((unsigned long *)((a) + (r)), p, l)
105# else 94# else
106#define SMC_CAN_USE_8BIT 0
107#define SMC_CAN_USE_16BIT 1
108#define SMC_CAN_USE_32BIT 0 95#define SMC_CAN_USE_32BIT 0
96# endif
109#define SMC_IO_SHIFT 0 97#define SMC_IO_SHIFT 0
110#define SMC_NOWAIT 1 98#define SMC_NOWAIT 1
111#define SMC_USE_BFIN_DMA 0 99#define SMC_USE_BFIN_DMA 0
112 100
113 101#define SMC_inw(a, r) readw((a) + (r))
114#define SMC_inw(a, r) readw((a) + (r)) 102#define SMC_outw(v, a, r) writew(v, (a) + (r))
115#define SMC_outw(v, a, r) writew(v, (a) + (r)) 103#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
116#define SMC_outsw(a, r, p, l) outsw((unsigned long *)((a) + (r)), p, l) 104#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
117#define SMC_insw(a, r, p, l) insw ((unsigned long *)((a) + (r)), p, l) 105# if SMC_CAN_USE_32BIT
106#define SMC_inl(a, r) readl((a) + (r))
107#define SMC_outl(v, a, r) writel(v, (a) + (r))
108#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
109#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
118# endif 110# endif
119/* check if the mac in reg is valid */ 111
120#define SMC_GET_MAC_ADDR(lp, addr) \
121 do { \
122 unsigned int __v; \
123 __v = SMC_inw(ioaddr, ADDR0_REG(lp)); \
124 addr[0] = __v; addr[1] = __v >> 8; \
125 __v = SMC_inw(ioaddr, ADDR1_REG(lp)); \
126 addr[2] = __v; addr[3] = __v >> 8; \
127 __v = SMC_inw(ioaddr, ADDR2_REG(lp)); \
128 addr[4] = __v; addr[5] = __v >> 8; \
129 if (*(u32 *)(&addr[0]) == 0xFFFFFFFF) { \
130 random_ether_addr(addr); \
131 } \
132 } while (0)
133#elif defined(CONFIG_REDWOOD_5) || defined(CONFIG_REDWOOD_6) 112#elif defined(CONFIG_REDWOOD_5) || defined(CONFIG_REDWOOD_6)
134 113
135/* We can only do 16-bit reads and writes in the static memory space. */ 114/* We can only do 16-bit reads and writes in the static memory space. */
@@ -286,19 +265,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
286 265
287#define SMC_IRQ_FLAGS (0) 266#define SMC_IRQ_FLAGS (0)
288 267
289#elif defined(CONFIG_ISA)
290
291#define SMC_CAN_USE_8BIT 1
292#define SMC_CAN_USE_16BIT 1
293#define SMC_CAN_USE_32BIT 0
294
295#define SMC_inb(a, r) inb((a) + (r))
296#define SMC_inw(a, r) inw((a) + (r))
297#define SMC_outb(v, a, r) outb(v, (a) + (r))
298#define SMC_outw(v, a, r) outw(v, (a) + (r))
299#define SMC_insw(a, r, p, l) insw((a) + (r), p, l)
300#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l)
301
302#elif defined(CONFIG_M32R) 268#elif defined(CONFIG_M32R)
303 269
304#define SMC_CAN_USE_8BIT 0 270#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
new file mode 100644
index 000000000000..5e989d884ddd
--- /dev/null
+++ b/drivers/net/smsc911x.c
@@ -0,0 +1,2071 @@
1/***************************************************************************
2 *
3 * Copyright (C) 2004-2008 SMSC
4 * Copyright (C) 2005-2008 ARM
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 ***************************************************************************
21 * Rewritten, heavily based on smsc911x simple driver by SMSC.
22 * Partly uses io macros from smc91x.c by Nicolas Pitre
23 *
24 * Supported devices:
25 * LAN9115, LAN9116, LAN9117, LAN9118
26 * LAN9215, LAN9216, LAN9217, LAN9218
27 * LAN9210, LAN9211
28 * LAN9220, LAN9221
29 *
30 */
31
32#include <linux/crc32.h>
33#include <linux/delay.h>
34#include <linux/errno.h>
35#include <linux/etherdevice.h>
36#include <linux/ethtool.h>
37#include <linux/init.h>
38#include <linux/ioport.h>
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/netdevice.h>
42#include <linux/platform_device.h>
43#include <linux/sched.h>
44#include <linux/slab.h>
45#include <linux/timer.h>
46#include <linux/version.h>
47#include <linux/bug.h>
48#include <linux/bitops.h>
49#include <linux/irq.h>
50#include <linux/io.h>
51#include <linux/phy.h>
52#include <linux/smsc911x.h>
53#include "smsc911x.h"
54
55#define SMSC_CHIPNAME "smsc911x"
56#define SMSC_MDIONAME "smsc911x-mdio"
57#define SMSC_DRV_VERSION "2008-10-21"
58
59MODULE_LICENSE("GPL");
60MODULE_VERSION(SMSC_DRV_VERSION);
61
62#if USE_DEBUG > 0
63static int debug = 16;
64#else
65static int debug = 3;
66#endif
67
68module_param(debug, int, 0);
69MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71struct smsc911x_data {
72 void __iomem *ioaddr;
73
74 unsigned int idrev;
75
76 /* used to decide which workarounds apply */
77 unsigned int generation;
78
79 /* device configuration (copied from platform_data during probe) */
80 struct smsc911x_platform_config config;
81
82 /* This needs to be acquired before calling any of below:
83 * smsc911x_mac_read(), smsc911x_mac_write()
84 */
85 spinlock_t mac_lock;
86
87 /* spinlock to ensure 16-bit accesses are serialised.
88 * unused with a 32-bit bus */
89 spinlock_t dev_lock;
90
91 struct phy_device *phy_dev;
92 struct mii_bus *mii_bus;
93 int phy_irq[PHY_MAX_ADDR];
94 unsigned int using_extphy;
95 int last_duplex;
96 int last_carrier;
97
98 u32 msg_enable;
99 unsigned int gpio_setting;
100 unsigned int gpio_orig_setting;
101 struct net_device *dev;
102 struct napi_struct napi;
103
104 unsigned int software_irq_signal;
105
106#ifdef USE_PHY_WORK_AROUND
107#define MIN_PACKET_SIZE (64)
108 char loopback_tx_pkt[MIN_PACKET_SIZE];
109 char loopback_rx_pkt[MIN_PACKET_SIZE];
110 unsigned int resetcount;
111#endif
112
113 /* Members for Multicast filter workaround */
114 unsigned int multicast_update_pending;
115 unsigned int set_bits_mask;
116 unsigned int clear_bits_mask;
117 unsigned int hashhi;
118 unsigned int hashlo;
119};
120
121/* The 16-bit access functions are significantly slower, due to the locking
122 * necessary. If your bus hardware can be configured to do this for you
123 * (in response to a single 32-bit operation from software), you should use
124 * the 32-bit access functions instead. */
125
126static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
127{
128 if (pdata->config.flags & SMSC911X_USE_32BIT)
129 return readl(pdata->ioaddr + reg);
130
131 if (pdata->config.flags & SMSC911X_USE_16BIT) {
132 u32 data;
133 unsigned long flags;
134
135 /* these two 16-bit reads must be performed consecutively, so
136 * must not be interrupted by our own ISR (which would start
137 * another read operation) */
138 spin_lock_irqsave(&pdata->dev_lock, flags);
139 data = ((readw(pdata->ioaddr + reg) & 0xFFFF) |
140 ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16));
141 spin_unlock_irqrestore(&pdata->dev_lock, flags);
142
143 return data;
144 }
145
146 BUG();
147}
148
149static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
150 u32 val)
151{
152 if (pdata->config.flags & SMSC911X_USE_32BIT) {
153 writel(val, pdata->ioaddr + reg);
154 return;
155 }
156
157 if (pdata->config.flags & SMSC911X_USE_16BIT) {
158 unsigned long flags;
159
160 /* these two 16-bit writes must be performed consecutively, so
161 * must not be interrupted by our own ISR (which would start
162 * another read operation) */
163 spin_lock_irqsave(&pdata->dev_lock, flags);
164 writew(val & 0xFFFF, pdata->ioaddr + reg);
165 writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2);
166 spin_unlock_irqrestore(&pdata->dev_lock, flags);
167 return;
168 }
169
170 BUG();
171}
172
173/* Writes a packet to the TX_DATA_FIFO */
174static inline void
175smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf,
176 unsigned int wordcount)
177{
178 if (pdata->config.flags & SMSC911X_USE_32BIT) {
179 writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount);
180 return;
181 }
182
183 if (pdata->config.flags & SMSC911X_USE_16BIT) {
184 while (wordcount--)
185 smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++);
186 return;
187 }
188
189 BUG();
190}
191
192/* Reads a packet out of the RX_DATA_FIFO */
193static inline void
194smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf,
195 unsigned int wordcount)
196{
197 if (pdata->config.flags & SMSC911X_USE_32BIT) {
198 readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount);
199 return;
200 }
201
202 if (pdata->config.flags & SMSC911X_USE_16BIT) {
203 while (wordcount--)
204 *buf++ = smsc911x_reg_read(pdata, RX_DATA_FIFO);
205 return;
206 }
207
208 BUG();
209}
210
211/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
212 * and smsc911x_mac_write, so assumes mac_lock is held */
213static int smsc911x_mac_complete(struct smsc911x_data *pdata)
214{
215 int i;
216 u32 val;
217
218 SMSC_ASSERT_MAC_LOCK(pdata);
219
220 for (i = 0; i < 40; i++) {
221 val = smsc911x_reg_read(pdata, MAC_CSR_CMD);
222 if (!(val & MAC_CSR_CMD_CSR_BUSY_))
223 return 0;
224 }
225 SMSC_WARNING(HW, "Timed out waiting for MAC not BUSY. "
226 "MAC_CSR_CMD: 0x%08X", val);
227 return -EIO;
228}
229
230/* Fetches a MAC register value. Assumes mac_lock is acquired */
231static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset)
232{
233 unsigned int temp;
234
235 SMSC_ASSERT_MAC_LOCK(pdata);
236
237 temp = smsc911x_reg_read(pdata, MAC_CSR_CMD);
238 if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) {
239 SMSC_WARNING(HW, "MAC busy at entry");
240 return 0xFFFFFFFF;
241 }
242
243 /* Send the MAC cmd */
244 smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) |
245 MAC_CSR_CMD_CSR_BUSY_ | MAC_CSR_CMD_R_NOT_W_));
246
247 /* Workaround for hardware read-after-write restriction */
248 temp = smsc911x_reg_read(pdata, BYTE_TEST);
249
250 /* Wait for the read to complete */
251 if (likely(smsc911x_mac_complete(pdata) == 0))
252 return smsc911x_reg_read(pdata, MAC_CSR_DATA);
253
254 SMSC_WARNING(HW, "MAC busy after read");
255 return 0xFFFFFFFF;
256}
257
258/* Set a mac register, mac_lock must be acquired before calling */
259static void smsc911x_mac_write(struct smsc911x_data *pdata,
260 unsigned int offset, u32 val)
261{
262 unsigned int temp;
263
264 SMSC_ASSERT_MAC_LOCK(pdata);
265
266 temp = smsc911x_reg_read(pdata, MAC_CSR_CMD);
267 if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) {
268 SMSC_WARNING(HW,
269 "smsc911x_mac_write failed, MAC busy at entry");
270 return;
271 }
272
273 /* Send data to write */
274 smsc911x_reg_write(pdata, MAC_CSR_DATA, val);
275
276 /* Write the actual data */
277 smsc911x_reg_write(pdata, MAC_CSR_CMD, ((offset & 0xFF) |
278 MAC_CSR_CMD_CSR_BUSY_));
279
280 /* Workaround for hardware read-after-write restriction */
281 temp = smsc911x_reg_read(pdata, BYTE_TEST);
282
283 /* Wait for the write to complete */
284 if (likely(smsc911x_mac_complete(pdata) == 0))
285 return;
286
287 SMSC_WARNING(HW,
288 "smsc911x_mac_write failed, MAC busy after write");
289}
290
291/* Get a phy register */
292static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
293{
294 struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv;
295 unsigned long flags;
296 unsigned int addr;
297 int i, reg;
298
299 spin_lock_irqsave(&pdata->mac_lock, flags);
300
301 /* Confirm MII not busy */
302 if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
303 SMSC_WARNING(HW,
304 "MII is busy in smsc911x_mii_read???");
305 reg = -EIO;
306 goto out;
307 }
308
309 /* Set the address, index & direction (read from PHY) */
310 addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6);
311 smsc911x_mac_write(pdata, MII_ACC, addr);
312
313 /* Wait for read to complete w/ timeout */
314 for (i = 0; i < 100; i++)
315 if (!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
316 reg = smsc911x_mac_read(pdata, MII_DATA);
317 goto out;
318 }
319
320 SMSC_WARNING(HW, "Timed out waiting for MII write to finish");
321 reg = -EIO;
322
323out:
324 spin_unlock_irqrestore(&pdata->mac_lock, flags);
325 return reg;
326}
327
328/* Set a phy register */
329static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
330 u16 val)
331{
332 struct smsc911x_data *pdata = (struct smsc911x_data *)bus->priv;
333 unsigned long flags;
334 unsigned int addr;
335 int i, reg;
336
337 spin_lock_irqsave(&pdata->mac_lock, flags);
338
339 /* Confirm MII not busy */
340 if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
341 SMSC_WARNING(HW,
342 "MII is busy in smsc911x_mii_write???");
343 reg = -EIO;
344 goto out;
345 }
346
347 /* Put the data to write in the MAC */
348 smsc911x_mac_write(pdata, MII_DATA, val);
349
350 /* Set the address, index & direction (write to PHY) */
351 addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) |
352 MII_ACC_MII_WRITE_;
353 smsc911x_mac_write(pdata, MII_ACC, addr);
354
355 /* Wait for write to complete w/ timeout */
356 for (i = 0; i < 100; i++)
357 if (!(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
358 reg = 0;
359 goto out;
360 }
361
362 SMSC_WARNING(HW, "Timed out waiting for MII write to finish");
363 reg = -EIO;
364
365out:
366 spin_unlock_irqrestore(&pdata->mac_lock, flags);
367 return reg;
368}
369
370/* Autodetects and initialises external phy for SMSC9115 and SMSC9117 flavors.
371 * If something goes wrong, returns -ENODEV to revert back to internal phy.
372 * Performed at initialisation only, so interrupts are enabled */
373static int smsc911x_phy_initialise_external(struct smsc911x_data *pdata)
374{
375 unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG);
376
377 /* External phy is requested, supported, and detected */
378 if (hwcfg & HW_CFG_EXT_PHY_DET_) {
379
380 /* Switch to external phy. Assuming tx and rx are stopped
381 * because smsc911x_phy_initialise is called before
382 * smsc911x_rx_initialise and tx_initialise. */
383
384 /* Disable phy clocks to the MAC */
385 hwcfg &= (~HW_CFG_PHY_CLK_SEL_);
386 hwcfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
387 smsc911x_reg_write(pdata, HW_CFG, hwcfg);
388 udelay(10); /* Enough time for clocks to stop */
389
390 /* Switch to external phy */
391 hwcfg |= HW_CFG_EXT_PHY_EN_;
392 smsc911x_reg_write(pdata, HW_CFG, hwcfg);
393
394 /* Enable phy clocks to the MAC */
395 hwcfg &= (~HW_CFG_PHY_CLK_SEL_);
396 hwcfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
397 smsc911x_reg_write(pdata, HW_CFG, hwcfg);
398 udelay(10); /* Enough time for clocks to restart */
399
400 hwcfg |= HW_CFG_SMI_SEL_;
401 smsc911x_reg_write(pdata, HW_CFG, hwcfg);
402
403 SMSC_TRACE(HW, "Successfully switched to external PHY");
404 pdata->using_extphy = 1;
405 } else {
406 SMSC_WARNING(HW, "No external PHY detected, "
407 "Using internal PHY instead.");
408 /* Use internal phy */
409 return -ENODEV;
410 }
411 return 0;
412}
413
414/* Fetches a tx status out of the status fifo */
415static unsigned int smsc911x_tx_get_txstatus(struct smsc911x_data *pdata)
416{
417 unsigned int result =
418 smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TSUSED_;
419
420 if (result != 0)
421 result = smsc911x_reg_read(pdata, TX_STATUS_FIFO);
422
423 return result;
424}
425
426/* Fetches the next rx status */
427static unsigned int smsc911x_rx_get_rxstatus(struct smsc911x_data *pdata)
428{
429 unsigned int result =
430 smsc911x_reg_read(pdata, RX_FIFO_INF) & RX_FIFO_INF_RXSUSED_;
431
432 if (result != 0)
433 result = smsc911x_reg_read(pdata, RX_STATUS_FIFO);
434
435 return result;
436}
437
438#ifdef USE_PHY_WORK_AROUND
439static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
440{
441 unsigned int tries;
442 u32 wrsz;
443 u32 rdsz;
444 ulong bufp;
445
446 for (tries = 0; tries < 10; tries++) {
447 unsigned int txcmd_a;
448 unsigned int txcmd_b;
449 unsigned int status;
450 unsigned int pktlength;
451 unsigned int i;
452
453 /* Zero-out rx packet memory */
454 memset(pdata->loopback_rx_pkt, 0, MIN_PACKET_SIZE);
455
456 /* Write tx packet to 118 */
457 txcmd_a = (u32)((ulong)pdata->loopback_tx_pkt & 0x03) << 16;
458 txcmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_;
459 txcmd_a |= MIN_PACKET_SIZE;
460
461 txcmd_b = MIN_PACKET_SIZE << 16 | MIN_PACKET_SIZE;
462
463 smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_a);
464 smsc911x_reg_write(pdata, TX_DATA_FIFO, txcmd_b);
465
466 bufp = (ulong)pdata->loopback_tx_pkt & (~0x3);
467 wrsz = MIN_PACKET_SIZE + 3;
468 wrsz += (u32)((ulong)pdata->loopback_tx_pkt & 0x3);
469 wrsz >>= 2;
470
471 smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
472
473 /* Wait till transmit is done */
474 i = 60;
475 do {
476 udelay(5);
477 status = smsc911x_tx_get_txstatus(pdata);
478 } while ((i--) && (!status));
479
480 if (!status) {
481 SMSC_WARNING(HW, "Failed to transmit "
482 "during loopback test");
483 continue;
484 }
485 if (status & TX_STS_ES_) {
486 SMSC_WARNING(HW, "Transmit encountered "
487 "errors during loopback test");
488 continue;
489 }
490
491 /* Wait till receive is done */
492 i = 60;
493 do {
494 udelay(5);
495 status = smsc911x_rx_get_rxstatus(pdata);
496 } while ((i--) && (!status));
497
498 if (!status) {
499 SMSC_WARNING(HW,
500 "Failed to receive during loopback test");
501 continue;
502 }
503 if (status & RX_STS_ES_) {
504 SMSC_WARNING(HW, "Receive encountered "
505 "errors during loopback test");
506 continue;
507 }
508
509 pktlength = ((status & 0x3FFF0000UL) >> 16);
510 bufp = (ulong)pdata->loopback_rx_pkt;
511 rdsz = pktlength + 3;
512 rdsz += (u32)((ulong)pdata->loopback_rx_pkt & 0x3);
513 rdsz >>= 2;
514
515 smsc911x_rx_readfifo(pdata, (unsigned int *)bufp, rdsz);
516
517 if (pktlength != (MIN_PACKET_SIZE + 4)) {
518 SMSC_WARNING(HW, "Unexpected packet size "
519 "during loop back test, size=%d, will retry",
520 pktlength);
521 } else {
522 unsigned int j;
523 int mismatch = 0;
524 for (j = 0; j < MIN_PACKET_SIZE; j++) {
525 if (pdata->loopback_tx_pkt[j]
526 != pdata->loopback_rx_pkt[j]) {
527 mismatch = 1;
528 break;
529 }
530 }
531 if (!mismatch) {
532 SMSC_TRACE(HW, "Successfully verified "
533 "loopback packet");
534 return 0;
535 } else {
536 SMSC_WARNING(HW, "Data mismatch "
537 "during loop back test, will retry");
538 }
539 }
540 }
541
542 return -EIO;
543}
544
545static int smsc911x_phy_reset(struct smsc911x_data *pdata)
546{
547 struct phy_device *phy_dev = pdata->phy_dev;
548 unsigned int temp;
549 unsigned int i = 100000;
550
551 BUG_ON(!phy_dev);
552 BUG_ON(!phy_dev->bus);
553
554 SMSC_TRACE(HW, "Performing PHY BCR Reset");
555 smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET);
556 do {
557 msleep(1);
558 temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr,
559 MII_BMCR);
560 } while ((i--) && (temp & BMCR_RESET));
561
562 if (temp & BMCR_RESET) {
563 SMSC_WARNING(HW, "PHY reset failed to complete.");
564 return -EIO;
565 }
566 /* Extra delay required because the phy may not be completed with
567 * its reset when BMCR_RESET is cleared. Specs say 256 uS is
568 * enough delay but using 1ms here to be safe */
569 msleep(1);
570
571 return 0;
572}
573
574static int smsc911x_phy_loopbacktest(struct net_device *dev)
575{
576 struct smsc911x_data *pdata = netdev_priv(dev);
577 struct phy_device *phy_dev = pdata->phy_dev;
578 int result = -EIO;
579 unsigned int i, val;
580 unsigned long flags;
581
582 /* Initialise tx packet using broadcast destination address */
583 memset(pdata->loopback_tx_pkt, 0xff, ETH_ALEN);
584
585 /* Use incrementing source address */
586 for (i = 6; i < 12; i++)
587 pdata->loopback_tx_pkt[i] = (char)i;
588
589 /* Set length type field */
590 pdata->loopback_tx_pkt[12] = 0x00;
591 pdata->loopback_tx_pkt[13] = 0x00;
592
593 for (i = 14; i < MIN_PACKET_SIZE; i++)
594 pdata->loopback_tx_pkt[i] = (char)i;
595
596 val = smsc911x_reg_read(pdata, HW_CFG);
597 val &= HW_CFG_TX_FIF_SZ_;
598 val |= HW_CFG_SF_;
599 smsc911x_reg_write(pdata, HW_CFG, val);
600
601 smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_);
602 smsc911x_reg_write(pdata, RX_CFG,
603 (u32)((ulong)pdata->loopback_rx_pkt & 0x03) << 8);
604
605 for (i = 0; i < 10; i++) {
606 /* Set PHY to 10/FD, no ANEG, and loopback mode */
607 smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR,
608 BMCR_LOOPBACK | BMCR_FULLDPLX);
609
610 /* Enable MAC tx/rx, FD */
611 spin_lock_irqsave(&pdata->mac_lock, flags);
612 smsc911x_mac_write(pdata, MAC_CR, MAC_CR_FDPX_
613 | MAC_CR_TXEN_ | MAC_CR_RXEN_);
614 spin_unlock_irqrestore(&pdata->mac_lock, flags);
615
616 if (smsc911x_phy_check_loopbackpkt(pdata) == 0) {
617 result = 0;
618 break;
619 }
620 pdata->resetcount++;
621
622 /* Disable MAC rx */
623 spin_lock_irqsave(&pdata->mac_lock, flags);
624 smsc911x_mac_write(pdata, MAC_CR, 0);
625 spin_unlock_irqrestore(&pdata->mac_lock, flags);
626
627 smsc911x_phy_reset(pdata);
628 }
629
630 /* Disable MAC */
631 spin_lock_irqsave(&pdata->mac_lock, flags);
632 smsc911x_mac_write(pdata, MAC_CR, 0);
633 spin_unlock_irqrestore(&pdata->mac_lock, flags);
634
635 /* Cancel PHY loopback mode */
636 smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, 0);
637
638 smsc911x_reg_write(pdata, TX_CFG, 0);
639 smsc911x_reg_write(pdata, RX_CFG, 0);
640
641 return result;
642}
643#endif /* USE_PHY_WORK_AROUND */
644
645static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
646{
647 struct phy_device *phy_dev = pdata->phy_dev;
648 u32 afc = smsc911x_reg_read(pdata, AFC_CFG);
649 u32 flow;
650 unsigned long flags;
651
652 if (phy_dev->duplex == DUPLEX_FULL) {
653 u16 lcladv = phy_read(phy_dev, MII_ADVERTISE);
654 u16 rmtadv = phy_read(phy_dev, MII_LPA);
655 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
656
657 if (cap & FLOW_CTRL_RX)
658 flow = 0xFFFF0002;
659 else
660 flow = 0;
661
662 if (cap & FLOW_CTRL_TX)
663 afc |= 0xF;
664 else
665 afc &= ~0xF;
666
667 SMSC_TRACE(HW, "rx pause %s, tx pause %s",
668 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
669 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
670 } else {
671 SMSC_TRACE(HW, "half duplex");
672 flow = 0;
673 afc |= 0xF;
674 }
675
676 spin_lock_irqsave(&pdata->mac_lock, flags);
677 smsc911x_mac_write(pdata, FLOW, flow);
678 spin_unlock_irqrestore(&pdata->mac_lock, flags);
679
680 smsc911x_reg_write(pdata, AFC_CFG, afc);
681}
682
683/* Update link mode if anything has changed. Called periodically when the
684 * PHY is in polling mode, even if nothing has changed. */
685static void smsc911x_phy_adjust_link(struct net_device *dev)
686{
687 struct smsc911x_data *pdata = netdev_priv(dev);
688 struct phy_device *phy_dev = pdata->phy_dev;
689 unsigned long flags;
690 int carrier;
691
692 if (phy_dev->duplex != pdata->last_duplex) {
693 unsigned int mac_cr;
694 SMSC_TRACE(HW, "duplex state has changed");
695
696 spin_lock_irqsave(&pdata->mac_lock, flags);
697 mac_cr = smsc911x_mac_read(pdata, MAC_CR);
698 if (phy_dev->duplex) {
699 SMSC_TRACE(HW,
700 "configuring for full duplex mode");
701 mac_cr |= MAC_CR_FDPX_;
702 } else {
703 SMSC_TRACE(HW,
704 "configuring for half duplex mode");
705 mac_cr &= ~MAC_CR_FDPX_;
706 }
707 smsc911x_mac_write(pdata, MAC_CR, mac_cr);
708 spin_unlock_irqrestore(&pdata->mac_lock, flags);
709
710 smsc911x_phy_update_flowcontrol(pdata);
711 pdata->last_duplex = phy_dev->duplex;
712 }
713
714 carrier = netif_carrier_ok(dev);
715 if (carrier != pdata->last_carrier) {
716 SMSC_TRACE(HW, "carrier state has changed");
717 if (carrier) {
718 SMSC_TRACE(HW, "configuring for carrier OK");
719 if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) &&
720 (!pdata->using_extphy)) {
721 /* Restore orginal GPIO configuration */
722 pdata->gpio_setting = pdata->gpio_orig_setting;
723 smsc911x_reg_write(pdata, GPIO_CFG,
724 pdata->gpio_setting);
725 }
726 } else {
727 SMSC_TRACE(HW, "configuring for no carrier");
728 /* Check global setting that LED1
729 * usage is 10/100 indicator */
730 pdata->gpio_setting = smsc911x_reg_read(pdata,
731 GPIO_CFG);
732 if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_)
733 && (!pdata->using_extphy)) {
734 /* Force 10/100 LED off, after saving
735 * orginal GPIO configuration */
736 pdata->gpio_orig_setting = pdata->gpio_setting;
737
738 pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_;
739 pdata->gpio_setting |= (GPIO_CFG_GPIOBUF0_
740 | GPIO_CFG_GPIODIR0_
741 | GPIO_CFG_GPIOD0_);
742 smsc911x_reg_write(pdata, GPIO_CFG,
743 pdata->gpio_setting);
744 }
745 }
746 pdata->last_carrier = carrier;
747 }
748}
749
750static int smsc911x_mii_probe(struct net_device *dev)
751{
752 struct smsc911x_data *pdata = netdev_priv(dev);
753 struct phy_device *phydev = NULL;
754 int phy_addr;
755
756 /* find the first phy */
757 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
758 if (pdata->mii_bus->phy_map[phy_addr]) {
759 phydev = pdata->mii_bus->phy_map[phy_addr];
760 SMSC_TRACE(PROBE, "PHY %d: addr %d, phy_id 0x%08X",
761 phy_addr, phydev->addr, phydev->phy_id);
762 break;
763 }
764 }
765
766 if (!phydev) {
767 pr_err("%s: no PHY found\n", dev->name);
768 return -ENODEV;
769 }
770
771 phydev = phy_connect(dev, phydev->dev.bus_id,
772 &smsc911x_phy_adjust_link, 0, pdata->config.phy_interface);
773
774 if (IS_ERR(phydev)) {
775 pr_err("%s: Could not attach to PHY\n", dev->name);
776 return PTR_ERR(phydev);
777 }
778
779 pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
780 dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq);
781
782 /* mask with MAC supported features */
783 phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
784 SUPPORTED_Asym_Pause);
785 phydev->advertising = phydev->supported;
786
787 pdata->phy_dev = phydev;
788 pdata->last_duplex = -1;
789 pdata->last_carrier = -1;
790
791#ifdef USE_PHY_WORK_AROUND
792 if (smsc911x_phy_loopbacktest(dev) < 0) {
793 SMSC_WARNING(HW, "Failed Loop Back Test");
794 return -ENODEV;
795 }
796 SMSC_TRACE(HW, "Passed Loop Back Test");
797#endif /* USE_PHY_WORK_AROUND */
798
799 SMSC_TRACE(HW, "phy initialised succesfully");
800 return 0;
801}
802
803static int __devinit smsc911x_mii_init(struct platform_device *pdev,
804 struct net_device *dev)
805{
806 struct smsc911x_data *pdata = netdev_priv(dev);
807 int err = -ENXIO, i;
808
809 pdata->mii_bus = mdiobus_alloc();
810 if (!pdata->mii_bus) {
811 err = -ENOMEM;
812 goto err_out_1;
813 }
814
815 pdata->mii_bus->name = SMSC_MDIONAME;
816 snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
817 pdata->mii_bus->priv = pdata;
818 pdata->mii_bus->read = smsc911x_mii_read;
819 pdata->mii_bus->write = smsc911x_mii_write;
820 pdata->mii_bus->irq = pdata->phy_irq;
821 for (i = 0; i < PHY_MAX_ADDR; ++i)
822 pdata->mii_bus->irq[i] = PHY_POLL;
823
824 pdata->mii_bus->parent = &pdev->dev;
825
826 pdata->using_extphy = 0;
827
828 switch (pdata->idrev & 0xFFFF0000) {
829 case 0x01170000:
830 case 0x01150000:
831 case 0x117A0000:
832 case 0x115A0000:
833 /* External PHY supported, try to autodetect */
834 if (smsc911x_phy_initialise_external(pdata) < 0) {
835 SMSC_TRACE(HW, "No external PHY detected, "
836 "using internal PHY");
837 }
838 break;
839 default:
840 SMSC_TRACE(HW, "External PHY is not supported, "
841 "using internal PHY");
842 break;
843 }
844
845 if (!pdata->using_extphy) {
846 /* Mask all PHYs except ID 1 (internal) */
847 pdata->mii_bus->phy_mask = ~(1 << 1);
848 }
849
850 if (mdiobus_register(pdata->mii_bus)) {
851 SMSC_WARNING(PROBE, "Error registering mii bus");
852 goto err_out_free_bus_2;
853 }
854
855 if (smsc911x_mii_probe(dev) < 0) {
856 SMSC_WARNING(PROBE, "Error registering mii bus");
857 goto err_out_unregister_bus_3;
858 }
859
860 return 0;
861
862err_out_unregister_bus_3:
863 mdiobus_unregister(pdata->mii_bus);
864err_out_free_bus_2:
865 mdiobus_free(pdata->mii_bus);
866err_out_1:
867 return err;
868}
869
870/* Gets the number of tx statuses in the fifo */
871static unsigned int smsc911x_tx_get_txstatcount(struct smsc911x_data *pdata)
872{
873 return (smsc911x_reg_read(pdata, TX_FIFO_INF)
874 & TX_FIFO_INF_TSUSED_) >> 16;
875}
876
/* Reads tx statuses and increments counters where necessary.
 * Drains every pending status word from the tx status fifo and folds the
 * results into dev->stats. The bit meanings noted below follow the LAN911x
 * TX status word layout as used by this driver - confirm against the
 * datasheet before relying on them. */
static void smsc911x_tx_update_txcounters(struct net_device *dev)
{
	struct smsc911x_data *pdata = netdev_priv(dev);
	unsigned int tx_stat;

	while ((tx_stat = smsc911x_tx_get_txstatus(pdata)) != 0) {
		if (unlikely(tx_stat & 0x80000000)) {
			/* In this driver the packet tag is used as the packet
			 * length. Since a packet length can never reach the
			 * size of 0x8000, this bit is reserved. It is worth
			 * noting that the "reserved bit" in the warning above
			 * does not reference a hardware defined reserved bit
			 * but rather a driver defined one.
			 */
			SMSC_WARNING(HW,
				"Packet tag reserved bit is high");
		} else {
			/* bit 15: presumably the error summary flag */
			if (unlikely(tx_stat & 0x00008000)) {
				dev->stats.tx_errors++;
			} else {
				dev->stats.tx_packets++;
				/* tag (upper 16 bits) holds the length */
				dev->stats.tx_bytes += (tx_stat >> 16);
			}
			/* bit 8: presumably excessive collisions - count all
			 * 16 attempts and the aborted frame */
			if (unlikely(tx_stat & 0x00000100)) {
				dev->stats.collisions += 16;
				dev->stats.tx_aborted_errors += 1;
			} else {
				/* bits 6:3 carry the collision count */
				dev->stats.collisions +=
					((tx_stat >> 3) & 0xF);
			}
			/* bit 11: presumably loss of carrier */
			if (unlikely(tx_stat & 0x00000800))
				dev->stats.tx_carrier_errors += 1;
			/* bit 9: presumably late collision */
			if (unlikely(tx_stat & 0x00000200)) {
				dev->stats.collisions++;
				dev->stats.tx_aborted_errors++;
			}
		}
	}
}
917
918/* Increments the Rx error counters */
919static void
920smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat)
921{
922 int crc_err = 0;
923
924 if (unlikely(rxstat & 0x00008000)) {
925 dev->stats.rx_errors++;
926 if (unlikely(rxstat & 0x00000002)) {
927 dev->stats.rx_crc_errors++;
928 crc_err = 1;
929 }
930 }
931 if (likely(!crc_err)) {
932 if (unlikely((rxstat & 0x00001020) == 0x00001020)) {
933 /* Frame type indicates length,
934 * and length error is set */
935 dev->stats.rx_length_errors++;
936 }
937 if (rxstat & RX_STS_MCAST_)
938 dev->stats.multicast++;
939 }
940}
941
942/* Quickly dumps bad packets */
943static void
944smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
945{
946 unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
947
948 if (likely(pktwords >= 4)) {
949 unsigned int timeout = 500;
950 unsigned int val;
951 smsc911x_reg_write(pdata, RX_DP_CTRL, RX_DP_CTRL_RX_FFWD_);
952 do {
953 udelay(1);
954 val = smsc911x_reg_read(pdata, RX_DP_CTRL);
955 } while (timeout-- && (val & RX_DP_CTRL_RX_FFWD_));
956
957 if (unlikely(timeout == 0))
958 SMSC_WARNING(HW, "Timed out waiting for "
959 "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val);
960 } else {
961 unsigned int temp;
962 while (pktwords--)
963 temp = smsc911x_reg_read(pdata, RX_DATA_FIFO);
964 }
965}
966
/* NAPI poll function.
 * Processes up to 'budget' received packets from the rx fifo. When the
 * fifo is empty it completes NAPI and re-enables rx-level interrupts.
 * Returns the number of packets handled (errored packets included, since
 * discarding them also costs cycles). */
static int smsc911x_poll(struct napi_struct *napi, int budget)
{
	struct smsc911x_data *pdata =
		container_of(napi, struct smsc911x_data, napi);
	struct net_device *dev = pdata->dev;
	int npackets = 0;

	while (likely(netif_running(dev)) && (npackets < budget)) {
		unsigned int pktlength;
		unsigned int pktwords;
		struct sk_buff *skb;
		unsigned int rxstat = smsc911x_rx_get_rxstatus(pdata);

		if (!rxstat) {
			unsigned int temp;
			/* We processed all packets available. Tell NAPI it can
			 * stop polling then re-enable rx interrupts */
			smsc911x_reg_write(pdata, INT_STS, INT_STS_RSFL_);
			netif_rx_complete(napi);
			temp = smsc911x_reg_read(pdata, INT_EN);
			temp |= INT_EN_RSFL_EN_;
			smsc911x_reg_write(pdata, INT_EN, temp);
			break;
		}

		/* Count packet for NAPI scheduling, even if it has an error.
		 * Error packets still require cycles to discard */
		npackets++;

		/* Frame length lives in bits 29:16 of the rx status word */
		pktlength = ((rxstat & 0x3FFF0000) >> 16);
		/* Whole fifo words occupied, including NET_IP_ALIGN pad */
		pktwords = (pktlength + NET_IP_ALIGN + 3) >> 2;
		smsc911x_rx_counterrors(dev, rxstat);

		if (unlikely(rxstat & RX_STS_ES_)) {
			SMSC_WARNING(RX_ERR,
				"Discarding packet with error bit set");
			/* Packet has an error, discard it and continue with
			 * the next */
			smsc911x_rx_fastforward(pdata, pktwords);
			dev->stats.rx_dropped++;
			continue;
		}

		skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			SMSC_WARNING(RX_ERR,
				"Unable to allocate skb for rx packet");
			/* Drop the packet and stop this polling iteration */
			smsc911x_rx_fastforward(pdata, pktwords);
			dev->stats.rx_dropped++;
			break;
		}

		/* Drop any headroom the allocator reserved: the fifo read
		 * below starts at skb->head, so data must start there too */
		skb->data = skb->head;
		skb_reset_tail_pointer(skb);

		/* Align IP on 16B boundary */
		skb_reserve(skb, NET_IP_ALIGN);
		/* Trailing 4 bytes are presumably the FCS - confirm */
		skb_put(skb, pktlength - 4);
		smsc911x_rx_readfifo(pdata, (unsigned int *)skb->head,
				     pktwords);
		skb->protocol = eth_type_trans(skb, dev);
		skb->ip_summed = CHECKSUM_NONE;
		netif_receive_skb(skb);

		/* Update counters */
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += (pktlength - 4);
		dev->last_rx = jiffies;
	}

	/* Return total received packets */
	return npackets;
}
1042
1043/* Returns hash bit number for given MAC address
1044 * Example:
1045 * 01 00 5E 00 00 01 -> returns bit number 31 */
1046static unsigned int smsc911x_hash(char addr[ETH_ALEN])
1047{
1048 return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f;
1049}
1050
/* Applies the filter configuration staged in pdata (set_bits_mask,
 * clear_bits_mask, hashhi, hashlo) to MAC_CR and the hash registers. */
static void smsc911x_rx_multicast_update(struct smsc911x_data *pdata)
{
	/* Performs the multicast & mac_cr update. This is called when
	 * safe on the current hardware, and with the mac_lock held */
	unsigned int mac_cr;

	SMSC_ASSERT_MAC_LOCK(pdata);

	/* Read-modify-write MAC_CR with the staged set/clear masks */
	mac_cr = smsc911x_mac_read(pdata, MAC_CR);
	mac_cr |= pdata->set_bits_mask;
	mac_cr &= ~(pdata->clear_bits_mask);
	smsc911x_mac_write(pdata, MAC_CR, mac_cr);
	smsc911x_mac_write(pdata, HASHH, pdata->hashhi);
	smsc911x_mac_write(pdata, HASHL, pdata->hashlo);
	SMSC_TRACE(HW, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X",
		mac_cr, pdata->hashhi, pdata->hashlo);
}
1068
/* Completes a deferred multicast filter update once Rx has been stopped.
 * Triggered from the RX_STOP interrupt path (see smsc911x_irqhandler);
 * clears multicast_update_pending and re-enables Rx when done. */
static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
{
	unsigned int mac_cr;

	/* This function is only called for older LAN911x devices
	 * (revA or revB), where MAC_CR, HASHH and HASHL should not
	 * be modified during Rx - newer devices immediately update the
	 * registers.
	 *
	 * This is called from interrupt context */

	/* plain spin_lock is sufficient: we are already in irq context */
	spin_lock(&pdata->mac_lock);

	/* Check Rx has stopped */
	if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_)
		SMSC_WARNING(DRV, "Rx not stopped");

	/* Perform the update - safe to do now Rx has stopped */
	smsc911x_rx_multicast_update(pdata);

	/* Re-enable Rx */
	mac_cr = smsc911x_mac_read(pdata, MAC_CR);
	mac_cr |= MAC_CR_RXEN_;
	smsc911x_mac_write(pdata, MAC_CR, mac_cr);

	/* Allow set_multicast_list to schedule the next update */
	pdata->multicast_update_pending = 0;

	spin_unlock(&pdata->mac_lock);
}
1098
1099static int smsc911x_soft_reset(struct smsc911x_data *pdata)
1100{
1101 unsigned int timeout;
1102 unsigned int temp;
1103
1104 /* Reset the LAN911x */
1105 smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_);
1106 timeout = 10;
1107 do {
1108 udelay(10);
1109 temp = smsc911x_reg_read(pdata, HW_CFG);
1110 } while ((--timeout) && (temp & HW_CFG_SRST_));
1111
1112 if (unlikely(temp & HW_CFG_SRST_)) {
1113 SMSC_WARNING(DRV, "Failed to complete reset");
1114 return -EIO;
1115 }
1116 return 0;
1117}
1118
1119/* Sets the device MAC address to dev_addr, called with mac_lock held */
1120static void
1121smsc911x_set_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6])
1122{
1123 u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4];
1124 u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
1125 (dev_addr[1] << 8) | dev_addr[0];
1126
1127 SMSC_ASSERT_MAC_LOCK(pdata);
1128
1129 smsc911x_mac_write(pdata, ADDRH, mac_high16);
1130 smsc911x_mac_write(pdata, ADDRL, mac_low32);
1131}
1132
/* Entry point for bringing the interface up.
 * Resets the chip, restores the MAC address, verifies interrupt delivery
 * with a software-interrupt self-test, starts the PHY, configures the
 * fifos and enables Tx/Rx. Returns 0 on success or a negative errno. */
static int smsc911x_open(struct net_device *dev)
{
	struct smsc911x_data *pdata = netdev_priv(dev);
	unsigned int timeout;
	unsigned int temp;
	unsigned int intcfg;

	/* if the phy is not yet registered, retry later*/
	if (!pdata->phy_dev) {
		SMSC_WARNING(HW, "phy_dev is NULL");
		return -EAGAIN;
	}

	if (!is_valid_ether_addr(dev->dev_addr)) {
		SMSC_WARNING(HW, "dev_addr is not a valid MAC address");
		return -EADDRNOTAVAIL;
	}

	/* Reset the LAN911x */
	if (smsc911x_soft_reset(pdata)) {
		SMSC_WARNING(HW, "soft reset failed");
		return -EIO;
	}

	/* Fixed HW_CFG/AFC_CFG values - presumably TX fifo sizing and
	 * flow-control thresholds, confirm against the datasheet */
	smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
	smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740);

	/* Make sure EEPROM has finished loading before setting GPIO_CFG */
	timeout = 50;
	while ((timeout--) &&
	       (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_)) {
		udelay(10);
	}

	if (unlikely(timeout == 0))
		SMSC_WARNING(IFUP,
			"Timed out waiting for EEPROM busy bit to clear");

	/* Fixed GPIO_CFG value - presumably LED/GPIO routing, confirm */
	smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000);

	/* The soft reset above cleared the device's MAC address,
	 * restore it from local copy (set in probe) */
	spin_lock_irq(&pdata->mac_lock);
	smsc911x_set_mac_address(pdata, dev->dev_addr);
	spin_unlock_irq(&pdata->mac_lock);

	/* Initialise irqs, but leave all sources disabled */
	smsc911x_reg_write(pdata, INT_EN, 0);
	smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);

	/* Set interrupt deassertion to 100uS */
	intcfg = ((10 << 24) | INT_CFG_IRQ_EN_);

	if (pdata->config.irq_polarity) {
		SMSC_TRACE(IFUP, "irq polarity: active high");
		intcfg |= INT_CFG_IRQ_POL_;
	} else {
		SMSC_TRACE(IFUP, "irq polarity: active low");
	}

	if (pdata->config.irq_type) {
		SMSC_TRACE(IFUP, "irq type: push-pull");
		intcfg |= INT_CFG_IRQ_TYPE_;
	} else {
		SMSC_TRACE(IFUP, "irq type: open drain");
	}

	smsc911x_reg_write(pdata, INT_CFG, intcfg);

	/* Self-test: raise a software interrupt and wait for the handler
	 * to set software_irq_signal, proving irq routing works */
	SMSC_TRACE(IFUP, "Testing irq handler using IRQ %d", dev->irq);
	pdata->software_irq_signal = 0;
	smp_wmb();

	temp = smsc911x_reg_read(pdata, INT_EN);
	temp |= INT_EN_SW_INT_EN_;
	smsc911x_reg_write(pdata, INT_EN, temp);

	timeout = 1000;
	while (timeout--) {
		if (pdata->software_irq_signal)
			break;
		msleep(1);
	}

	if (!pdata->software_irq_signal) {
		dev_warn(&dev->dev, "ISR failed signaling test (IRQ %d)\n",
			 dev->irq);
		return -ENODEV;
	}
	SMSC_TRACE(IFUP, "IRQ handler passed test using IRQ %d", dev->irq);

	dev_info(&dev->dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n",
		 (unsigned long)pdata->ioaddr, dev->irq);

	/* Bring the PHY up */
	phy_start(pdata->phy_dev);

	temp = smsc911x_reg_read(pdata, HW_CFG);
	/* Preserve TX FIFO size and external PHY configuration */
	temp &= (HW_CFG_TX_FIF_SZ_|0x00000FFF);
	temp |= HW_CFG_SF_;
	smsc911x_reg_write(pdata, HW_CFG, temp);

	/* Set TX-available level; clear RX status level */
	temp = smsc911x_reg_read(pdata, FIFO_INT);
	temp |= FIFO_INT_TX_AVAIL_LEVEL_;
	temp &= ~(FIFO_INT_RX_STS_LEVEL_);
	smsc911x_reg_write(pdata, FIFO_INT, temp);

	/* set RX Data offset to 2 bytes for alignment */
	smsc911x_reg_write(pdata, RX_CFG, (2 << 8));

	/* enable NAPI polling before enabling RX interrupts */
	napi_enable(&pdata->napi);

	temp = smsc911x_reg_read(pdata, INT_EN);
	temp |= (INT_EN_TDFA_EN_ | INT_EN_RSFL_EN_);
	smsc911x_reg_write(pdata, INT_EN, temp);

	/* Enable MAC Tx/Rx, disable heartbeat */
	spin_lock_irq(&pdata->mac_lock);
	temp = smsc911x_mac_read(pdata, MAC_CR);
	temp |= (MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
	smsc911x_mac_write(pdata, MAC_CR, temp);
	spin_unlock_irq(&pdata->mac_lock);

	smsc911x_reg_write(pdata, TX_CFG, TX_CFG_TX_ON_);

	netif_start_queue(dev);
	return 0;
}
1262
/* Entry point for stopping the interface.
 * Masks the chip's interrupt output, halts queueing and NAPI, folds the
 * remaining hardware counters into dev->stats, then stops the PHY.
 * Always returns 0. */
static int smsc911x_stop(struct net_device *dev)
{
	struct smsc911x_data *pdata = netdev_priv(dev);
	unsigned int temp;

	/* Disable all device interrupts */
	temp = smsc911x_reg_read(pdata, INT_CFG);
	temp &= ~INT_CFG_IRQ_EN_;
	smsc911x_reg_write(pdata, INT_CFG, temp);

	/* Stop Tx and Rx polling */
	netif_stop_queue(dev);
	napi_disable(&pdata->napi);

	/* At this point all Rx and Tx activity is stopped */
	/* NOTE(review): assumes RX_DROP clears on read - confirm */
	dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
	smsc911x_tx_update_txcounters(dev);

	/* Bring the PHY down */
	if (pdata->phy_dev)
		phy_stop(pdata->phy_dev);

	SMSC_TRACE(IFDOWN, "Interface stopped");
	return 0;
}
1289
/* Entry point for transmitting a packet.
 * Writes the two TX command words followed by the (word-aligned) packet
 * data into the TX data fifo, frees the skb, and throttles the queue when
 * the data fifo runs low. Always returns NETDEV_TX_OK. */
static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct smsc911x_data *pdata = netdev_priv(dev);
	unsigned int freespace;
	unsigned int tx_cmd_a;
	unsigned int tx_cmd_b;
	unsigned int temp;
	u32 wrsz;
	ulong bufp;

	freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_;

	if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD))
		SMSC_WARNING(TX_ERR,
			"Tx data fifo low, space available: %d", freespace);

	/* Word alignment adjustment */
	/* TX_CMD_A: data start offset within the first word (bits 17:16),
	 * first/last segment flags, and buffer length */
	tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16;
	tx_cmd_a |= TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_;
	tx_cmd_a |= (unsigned int)skb->len;

	/* TX_CMD_B: packet tag (reused as length, see
	 * smsc911x_tx_update_txcounters) and packet length */
	tx_cmd_b = ((unsigned int)skb->len) << 16;
	tx_cmd_b |= (unsigned int)skb->len;

	smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_a);
	smsc911x_reg_write(pdata, TX_DATA_FIFO, tx_cmd_b);

	/* Round the buffer start down to a word boundary; widen the word
	 * count to cover the alignment slop at both ends */
	bufp = (ulong)skb->data & (~0x3);
	wrsz = (u32)skb->len + 3;
	wrsz += (u32)((ulong)skb->data & 0x3);
	wrsz >>= 2;

	smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
	/* 32 bytes presumably accounts for per-packet fifo overhead */
	freespace -= (skb->len + 32);
	dev_kfree_skb(skb);
	dev->trans_start = jiffies;

	if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
		smsc911x_tx_update_txcounters(dev);

	if (freespace < TX_FIFO_LOW_THRESHOLD) {
		netif_stop_queue(dev);
		/* Raise the TX-available interrupt level so the queue is
		 * woken once the fifo drains (0x32 in the top byte -
		 * presumably the TDFA level field, confirm) */
		temp = smsc911x_reg_read(pdata, FIFO_INT);
		temp &= 0x00FFFFFF;
		temp |= 0x32000000;
		smsc911x_reg_write(pdata, FIFO_INT, temp);
	}

	return NETDEV_TX_OK;
}
1341
1342/* Entry point for getting status counters */
1343static struct net_device_stats *smsc911x_get_stats(struct net_device *dev)
1344{
1345 struct smsc911x_data *pdata = netdev_priv(dev);
1346 smsc911x_tx_update_txcounters(dev);
1347 dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
1348 return &dev->stats;
1349}
1350
/* Entry point for setting addressing modes.
 * Stages the desired MAC_CR set/clear masks and hash table values in
 * pdata, then either applies them immediately (generation > 1) or, on
 * older silicon, stops Rx and defers the update to the RX_STOP interrupt
 * (see smsc911x_rx_multicast_update_workaround). */
static void smsc911x_set_multicast_list(struct net_device *dev)
{
	struct smsc911x_data *pdata = netdev_priv(dev);
	unsigned long flags;

	if (dev->flags & IFF_PROMISC) {
		/* Enabling promiscuous mode */
		pdata->set_bits_mask = MAC_CR_PRMS_;
		pdata->clear_bits_mask = (MAC_CR_MCPAS_ | MAC_CR_HPFILT_);
		pdata->hashhi = 0;
		pdata->hashlo = 0;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Enabling all multicast mode */
		pdata->set_bits_mask = MAC_CR_MCPAS_;
		pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_);
		pdata->hashhi = 0;
		pdata->hashlo = 0;
	} else if (dev->mc_count > 0) {
		/* Enabling specific multicast addresses */
		unsigned int hash_high = 0;
		unsigned int hash_low = 0;
		unsigned int count = 0;
		struct dev_mc_list *mc_list = dev->mc_list;

		pdata->set_bits_mask = MAC_CR_HPFILT_;
		pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);

		/* Build the 64-bit hash table: smsc911x_hash selects the
		 * bit, bit 5 picks the high or low 32-bit register */
		while (mc_list) {
			count++;
			if ((mc_list->dmi_addrlen) == ETH_ALEN) {
				unsigned int bitnum =
				    smsc911x_hash(mc_list->dmi_addr);
				unsigned int mask = 0x01 << (bitnum & 0x1F);
				if (bitnum & 0x20)
					hash_high |= mask;
				else
					hash_low |= mask;
			} else {
				SMSC_WARNING(DRV, "dmi_addrlen != 6");
			}
			mc_list = mc_list->next;
		}
		/* Sanity check: list length should match mc_count */
		if (count != (unsigned int)dev->mc_count)
			SMSC_WARNING(DRV, "mc_count != dev->mc_count");

		pdata->hashhi = hash_high;
		pdata->hashlo = hash_low;
	} else {
		/* Enabling local MAC address only */
		pdata->set_bits_mask = 0;
		pdata->clear_bits_mask =
		    (MAC_CR_PRMS_ | MAC_CR_MCPAS_ | MAC_CR_HPFILT_);
		pdata->hashhi = 0;
		pdata->hashlo = 0;
	}

	spin_lock_irqsave(&pdata->mac_lock, flags);

	if (pdata->generation <= 1) {
		/* Older hardware revision - cannot change these flags while
		 * receiving data */
		if (!pdata->multicast_update_pending) {
			unsigned int temp;
			SMSC_TRACE(HW, "scheduling mcast update");
			pdata->multicast_update_pending = 1;

			/* Request the hardware to stop, then perform the
			 * update when we get an RX_STOP interrupt */
			smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
			temp = smsc911x_reg_read(pdata, INT_EN);
			temp |= INT_EN_RXSTOP_INT_EN_;
			smsc911x_reg_write(pdata, INT_EN, temp);

			temp = smsc911x_mac_read(pdata, MAC_CR);
			temp &= ~(MAC_CR_RXEN_);
			smsc911x_mac_write(pdata, MAC_CR, temp);
		} else {
			/* There is another update pending, this should now
			 * use the newer values */
		}
	} else {
		/* Newer hardware revision - can write immediately */
		smsc911x_rx_multicast_update(pdata);
	}

	spin_unlock_irqrestore(&pdata->mac_lock, flags);
}
1439
/* Top-level interrupt handler.
 * Acks and services each pending-and-enabled source: the software
 * interrupt self-test (see smsc911x_open), the deferred multicast update
 * on RX_STOP, TX-fifo-available queue wake, RX errors, and RX level
 * (which hands processing to NAPI with rx interrupts masked). */
static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct smsc911x_data *pdata = netdev_priv(dev);
	u32 intsts = smsc911x_reg_read(pdata, INT_STS);
	u32 inten = smsc911x_reg_read(pdata, INT_EN);
	int serviced = IRQ_NONE;
	u32 temp;

	if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
		/* One-shot: disable the source, ack it, and signal
		 * smsc911x_open's irq routing test */
		temp = smsc911x_reg_read(pdata, INT_EN);
		temp &= (~INT_EN_SW_INT_EN_);
		smsc911x_reg_write(pdata, INT_EN, temp);
		smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
		pdata->software_irq_signal = 1;
		smp_wmb();
		serviced = IRQ_HANDLED;
	}

	if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
		/* Called when there is a multicast update scheduled and
		 * it is now safe to complete the update */
		SMSC_TRACE(INTR, "RX Stop interrupt");
		temp = smsc911x_reg_read(pdata, INT_EN);
		temp &= (~INT_EN_RXSTOP_INT_EN_);
		smsc911x_reg_write(pdata, INT_EN, temp);
		smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
		smsc911x_rx_multicast_update_workaround(pdata);
		serviced = IRQ_HANDLED;
	}

	if (intsts & inten & INT_STS_TDFA_) {
		/* TX data fifo has space again: restore the TDFA level
		 * and let the stack resume transmitting */
		temp = smsc911x_reg_read(pdata, FIFO_INT);
		temp |= FIFO_INT_TX_AVAIL_LEVEL_;
		smsc911x_reg_write(pdata, FIFO_INT, temp);
		smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
		netif_wake_queue(dev);
		serviced = IRQ_HANDLED;
	}

	if (unlikely(intsts & inten & INT_STS_RXE_)) {
		/* Ack only; the error counters are picked up from the rx
		 * status words during polling */
		SMSC_TRACE(INTR, "RX Error interrupt");
		smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
		serviced = IRQ_HANDLED;
	}

	if (likely(intsts & inten & INT_STS_RSFL_)) {
		if (likely(netif_rx_schedule_prep(dev, &pdata->napi))) {
			/* Disable Rx interrupts */
			temp = smsc911x_reg_read(pdata, INT_EN);
			temp &= (~INT_EN_RSFL_EN_);
			smsc911x_reg_write(pdata, INT_EN, temp);
			/* Schedule a NAPI poll */
			__netif_rx_schedule(dev, &pdata->napi);
		} else {
			SMSC_WARNING(RX_ERR,
				"netif_rx_schedule_prep failed");
		}
		serviced = IRQ_HANDLED;
	}

	return serviced;
}
1503
1504#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling entry for netconsole/netpoll: runs the interrupt handler
 * manually with the device's irq line masked */
static void smsc911x_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	/* irq number argument is unused by smsc911x_irqhandler */
	smsc911x_irqhandler(0, dev);
	enable_irq(dev->irq);
}
1511#endif /* CONFIG_NET_POLL_CONTROLLER */
1512
1513/* Standard ioctls for mii-tool */
1514static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1515{
1516 struct smsc911x_data *pdata = netdev_priv(dev);
1517
1518 if (!netif_running(dev) || !pdata->phy_dev)
1519 return -EINVAL;
1520
1521 return phy_mii_ioctl(pdata->phy_dev, if_mii(ifr), cmd);
1522}
1523
1524static int
1525smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1526{
1527 struct smsc911x_data *pdata = netdev_priv(dev);
1528
1529 cmd->maxtxpkt = 1;
1530 cmd->maxrxpkt = 1;
1531 return phy_ethtool_gset(pdata->phy_dev, cmd);
1532}
1533
1534static int
1535smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
1536{
1537 struct smsc911x_data *pdata = netdev_priv(dev);
1538
1539 return phy_ethtool_sset(pdata->phy_dev, cmd);
1540}
1541
1542static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
1543 struct ethtool_drvinfo *info)
1544{
1545 strlcpy(info->driver, SMSC_CHIPNAME, sizeof(info->driver));
1546 strlcpy(info->version, SMSC_DRV_VERSION, sizeof(info->version));
1547 strlcpy(info->bus_info, dev->dev.parent->bus_id,
1548 sizeof(info->bus_info));
1549}
1550
1551static int smsc911x_ethtool_nwayreset(struct net_device *dev)
1552{
1553 struct smsc911x_data *pdata = netdev_priv(dev);
1554
1555 return phy_start_aneg(pdata->phy_dev);
1556}
1557
1558static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev)
1559{
1560 struct smsc911x_data *pdata = netdev_priv(dev);
1561 return pdata->msg_enable;
1562}
1563
1564static void smsc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
1565{
1566 struct smsc911x_data *pdata = netdev_priv(dev);
1567 pdata->msg_enable = level;
1568}
1569
1570static int smsc911x_ethtool_getregslen(struct net_device *dev)
1571{
1572 return (((E2P_DATA - ID_REV) / 4 + 1) + (WUCSR - MAC_CR) + 1 + 32) *
1573 sizeof(u32);
1574}
1575
/* ethtool get_regs: dumps the system csr block, the MAC registers and
 * all 32 PHY registers into 'buf', in the same order and count that
 * smsc911x_ethtool_getregslen advertises. */
static void
smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
			 void *buf)
{
	struct smsc911x_data *pdata = netdev_priv(dev);
	struct phy_device *phy_dev = pdata->phy_dev;
	unsigned long flags;
	unsigned int i;
	unsigned int j = 0;
	u32 *data = buf;

	regs->version = pdata->idrev;
	/* Directly-mapped system registers, word-addressed */
	for (i = ID_REV; i <= E2P_DATA; i += (sizeof(u32)))
		data[j++] = smsc911x_reg_read(pdata, i);

	/* MAC registers are index-addressed; each read is done under
	 * mac_lock, taken per iteration to bound irq-off time */
	for (i = MAC_CR; i <= WUCSR; i++) {
		spin_lock_irqsave(&pdata->mac_lock, flags);
		data[j++] = smsc911x_mac_read(pdata, i);
		spin_unlock_irqrestore(&pdata->mac_lock, flags);
	}

	/* Standard 32-register PHY space via MDIO */
	for (i = 0; i <= 31; i++)
		data[j++] = smsc911x_mii_read(phy_dev->bus, phy_dev->addr, i);
}
1600
1601static void smsc911x_eeprom_enable_access(struct smsc911x_data *pdata)
1602{
1603 unsigned int temp = smsc911x_reg_read(pdata, GPIO_CFG);
1604 temp &= ~GPIO_CFG_EEPR_EN_;
1605 smsc911x_reg_write(pdata, GPIO_CFG, temp);
1606 msleep(1);
1607}
1608
1609static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op)
1610{
1611 int timeout = 100;
1612 u32 e2cmd;
1613
1614 SMSC_TRACE(DRV, "op 0x%08x", op);
1615 if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
1616 SMSC_WARNING(DRV, "Busy at start");
1617 return -EBUSY;
1618 }
1619
1620 e2cmd = op | E2P_CMD_EPC_BUSY_;
1621 smsc911x_reg_write(pdata, E2P_CMD, e2cmd);
1622
1623 do {
1624 msleep(1);
1625 e2cmd = smsc911x_reg_read(pdata, E2P_CMD);
1626 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (timeout--));
1627
1628 if (!timeout) {
1629 SMSC_TRACE(DRV, "TIMED OUT");
1630 return -EAGAIN;
1631 }
1632
1633 if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
1634 SMSC_TRACE(DRV, "Error occured during eeprom operation");
1635 return -EINVAL;
1636 }
1637
1638 return 0;
1639}
1640
/* Reads one EEPROM byte at 'address'.
 * Note: 'data' is the base of the caller's full EEPROM shadow buffer,
 * not a pointer to a single byte - the result is stored at data[address].
 * Returns 0 on success or the negative errno from send_cmd. */
static int smsc911x_eeprom_read_location(struct smsc911x_data *pdata,
					u8 address, u8 *data)
{
	u32 op = E2P_CMD_EPC_CMD_READ_ | address;
	int ret;

	SMSC_TRACE(DRV, "address 0x%x", address);
	ret = smsc911x_eeprom_send_cmd(pdata, op);

	if (!ret)
		data[address] = smsc911x_reg_read(pdata, E2P_DATA);

	return ret;
}
1655
/* Writes one EEPROM byte: erases the location first, then writes 'data'
 * via E2P_DATA. Caller must have issued EWEN (write enable) beforehand.
 * Returns 0 on success or the negative errno from send_cmd. */
static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
					u8 address, u8 data)
{
	u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
	int ret;

	SMSC_TRACE(DRV, "address 0x%x, data 0x%x", address, data);
	/* Erase first; skip the write if the erase failed */
	ret = smsc911x_eeprom_send_cmd(pdata, op);

	if (!ret) {
		op = E2P_CMD_EPC_CMD_WRITE_ | address;
		smsc911x_reg_write(pdata, E2P_DATA, (u32)data);
		ret = smsc911x_eeprom_send_cmd(pdata, op);
	}

	return ret;
}
1673
/* ethtool get_eeprom_len: fixed size of the attached EEPROM */
static int smsc911x_ethtool_get_eeprom_len(struct net_device *dev)
{
	return SMSC911X_EEPROM_SIZE;
}
1678
1679static int smsc911x_ethtool_get_eeprom(struct net_device *dev,
1680 struct ethtool_eeprom *eeprom, u8 *data)
1681{
1682 struct smsc911x_data *pdata = netdev_priv(dev);
1683 u8 eeprom_data[SMSC911X_EEPROM_SIZE];
1684 int len;
1685 int i;
1686
1687 smsc911x_eeprom_enable_access(pdata);
1688
1689 len = min(eeprom->len, SMSC911X_EEPROM_SIZE);
1690 for (i = 0; i < len; i++) {
1691 int ret = smsc911x_eeprom_read_location(pdata, i, eeprom_data);
1692 if (ret < 0) {
1693 eeprom->len = 0;
1694 return ret;
1695 }
1696 }
1697
1698 memcpy(data, &eeprom_data[eeprom->offset], len);
1699 eeprom->len = len;
1700 return 0;
1701}
1702
1703static int smsc911x_ethtool_set_eeprom(struct net_device *dev,
1704 struct ethtool_eeprom *eeprom, u8 *data)
1705{
1706 int ret;
1707 struct smsc911x_data *pdata = netdev_priv(dev);
1708
1709 smsc911x_eeprom_enable_access(pdata);
1710 smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWEN_);
1711 ret = smsc911x_eeprom_write_location(pdata, eeprom->offset, *data);
1712 smsc911x_eeprom_send_cmd(pdata, E2P_CMD_EPC_CMD_EWDS_);
1713
1714 /* Single byte write, according to man page */
1715 eeprom->len = 1;
1716
1717 return ret;
1718}
1719
/* ethtool operations table; link management is delegated to phylib,
 * register and EEPROM access go through the helpers above */
static const struct ethtool_ops smsc911x_ethtool_ops = {
	.get_settings = smsc911x_ethtool_getsettings,
	.set_settings = smsc911x_ethtool_setsettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = smsc911x_ethtool_getdrvinfo,
	.nway_reset = smsc911x_ethtool_nwayreset,
	.get_msglevel = smsc911x_ethtool_getmsglevel,
	.set_msglevel = smsc911x_ethtool_setmsglevel,
	.get_regs_len = smsc911x_ethtool_getregslen,
	.get_regs = smsc911x_ethtool_getregs,
	.get_eeprom_len = smsc911x_ethtool_get_eeprom_len,
	.get_eeprom = smsc911x_ethtool_get_eeprom,
	.set_eeprom = smsc911x_ethtool_set_eeprom,
};
1734
/* Network device operations; installed on the netdev in smsc911x_init */
static const struct net_device_ops smsc911x_netdev_ops = {
	.ndo_open		= smsc911x_open,
	.ndo_stop		= smsc911x_stop,
	.ndo_start_xmit		= smsc911x_hard_start_xmit,
	.ndo_get_stats		= smsc911x_get_stats,
	.ndo_set_multicast_list	= smsc911x_set_multicast_list,
	.ndo_do_ioctl		= smsc911x_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= smsc911x_poll_controller,
#endif
};
1747
/* Initializing private device structures, only called from probe.
 * Verifies bus byte ordering via BYTE_TEST, identifies the chip from
 * ID_REV (setting pdata->generation, which gates the multicast
 * workaround), resets the chip, and wires up the netdev ops.
 * Returns 0 on success or -ENODEV. */
static int __devinit smsc911x_init(struct net_device *dev)
{
	struct smsc911x_data *pdata = netdev_priv(dev);
	unsigned int byte_test;

	SMSC_TRACE(PROBE, "Driver Parameters:");
	SMSC_TRACE(PROBE, "LAN base: 0x%08lX",
		(unsigned long)pdata->ioaddr);
	SMSC_TRACE(PROBE, "IRQ: %d", dev->irq);
	SMSC_TRACE(PROBE, "PHY will be autodetected.");

	spin_lock_init(&pdata->dev_lock);

	if (pdata->ioaddr == 0) {
		SMSC_WARNING(PROBE, "pdata->ioaddr: 0x00000000");
		return -ENODEV;
	}

	/* Check byte ordering: BYTE_TEST must read 0x87654321 */
	byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
	SMSC_TRACE(PROBE, "BYTE_TEST: 0x%08X", byte_test);
	if (byte_test == 0x43218765) {
		SMSC_TRACE(PROBE, "BYTE_TEST looks swapped, "
			"applying WORD_SWAP");
		smsc911x_reg_write(pdata, WORD_SWAP, 0xffffffff);

		/* 1 dummy read of BYTE_TEST is needed after a write to
		 * WORD_SWAP before its contents are valid */
		byte_test = smsc911x_reg_read(pdata, BYTE_TEST);

		byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
	}

	if (byte_test != 0x87654321) {
		SMSC_WARNING(DRV, "BYTE_TEST: 0x%08X", byte_test);
		if (((byte_test >> 16) & 0xFFFF) == (byte_test & 0xFFFF)) {
			SMSC_WARNING(PROBE,
				"top 16 bits equal to bottom 16 bits");
			SMSC_TRACE(PROBE, "This may mean the chip is set "
				"for 32 bit while the bus is reading 16 bit");
		}
		return -ENODEV;
	}

	/* Default generation to zero (all workarounds apply) */
	pdata->generation = 0;

	pdata->idrev = smsc911x_reg_read(pdata, ID_REV);
	switch (pdata->idrev & 0xFFFF0000) {
	case 0x01180000:
	case 0x01170000:
	case 0x01160000:
	case 0x01150000:
		/* LAN911[5678] family: generation is the chip revision,
		 * so revA/revB (<= 1) keep the multicast workaround */
		pdata->generation = pdata->idrev & 0x0000FFFF;
		break;

	case 0x118A0000:
	case 0x117A0000:
	case 0x116A0000:
	case 0x115A0000:
		/* LAN921[5678] family */
		pdata->generation = 3;
		break;

	case 0x92100000:
	case 0x92110000:
	case 0x92200000:
	case 0x92210000:
		/* LAN9210/LAN9211/LAN9220/LAN9221 */
		pdata->generation = 4;
		break;

	default:
		SMSC_WARNING(PROBE, "LAN911x not identified, idrev: 0x%08X",
			pdata->idrev);
		return -ENODEV;
	}

	SMSC_TRACE(PROBE, "LAN911x identified, idrev: 0x%08X, generation: %d",
		pdata->idrev, pdata->generation);

	if (pdata->generation == 0)
		SMSC_WARNING(PROBE,
			"This driver is not intended for this chip revision");

	/* Reset the LAN911x */
	if (smsc911x_soft_reset(pdata))
		return -ENODEV;

	/* Disable all interrupt sources until we bring the device up */
	smsc911x_reg_write(pdata, INT_EN, 0);

	ether_setup(dev);
	dev->flags |= IFF_MULTICAST;
	netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
	dev->netdev_ops = &smsc911x_netdev_ops;
	dev->ethtool_ops = &smsc911x_ethtool_ops;

	return 0;
}
1850
/* Platform driver remove: tears down in reverse order of probe -
 * PHY, MDIO bus, netdev registration, irq, memory region, mapping. */
static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct smsc911x_data *pdata;
	struct resource *res;

	dev = platform_get_drvdata(pdev);
	BUG_ON(!dev);
	pdata = netdev_priv(dev);
	BUG_ON(!pdata);
	BUG_ON(!pdata->ioaddr);
	BUG_ON(!pdata->phy_dev);

	SMSC_TRACE(IFDOWN, "Stopping driver.");

	phy_disconnect(pdata->phy_dev);
	pdata->phy_dev = NULL;
	mdiobus_unregister(pdata->mii_bus);
	mdiobus_free(pdata->mii_bus);

	platform_set_drvdata(pdev, NULL);
	unregister_netdev(dev);
	free_irq(dev->irq, dev);
	/* Same lookup order as probe: named resource first, then index 0 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "smsc911x-memory");
	if (!res)
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* NOTE(review): size matches the request_mem_region size used in
	 * probe (res->end - res->start, without resource_size()'s +1);
	 * the two must be kept in sync if either is changed */
	release_mem_region(res->start, res->end - res->start);

	iounmap(pdata->ioaddr);

	free_netdev(dev);

	return 0;
}
1887
/*
 * Platform driver probe: map the chip's registers, identify and reset it,
 * hook up the interrupt, register the net device and the MII bus, and
 * choose a MAC address.  Failures unwind through the numbered goto labels
 * at the bottom, each of which releases exactly what was acquired before
 * the corresponding jump -- keep acquisitions and labels in sync.
 */
static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct smsc911x_data *pdata;
	struct smsc911x_platform_config *config = pdev->dev.platform_data;
	struct resource *res;
	unsigned int intcfg = 0;
	int res_size;
	int retval;
	DECLARE_MAC_BUF(mac);

	pr_info("%s: Driver version %s.\n", SMSC_CHIPNAME, SMSC_DRV_VERSION);

	/* platform data specifies irq & dynamic bus configuration */
	if (!pdev->dev.platform_data) {
		pr_warning("%s: platform_data not provided\n", SMSC_CHIPNAME);
		retval = -ENODEV;
		goto out_0;
	}

	/* prefer the named memory resource; fall back to the first one */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "smsc911x-memory");
	if (!res)
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		pr_warning("%s: Could not allocate resource.\n",
			   SMSC_CHIPNAME);
		retval = -ENODEV;
		goto out_0;
	}
	/* NOTE(review): end - start is one byte short of resource_size();
	 * the request/ioremap/release calls all use this same value, so the
	 * driver is self-consistent -- fix all call sites together if ever
	 * corrected. */
	res_size = res->end - res->start;

	if (!request_mem_region(res->start, res_size, SMSC_CHIPNAME)) {
		retval = -EBUSY;
		goto out_0;
	}

	dev = alloc_etherdev(sizeof(struct smsc911x_data));
	if (!dev) {
		pr_warning("%s: Could not allocate device.\n", SMSC_CHIPNAME);
		retval = -ENOMEM;
		goto out_release_io_1;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	pdata = netdev_priv(dev);

	/* NOTE(review): platform_get_irq() can return a negative errno;
	 * that value is passed unchecked to request_irq() below, which
	 * would then fail -- confirm all users provide a valid irq. */
	dev->irq = platform_get_irq(pdev, 0);
	pdata->ioaddr = ioremap_nocache(res->start, res_size);

	/* copy config parameters across to pdata */
	memcpy(&pdata->config, config, sizeof(pdata->config));

	pdata->dev = dev;
	pdata->msg_enable = ((1 << debug) - 1);

	if (pdata->ioaddr == NULL) {
		SMSC_WARNING(PROBE,
			     "Error smsc911x base address invalid");
		retval = -ENOMEM;
		goto out_free_netdev_2;
	}

	/* identify the chip generation and soft-reset it */
	retval = smsc911x_init(dev);
	if (retval < 0)
		goto out_unmap_io_3;

	/* configure irq polarity and type before connecting isr */
	if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
		intcfg |= INT_CFG_IRQ_POL_;

	if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL)
		intcfg |= INT_CFG_IRQ_TYPE_;

	smsc911x_reg_write(pdata, INT_CFG, intcfg);

	/* Ensure interrupts are globally disabled before connecting ISR */
	smsc911x_reg_write(pdata, INT_EN, 0);
	smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);

	retval = request_irq(dev->irq, smsc911x_irqhandler, IRQF_DISABLED,
			     SMSC_CHIPNAME, dev);
	if (retval) {
		SMSC_WARNING(PROBE,
			     "Unable to claim requested irq: %d", dev->irq);
		goto out_unmap_io_3;
	}

	platform_set_drvdata(pdev, dev);

	retval = register_netdev(dev);
	if (retval) {
		SMSC_WARNING(PROBE,
			     "Error %i registering device", retval);
		goto out_unset_drvdata_4;
	} else {
		SMSC_TRACE(PROBE, "Network interface: \"%s\"", dev->name);
	}

	/* mac_lock guards the indirect MAC CSR accesses below */
	spin_lock_init(&pdata->mac_lock);

	retval = smsc911x_mii_init(pdev, dev);
	if (retval) {
		SMSC_WARNING(PROBE,
			     "Error %i initialising mii", retval);
		goto out_unregister_netdev_5;
	}

	spin_lock_irq(&pdata->mac_lock);

	/* Check if mac address has been specified when bringing interface up */
	if (is_valid_ether_addr(dev->dev_addr)) {
		smsc911x_set_mac_address(pdata, dev->dev_addr);
		SMSC_TRACE(PROBE, "MAC Address is specified by configuration");
	} else {
		/* Try reading mac address from device. if EEPROM is present
		 * it will already have been set */
		u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
		u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL);
		/* ADDRL holds bytes 0-3, ADDRH bytes 4-5, LSB first */
		dev->dev_addr[0] = (u8)(mac_low32);
		dev->dev_addr[1] = (u8)(mac_low32 >> 8);
		dev->dev_addr[2] = (u8)(mac_low32 >> 16);
		dev->dev_addr[3] = (u8)(mac_low32 >> 24);
		dev->dev_addr[4] = (u8)(mac_high16);
		dev->dev_addr[5] = (u8)(mac_high16 >> 8);

		if (is_valid_ether_addr(dev->dev_addr)) {
			/* eeprom values are valid so use them */
			SMSC_TRACE(PROBE,
				   "Mac Address is read from LAN911x EEPROM");
		} else {
			/* eeprom values are invalid, generate random MAC */
			random_ether_addr(dev->dev_addr);
			smsc911x_set_mac_address(pdata, dev->dev_addr);
			SMSC_TRACE(PROBE,
				   "MAC Address is set to random_ether_addr");
		}
	}

	spin_unlock_irq(&pdata->mac_lock);

	dev_info(&dev->dev, "MAC Address: %s\n",
		 print_mac(mac, dev->dev_addr));

	return 0;

	/* error unwind: each label undoes one acquisition, newest first */
out_unregister_netdev_5:
	unregister_netdev(dev);
out_unset_drvdata_4:
	platform_set_drvdata(pdev, NULL);
	free_irq(dev->irq, dev);
out_unmap_io_3:
	iounmap(pdata->ioaddr);
out_free_netdev_2:
	free_netdev(dev);
out_release_io_1:
	release_mem_region(res->start, res->end - res->start);
out_0:
	return retval;
}
2049
2050static struct platform_driver smsc911x_driver = {
2051 .probe = smsc911x_drv_probe,
2052 .remove = smsc911x_drv_remove,
2053 .driver = {
2054 .name = SMSC_CHIPNAME,
2055 },
2056};
2057
2058/* Entry point for loading the module */
2059static int __init smsc911x_init_module(void)
2060{
2061 return platform_driver_register(&smsc911x_driver);
2062}
2063
2064/* entry point for unloading the module */
2065static void __exit smsc911x_cleanup_module(void)
2066{
2067 platform_driver_unregister(&smsc911x_driver);
2068}
2069
/* Wire module load/unload to driver registration/unregistration. */
module_init(smsc911x_init_module);
module_exit(smsc911x_cleanup_module);
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
new file mode 100644
index 000000000000..2b76654bb958
--- /dev/null
+++ b/drivers/net/smsc911x.h
@@ -0,0 +1,390 @@
1/***************************************************************************
2 *
3 * Copyright (C) 2004-2008 SMSC
4 * Copyright (C) 2005-2008 ARM
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 ***************************************************************************/
21#ifndef __SMSC911X_H__
22#define __SMSC911X_H__
23
/* Driver tuning constants */
#define TX_FIFO_LOW_THRESHOLD	((u32)1600)
#define SMSC911X_EEPROM_SIZE	((u32)7)
/* compile-time verbosity: >=1 enables SMSC_WARNING, >=2 also SMSC_TRACE */
#define USE_DEBUG		0

/* This is the maximum number of packets to be received every
 * NAPI poll */
#define SMSC_NAPI_WEIGHT	16

/* implements a PHY loopback test at initialisation time, to ensure a packet
 * can be successfully looped back */
#define USE_PHY_WORK_AROUND

/* Conditional printk keyed off pdata->msg_enable; requires a local
 * variable named "pdata" to be in scope at the call site. */
#define DPRINTK(nlevel, klevel, fmt, args...) \
	((void)((NETIF_MSG_##nlevel & pdata->msg_enable) && \
	printk(KERN_##klevel "%s: %s: " fmt "\n", \
		pdata->dev->name, __func__, ## args)))

/* The disabled variants expand to a GNU statement expression yielding 0
 * so SMSC_WARNING/SMSC_TRACE stay valid in expression context. */
#if USE_DEBUG >= 1
#define SMSC_WARNING(nlevel, fmt, args...) \
	DPRINTK(nlevel, WARNING, fmt, ## args)
#else
#define SMSC_WARNING(nlevel, fmt, args...) \
	({ do {} while (0); 0; })
#endif

#if USE_DEBUG >= 2
#define SMSC_TRACE(nlevel, fmt, args...) \
	DPRINTK(nlevel, INFO, fmt, ## args)
#else
#define SMSC_TRACE(nlevel, fmt, args...) \
	({ do {} while (0); 0; })
#endif
56
/* On spinlock-debug kernels, warn if the caller does not hold
 * pdata->mac_lock when touching the indirect MAC CSRs; compiles to
 * nothing otherwise. */
#ifdef CONFIG_DEBUG_SPINLOCK
#define SMSC_ASSERT_MAC_LOCK(pdata) \
	WARN_ON(!spin_is_locked(&pdata->mac_lock))
#else
#define SMSC_ASSERT_MAC_LOCK(pdata) do {} while (0)
#endif				/* CONFIG_DEBUG_SPINLOCK */
63
64/* SMSC911x registers and bitfields */
65#define RX_DATA_FIFO 0x00
66
67#define TX_DATA_FIFO 0x20
68#define TX_CMD_A_ON_COMP_ 0x80000000
69#define TX_CMD_A_BUF_END_ALGN_ 0x03000000
70#define TX_CMD_A_4_BYTE_ALGN_ 0x00000000
71#define TX_CMD_A_16_BYTE_ALGN_ 0x01000000
72#define TX_CMD_A_32_BYTE_ALGN_ 0x02000000
73#define TX_CMD_A_DATA_OFFSET_ 0x001F0000
74#define TX_CMD_A_FIRST_SEG_ 0x00002000
75#define TX_CMD_A_LAST_SEG_ 0x00001000
76#define TX_CMD_A_BUF_SIZE_ 0x000007FF
77#define TX_CMD_B_PKT_TAG_ 0xFFFF0000
78#define TX_CMD_B_ADD_CRC_DISABLE_ 0x00002000
79#define TX_CMD_B_DISABLE_PADDING_ 0x00001000
80#define TX_CMD_B_PKT_BYTE_LENGTH_ 0x000007FF
81
82#define RX_STATUS_FIFO 0x40
83#define RX_STS_ES_ 0x00008000
84#define RX_STS_MCAST_ 0x00000400
85
86#define RX_STATUS_FIFO_PEEK 0x44
87
88#define TX_STATUS_FIFO 0x48
89#define TX_STS_ES_ 0x00008000
90
91#define TX_STATUS_FIFO_PEEK 0x4C
92
93#define ID_REV 0x50
94#define ID_REV_CHIP_ID_ 0xFFFF0000
95#define ID_REV_REV_ID_ 0x0000FFFF
96
97#define INT_CFG 0x54
98#define INT_CFG_INT_DEAS_ 0xFF000000
99#define INT_CFG_INT_DEAS_CLR_ 0x00004000
100#define INT_CFG_INT_DEAS_STS_ 0x00002000
101#define INT_CFG_IRQ_INT_ 0x00001000
102#define INT_CFG_IRQ_EN_ 0x00000100
103#define INT_CFG_IRQ_POL_ 0x00000010
104#define INT_CFG_IRQ_TYPE_ 0x00000001
105
106#define INT_STS 0x58
107#define INT_STS_SW_INT_ 0x80000000
108#define INT_STS_TXSTOP_INT_ 0x02000000
109#define INT_STS_RXSTOP_INT_ 0x01000000
110#define INT_STS_RXDFH_INT_ 0x00800000
111#define INT_STS_RXDF_INT_ 0x00400000
112#define INT_STS_TX_IOC_ 0x00200000
113#define INT_STS_RXD_INT_ 0x00100000
114#define INT_STS_GPT_INT_ 0x00080000
115#define INT_STS_PHY_INT_ 0x00040000
116#define INT_STS_PME_INT_ 0x00020000
117#define INT_STS_TXSO_ 0x00010000
118#define INT_STS_RWT_ 0x00008000
119#define INT_STS_RXE_ 0x00004000
120#define INT_STS_TXE_ 0x00002000
121#define INT_STS_TDFU_ 0x00000800
122#define INT_STS_TDFO_ 0x00000400
123#define INT_STS_TDFA_ 0x00000200
124#define INT_STS_TSFF_ 0x00000100
125#define INT_STS_TSFL_ 0x00000080
126#define INT_STS_RXDF_ 0x00000040
127#define INT_STS_RDFL_ 0x00000020
128#define INT_STS_RSFF_ 0x00000010
129#define INT_STS_RSFL_ 0x00000008
130#define INT_STS_GPIO2_INT_ 0x00000004
131#define INT_STS_GPIO1_INT_ 0x00000002
132#define INT_STS_GPIO0_INT_ 0x00000001
133
134#define INT_EN 0x5C
135#define INT_EN_SW_INT_EN_ 0x80000000
136#define INT_EN_TXSTOP_INT_EN_ 0x02000000
137#define INT_EN_RXSTOP_INT_EN_ 0x01000000
138#define INT_EN_RXDFH_INT_EN_ 0x00800000
139#define INT_EN_TIOC_INT_EN_ 0x00200000
140#define INT_EN_RXD_INT_EN_ 0x00100000
141#define INT_EN_GPT_INT_EN_ 0x00080000
142#define INT_EN_PHY_INT_EN_ 0x00040000
143#define INT_EN_PME_INT_EN_ 0x00020000
144#define INT_EN_TXSO_EN_ 0x00010000
145#define INT_EN_RWT_EN_ 0x00008000
146#define INT_EN_RXE_EN_ 0x00004000
147#define INT_EN_TXE_EN_ 0x00002000
148#define INT_EN_TDFU_EN_ 0x00000800
149#define INT_EN_TDFO_EN_ 0x00000400
150#define INT_EN_TDFA_EN_ 0x00000200
151#define INT_EN_TSFF_EN_ 0x00000100
152#define INT_EN_TSFL_EN_ 0x00000080
153#define INT_EN_RXDF_EN_ 0x00000040
154#define INT_EN_RDFL_EN_ 0x00000020
155#define INT_EN_RSFF_EN_ 0x00000010
156#define INT_EN_RSFL_EN_ 0x00000008
157#define INT_EN_GPIO2_INT_ 0x00000004
158#define INT_EN_GPIO1_INT_ 0x00000002
159#define INT_EN_GPIO0_INT_ 0x00000001
160
161#define BYTE_TEST 0x64
162
163#define FIFO_INT 0x68
164#define FIFO_INT_TX_AVAIL_LEVEL_ 0xFF000000
165#define FIFO_INT_TX_STS_LEVEL_ 0x00FF0000
166#define FIFO_INT_RX_AVAIL_LEVEL_ 0x0000FF00
167#define FIFO_INT_RX_STS_LEVEL_ 0x000000FF
168
169#define RX_CFG 0x6C
170#define RX_CFG_RX_END_ALGN_ 0xC0000000
171#define RX_CFG_RX_END_ALGN4_ 0x00000000
172#define RX_CFG_RX_END_ALGN16_ 0x40000000
173#define RX_CFG_RX_END_ALGN32_ 0x80000000
174#define RX_CFG_RX_DMA_CNT_ 0x0FFF0000
175#define RX_CFG_RX_DUMP_ 0x00008000
176#define RX_CFG_RXDOFF_ 0x00001F00
177
178#define TX_CFG 0x70
179#define TX_CFG_TXS_DUMP_ 0x00008000
180#define TX_CFG_TXD_DUMP_ 0x00004000
181#define TX_CFG_TXSAO_ 0x00000004
182#define TX_CFG_TX_ON_ 0x00000002
183#define TX_CFG_STOP_TX_ 0x00000001
184
185#define HW_CFG 0x74
186#define HW_CFG_TTM_ 0x00200000
187#define HW_CFG_SF_ 0x00100000
188#define HW_CFG_TX_FIF_SZ_ 0x000F0000
189#define HW_CFG_TR_ 0x00003000
190#define HW_CFG_SRST_ 0x00000001
191
192/* only available on 115/117 */
193#define HW_CFG_PHY_CLK_SEL_ 0x00000060
194#define HW_CFG_PHY_CLK_SEL_INT_PHY_ 0x00000000
195#define HW_CFG_PHY_CLK_SEL_EXT_PHY_ 0x00000020
196#define HW_CFG_PHY_CLK_SEL_CLK_DIS_ 0x00000040
197#define HW_CFG_SMI_SEL_ 0x00000010
198#define HW_CFG_EXT_PHY_DET_ 0x00000008
199#define HW_CFG_EXT_PHY_EN_ 0x00000004
200#define HW_CFG_SRST_TO_ 0x00000002
201
202/* only available on 116/118 */
203#define HW_CFG_32_16_BIT_MODE_ 0x00000004
204
205#define RX_DP_CTRL 0x78
206#define RX_DP_CTRL_RX_FFWD_ 0x80000000
207
208#define RX_FIFO_INF 0x7C
209#define RX_FIFO_INF_RXSUSED_ 0x00FF0000
210#define RX_FIFO_INF_RXDUSED_ 0x0000FFFF
211
212#define TX_FIFO_INF 0x80
213#define TX_FIFO_INF_TSUSED_ 0x00FF0000
214#define TX_FIFO_INF_TDFREE_ 0x0000FFFF
215
216#define PMT_CTRL 0x84
217#define PMT_CTRL_PM_MODE_ 0x00003000
218#define PMT_CTRL_PM_MODE_D0_ 0x00000000
219#define PMT_CTRL_PM_MODE_D1_ 0x00001000
220#define PMT_CTRL_PM_MODE_D2_ 0x00002000
221#define PMT_CTRL_PM_MODE_D3_ 0x00003000
222#define PMT_CTRL_PHY_RST_ 0x00000400
223#define PMT_CTRL_WOL_EN_ 0x00000200
224#define PMT_CTRL_ED_EN_ 0x00000100
225#define PMT_CTRL_PME_TYPE_ 0x00000040
226#define PMT_CTRL_WUPS_ 0x00000030
227#define PMT_CTRL_WUPS_NOWAKE_ 0x00000000
228#define PMT_CTRL_WUPS_ED_ 0x00000010
229#define PMT_CTRL_WUPS_WOL_ 0x00000020
230#define PMT_CTRL_WUPS_MULTI_ 0x00000030
231#define PMT_CTRL_PME_IND_ 0x00000008
232#define PMT_CTRL_PME_POL_ 0x00000004
233#define PMT_CTRL_PME_EN_ 0x00000002
234#define PMT_CTRL_READY_ 0x00000001
235
236#define GPIO_CFG 0x88
237#define GPIO_CFG_LED3_EN_ 0x40000000
238#define GPIO_CFG_LED2_EN_ 0x20000000
239#define GPIO_CFG_LED1_EN_ 0x10000000
240#define GPIO_CFG_GPIO2_INT_POL_ 0x04000000
241#define GPIO_CFG_GPIO1_INT_POL_ 0x02000000
242#define GPIO_CFG_GPIO0_INT_POL_ 0x01000000
243#define GPIO_CFG_EEPR_EN_ 0x00700000
244#define GPIO_CFG_GPIOBUF2_ 0x00040000
245#define GPIO_CFG_GPIOBUF1_ 0x00020000
246#define GPIO_CFG_GPIOBUF0_ 0x00010000
247#define GPIO_CFG_GPIODIR2_ 0x00000400
248#define GPIO_CFG_GPIODIR1_ 0x00000200
249#define GPIO_CFG_GPIODIR0_ 0x00000100
250#define GPIO_CFG_GPIOD4_ 0x00000020
251#define GPIO_CFG_GPIOD3_ 0x00000010
252#define GPIO_CFG_GPIOD2_ 0x00000004
253#define GPIO_CFG_GPIOD1_ 0x00000002
254#define GPIO_CFG_GPIOD0_ 0x00000001
255
256#define GPT_CFG 0x8C
257#define GPT_CFG_TIMER_EN_ 0x20000000
258#define GPT_CFG_GPT_LOAD_ 0x0000FFFF
259
260#define GPT_CNT 0x90
261#define GPT_CNT_GPT_CNT_ 0x0000FFFF
262
263#define WORD_SWAP 0x98
264
265#define FREE_RUN 0x9C
266
267#define RX_DROP 0xA0
268
269#define MAC_CSR_CMD 0xA4
270#define MAC_CSR_CMD_CSR_BUSY_ 0x80000000
271#define MAC_CSR_CMD_R_NOT_W_ 0x40000000
272#define MAC_CSR_CMD_CSR_ADDR_ 0x000000FF
273
274#define MAC_CSR_DATA 0xA8
275
276#define AFC_CFG 0xAC
277#define AFC_CFG_AFC_HI_ 0x00FF0000
278#define AFC_CFG_AFC_LO_ 0x0000FF00
279#define AFC_CFG_BACK_DUR_ 0x000000F0
280#define AFC_CFG_FCMULT_ 0x00000008
281#define AFC_CFG_FCBRD_ 0x00000004
282#define AFC_CFG_FCADD_ 0x00000002
283#define AFC_CFG_FCANY_ 0x00000001
284
285#define E2P_CMD 0xB0
286#define E2P_CMD_EPC_BUSY_ 0x80000000
287#define E2P_CMD_EPC_CMD_ 0x70000000
288#define E2P_CMD_EPC_CMD_READ_ 0x00000000
289#define E2P_CMD_EPC_CMD_EWDS_ 0x10000000
290#define E2P_CMD_EPC_CMD_EWEN_ 0x20000000
291#define E2P_CMD_EPC_CMD_WRITE_ 0x30000000
292#define E2P_CMD_EPC_CMD_WRAL_ 0x40000000
293#define E2P_CMD_EPC_CMD_ERASE_ 0x50000000
294#define E2P_CMD_EPC_CMD_ERAL_ 0x60000000
295#define E2P_CMD_EPC_CMD_RELOAD_ 0x70000000
296#define E2P_CMD_EPC_TIMEOUT_ 0x00000200
297#define E2P_CMD_MAC_ADDR_LOADED_ 0x00000100
298#define E2P_CMD_EPC_ADDR_ 0x000000FF
299
300#define E2P_DATA 0xB4
301#define E2P_DATA_EEPROM_DATA_ 0x000000FF
302#define LAN_REGISTER_EXTENT 0x00000100
303
304/*
305 * MAC Control and Status Register (Indirect Address)
306 * Offset (through the MAC_CSR CMD and DATA port)
307 */
308#define MAC_CR 0x01
309#define MAC_CR_RXALL_ 0x80000000
310#define MAC_CR_HBDIS_ 0x10000000
311#define MAC_CR_RCVOWN_ 0x00800000
312#define MAC_CR_LOOPBK_ 0x00200000
313#define MAC_CR_FDPX_ 0x00100000
314#define MAC_CR_MCPAS_ 0x00080000
315#define MAC_CR_PRMS_ 0x00040000
316#define MAC_CR_INVFILT_ 0x00020000
317#define MAC_CR_PASSBAD_ 0x00010000
318#define MAC_CR_HFILT_ 0x00008000
319#define MAC_CR_HPFILT_ 0x00002000
320#define MAC_CR_LCOLL_ 0x00001000
321#define MAC_CR_BCAST_ 0x00000800
322#define MAC_CR_DISRTY_ 0x00000400
323#define MAC_CR_PADSTR_ 0x00000100
324#define MAC_CR_BOLMT_MASK_ 0x000000C0
325#define MAC_CR_DFCHK_ 0x00000020
326#define MAC_CR_TXEN_ 0x00000008
327#define MAC_CR_RXEN_ 0x00000004
328
329#define ADDRH 0x02
330
331#define ADDRL 0x03
332
333#define HASHH 0x04
334
335#define HASHL 0x05
336
337#define MII_ACC 0x06
338#define MII_ACC_PHY_ADDR_ 0x0000F800
339#define MII_ACC_MIIRINDA_ 0x000007C0
340#define MII_ACC_MII_WRITE_ 0x00000002
341#define MII_ACC_MII_BUSY_ 0x00000001
342
343#define MII_DATA 0x07
344
345#define FLOW 0x08
346#define FLOW_FCPT_ 0xFFFF0000
347#define FLOW_FCPASS_ 0x00000004
348#define FLOW_FCEN_ 0x00000002
349#define FLOW_FCBSY_ 0x00000001
350
351#define VLAN1 0x09
352
353#define VLAN2 0x0A
354
355#define WUFF 0x0B
356
357#define WUCSR 0x0C
358#define WUCSR_GUE_ 0x00000200
359#define WUCSR_WUFR_ 0x00000040
360#define WUCSR_MPR_ 0x00000020
361#define WUCSR_WAKE_EN_ 0x00000004
362#define WUCSR_MPEN_ 0x00000002
363
364/*
365 * Phy definitions (vendor-specific)
366 */
367#define LAN9118_PHY_ID 0x00C0001C
368
369#define MII_INTSTS 0x1D
370
371#define MII_INTMSK 0x1E
372#define PHY_INTMSK_AN_RCV_ (1 << 1)
373#define PHY_INTMSK_PDFAULT_ (1 << 2)
374#define PHY_INTMSK_AN_ACK_ (1 << 3)
375#define PHY_INTMSK_LNKDOWN_ (1 << 4)
376#define PHY_INTMSK_RFAULT_ (1 << 5)
377#define PHY_INTMSK_AN_COMP_ (1 << 6)
378#define PHY_INTMSK_ENERGYON_ (1 << 7)
379#define PHY_INTMSK_DEFAULT_ (PHY_INTMSK_ENERGYON_ | \
380 PHY_INTMSK_AN_COMP_ | \
381 PHY_INTMSK_RFAULT_ | \
382 PHY_INTMSK_LNKDOWN_)
383
384#define ADVERTISE_PAUSE_ALL (ADVERTISE_PAUSE_CAP | \
385 ADVERTISE_PAUSE_ASYM)
386
387#define LPA_PAUSE_ALL (LPA_PAUSE_CAP | \
388 LPA_PAUSE_ASYM)
389
390#endif /* __SMSC911X_H__ */
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
new file mode 100644
index 000000000000..27e017d96966
--- /dev/null
+++ b/drivers/net/smsc9420.c
@@ -0,0 +1,1744 @@
1 /***************************************************************************
2 *
3 * Copyright (C) 2007,2008 SMSC
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 ***************************************************************************
20 */
21
22#include <linux/kernel.h>
23#include <linux/netdevice.h>
24#include <linux/phy.h>
25#include <linux/pci.h>
26#include <linux/if_vlan.h>
27#include <linux/dma-mapping.h>
28#include <linux/crc32.h>
29#include <asm/unaligned.h>
30#include "smsc9420.h"
31
32#define DRV_NAME "smsc9420"
33#define PFX DRV_NAME ": "
34#define DRV_MDIONAME "smsc9420-mdio"
35#define DRV_DESCRIPTION "SMSC LAN9420 driver"
36#define DRV_VERSION "1.01"
37
38MODULE_LICENSE("GPL");
39MODULE_VERSION(DRV_VERSION);
40
/* One DMA descriptor.  NOTE(review): assumed to mirror the LAN9420
 * hardware descriptor layout (four consecutive 32-bit words) -- do not
 * reorder or pad; confirm against the datasheet. */
struct smsc9420_dma_desc {
	u32 status;
	u32 length;
	u32 buffer1;
	u32 buffer2;
};

/* Host-side bookkeeping for one ring slot: the skb and its DMA mapping. */
struct smsc9420_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
};

/* Per-adapter private state, stored in netdev_priv(dev). */
struct smsc9420_pdata {
	void __iomem *base_addr;	/* mapped PCI register window */
	struct pci_dev *pdev;
	struct net_device *dev;

	struct smsc9420_dma_desc *rx_ring;
	struct smsc9420_dma_desc *tx_ring;
	struct smsc9420_ring_info *tx_buffers;
	struct smsc9420_ring_info *rx_buffers;
	dma_addr_t rx_dma_addr;		/* bus address of rx_ring */
	dma_addr_t tx_dma_addr;		/* bus address of tx_ring */
	int tx_ring_head, tx_ring_tail;
	int rx_ring_head, rx_ring_tail;

	spinlock_t int_lock;
	spinlock_t phy_lock;	/* serializes MII_ACCESS/MII_DATA sequences */

	struct napi_struct napi;

	bool software_irq_signal;
	bool rx_csum;
	u32 msg_enable;		/* NETIF_MSG_* mask for smsc_dbg/info/warn */

	struct phy_device *phy_dev;
	struct mii_bus *mii_bus;
	int phy_irq[PHY_MAX_ADDR];
	int last_duplex;
	int last_carrier;
};
82
/* PCI IDs this driver binds to (the single SMSC LAN9420 device). */
static const struct pci_device_id smsc9420_id_table[] = {
	{ PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }	/* terminator */
};

MODULE_DEVICE_TABLE(pci, smsc9420_id_table);
89
/* Default message categories for the "debug" module parameter. */
#define SMSC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

/* NOTE(review): smsc_debug is not referenced in this part of the file --
 * confirm it is used elsewhere before relying on it. */
static uint smsc_debug;
/* -1 as uint = all bits set; presumably normalized against
 * SMSC_MSG_DEFAULT at probe time -- confirm in the probe path. */
static uint debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "debug level");

/* Conditional loggers keyed off pd->msg_enable; all three require a local
 * variable named "pd" to be in scope at the call site. */
#define smsc_dbg(TYPE, f, a...) \
do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
	printk(KERN_DEBUG PFX f "\n", ## a); \
} while (0)

#define smsc_info(TYPE, f, a...) \
do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
	printk(KERN_INFO PFX f "\n", ## a); \
} while (0)

#define smsc_warn(TYPE, f, a...) \
do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
	printk(KERN_WARNING PFX f "\n", ## a); \
} while (0)
111
112static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
113{
114 return ioread32(pd->base_addr + offset);
115}
116
117static inline void
118smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value)
119{
120 iowrite32(value, pd->base_addr + offset);
121}
122
123static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd)
124{
125 /* to ensure PCI write completion, we must perform a PCI read */
126 smsc9420_reg_read(pd, ID_REV);
127}
128
/*
 * Read one PHY register through the MAC's indirect MII interface.
 * Returns the 16-bit register value on success, or -EIO if the interface
 * was already busy or the read did not finish within ~50us of polling.
 * phy_lock serializes the MII_ACCESS/MII_DATA register sequence against
 * concurrent MII operations.
 */
static int smsc9420_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
{
	struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv;
	unsigned long flags;
	u32 addr;
	int i, reg = -EIO;

	spin_lock_irqsave(&pd->phy_lock, flags);

	/* confirm MII not busy */
	if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) {
		smsc_warn(DRV, "MII is busy???");
		goto out;
	}

	/* set the address, index & direction (read from PHY) */
	addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) |
		MII_ACCESS_MII_READ_;
	smsc9420_reg_write(pd, MII_ACCESS, addr);

	/* wait for read to complete with 50us timeout */
	for (i = 0; i < 5; i++) {
		if (!(smsc9420_reg_read(pd, MII_ACCESS) &
			MII_ACCESS_MII_BUSY_)) {
			/* completed: fetch the 16-bit result */
			reg = (u16)smsc9420_reg_read(pd, MII_DATA);
			goto out;
		}
		udelay(10);
	}

	smsc_warn(DRV, "MII busy timeout!");

out:
	spin_unlock_irqrestore(&pd->phy_lock, flags);
	return reg;
}
165
/*
 * Write one PHY register through the MAC's indirect MII interface.
 * Returns 0 on success, or -EIO if the interface was already busy or the
 * write did not finish within ~50us of polling.  The data must be staged
 * in MII_DATA before the command is issued via MII_ACCESS; phy_lock keeps
 * the two-register sequence atomic with respect to other MII operations.
 */
static int smsc9420_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
			   u16 val)
{
	struct smsc9420_pdata *pd = (struct smsc9420_pdata *)bus->priv;
	unsigned long flags;
	u32 addr;
	int i, reg = -EIO;

	spin_lock_irqsave(&pd->phy_lock, flags);

	/* confirm MII not busy */
	if ((smsc9420_reg_read(pd, MII_ACCESS) & MII_ACCESS_MII_BUSY_)) {
		smsc_warn(DRV, "MII is busy???");
		goto out;
	}

	/* put the data to write in the MAC */
	smsc9420_reg_write(pd, MII_DATA, (u32)val);

	/* set the address, index & direction (write to PHY) */
	addr = ((phyaddr & 0x1F) << 11) | ((regidx & 0x1F) << 6) |
		MII_ACCESS_MII_WRITE_;
	smsc9420_reg_write(pd, MII_ACCESS, addr);

	/* wait for write to complete with 50us timeout */
	for (i = 0; i < 5; i++) {
		if (!(smsc9420_reg_read(pd, MII_ACCESS) &
			MII_ACCESS_MII_BUSY_)) {
			reg = 0;
			goto out;
		}
		udelay(10);
	}

	smsc_warn(DRV, "MII busy timeout!");

out:
	spin_unlock_irqrestore(&pd->phy_lock, flags);
	return reg;
}
206
207/* Returns hash bit number for given MAC address
208 * Example:
209 * 01 00 5E 00 00 01 -> returns bit number 31 */
210static u32 smsc9420_hash(u8 addr[ETH_ALEN])
211{
212 return (ether_crc(ETH_ALEN, addr) >> 26) & 0x3f;
213}
214
/*
 * Ask the EEPROM controller to reload its contents (including the MAC
 * address registers) from the EEPROM.  Returns 0 on success, -EIO if the
 * controller is busy at entry or the reload does not complete within
 * ~1s (100000 polls of 10us each).
 */
static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd)
{
	int timeout = 100000;

	BUG_ON(!pd);

	if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
		smsc_dbg(DRV, "smsc9420_eeprom_reload: Eeprom busy");
		return -EIO;
	}

	smsc9420_reg_write(pd, E2P_CMD,
		(E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_RELOAD_));

	do {
		udelay(10);
		/* success as soon as the BUSY bit clears */
		if (!(smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_))
			return 0;
	} while (timeout--);

	smsc_warn(DRV, "smsc9420_eeprom_reload: Eeprom timed out");
	return -EIO;
}
238
239/* Standard ioctls for mii-tool */
240static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
241{
242 struct smsc9420_pdata *pd = netdev_priv(dev);
243
244 if (!netif_running(dev) || !pd->phy_dev)
245 return -EINVAL;
246
247 return phy_mii_ioctl(pd->phy_dev, if_mii(ifr), cmd);
248}
249
250static int smsc9420_ethtool_get_settings(struct net_device *dev,
251 struct ethtool_cmd *cmd)
252{
253 struct smsc9420_pdata *pd = netdev_priv(dev);
254
255 cmd->maxtxpkt = 1;
256 cmd->maxrxpkt = 1;
257 return phy_ethtool_gset(pd->phy_dev, cmd);
258}
259
260static int smsc9420_ethtool_set_settings(struct net_device *dev,
261 struct ethtool_cmd *cmd)
262{
263 struct smsc9420_pdata *pd = netdev_priv(dev);
264
265 return phy_ethtool_sset(pd->phy_dev, cmd);
266}
267
268static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
269 struct ethtool_drvinfo *drvinfo)
270{
271 struct smsc9420_pdata *pd = netdev_priv(netdev);
272
273 strcpy(drvinfo->driver, DRV_NAME);
274 strcpy(drvinfo->bus_info, pci_name(pd->pdev));
275 strcpy(drvinfo->version, DRV_VERSION);
276}
277
278static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
279{
280 struct smsc9420_pdata *pd = netdev_priv(netdev);
281 return pd->msg_enable;
282}
283
284static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data)
285{
286 struct smsc9420_pdata *pd = netdev_priv(netdev);
287 pd->msg_enable = data;
288}
289
290static int smsc9420_ethtool_nway_reset(struct net_device *netdev)
291{
292 struct smsc9420_pdata *pd = netdev_priv(netdev);
293 return phy_start_aneg(pd->phy_dev);
294}
295
296static int smsc9420_ethtool_getregslen(struct net_device *dev)
297{
298 /* all smsc9420 registers plus all phy registers */
299 return 0x100 + (32 * sizeof(u32));
300}
301
/* ethtool: dump registers.  Emits all 0x100 bytes of CSR space (read as
 * 64 u32 words) followed by PHY registers 0..31, each widened to u32.
 * The total size must match smsc9420_ethtool_getregslen(). */
static void
smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
			 void *buf)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	struct phy_device *phy_dev = pd->phy_dev;
	unsigned int i, j = 0;
	u32 *data = buf;

	regs->version = smsc9420_reg_read(pd, ID_REV);
	for (i = 0; i < 0x100; i += (sizeof(u32)))
		data[j++] = smsc9420_reg_read(pd, i);

	for (i = 0; i <= 31; i++)
		data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i);
}
318
/* Prepare the chip for EEPROM commands by clearing the GPIO_CFG_EEPR_EN_
 * field, then allow 1ms for the change to take effect.
 * NOTE(review): assumes the all-zero value of this mux field routes the
 * pins to the EEPROM controller -- confirm against the LAN9420 datasheet. */
static void smsc9420_eeprom_enable_access(struct smsc9420_pdata *pd)
{
	unsigned int temp = smsc9420_reg_read(pd, GPIO_CFG);
	temp &= ~GPIO_CFG_EEPR_EN_;
	smsc9420_reg_write(pd, GPIO_CFG, temp);
	msleep(1);
}
326
327static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op)
328{
329 int timeout = 100;
330 u32 e2cmd;
331
332 smsc_dbg(HW, "op 0x%08x", op);
333 if (smsc9420_reg_read(pd, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
334 smsc_warn(HW, "Busy at start");
335 return -EBUSY;
336 }
337
338 e2cmd = op | E2P_CMD_EPC_BUSY_;
339 smsc9420_reg_write(pd, E2P_CMD, e2cmd);
340
341 do {
342 msleep(1);
343 e2cmd = smsc9420_reg_read(pd, E2P_CMD);
344 } while ((e2cmd & E2P_CMD_EPC_BUSY_) && (timeout--));
345
346 if (!timeout) {
347 smsc_info(HW, "TIMED OUT");
348 return -EAGAIN;
349 }
350
351 if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
352 smsc_info(HW, "Error occured during eeprom operation");
353 return -EINVAL;
354 }
355
356 return 0;
357}
358
/* Read the EEPROM byte at "address" into data[address].  Note the buffer
 * is indexed by the absolute address, so callers must provide at least
 * address + 1 bytes of space.  Returns 0 on success or a negative errno
 * from smsc9420_eeprom_send_cmd(). */
static int smsc9420_eeprom_read_location(struct smsc9420_pdata *pd,
					 u8 address, u8 *data)
{
	u32 op = E2P_CMD_EPC_CMD_READ_ | address;
	int ret;

	smsc_dbg(HW, "address 0x%x", address);
	ret = smsc9420_eeprom_send_cmd(pd, op);

	if (!ret)
		data[address] = smsc9420_reg_read(pd, E2P_DATA);

	return ret;
}
373
/* Write one EEPROM byte at "address": erase the location first, then
 * stage the value in E2P_DATA and issue the WRITE command.  Returns 0 on
 * success or a negative errno from smsc9420_eeprom_send_cmd(); the write
 * is skipped if the erase fails.  Caller must have enabled write access
 * (EWEN) beforehand. */
static int smsc9420_eeprom_write_location(struct smsc9420_pdata *pd,
					  u8 address, u8 data)
{
	u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
	int ret;

	smsc_dbg(HW, "address 0x%x, data 0x%x", address, data);
	ret = smsc9420_eeprom_send_cmd(pd, op);

	if (!ret) {
		op = E2P_CMD_EPC_CMD_WRITE_ | address;
		smsc9420_reg_write(pd, E2P_DATA, (u32)data);
		ret = smsc9420_eeprom_send_cmd(pd, op);
	}

	return ret;
}
391
392static int smsc9420_ethtool_get_eeprom_len(struct net_device *dev)
393{
394 return SMSC9420_EEPROM_SIZE;
395}
396
397static int smsc9420_ethtool_get_eeprom(struct net_device *dev,
398 struct ethtool_eeprom *eeprom, u8 *data)
399{
400 struct smsc9420_pdata *pd = netdev_priv(dev);
401 u8 eeprom_data[SMSC9420_EEPROM_SIZE];
402 int len, i;
403
404 smsc9420_eeprom_enable_access(pd);
405
406 len = min(eeprom->len, SMSC9420_EEPROM_SIZE);
407 for (i = 0; i < len; i++) {
408 int ret = smsc9420_eeprom_read_location(pd, i, eeprom_data);
409 if (ret < 0) {
410 eeprom->len = 0;
411 return ret;
412 }
413 }
414
415 memcpy(data, &eeprom_data[eeprom->offset], len);
416 eeprom->len = len;
417 return 0;
418}
419
/* ethtool: write a single byte at eeprom->offset.  Enables EEPROM write
 * access (EWEN), performs the erase+write, then disables write access
 * (EWDS) regardless of the result.  Always reports eeprom->len = 1.
 * NOTE(review): eeprom->offset is not range-checked here and
 * eeprom->magic is not validated -- confirm the ethtool core or callers
 * guarantee both. */
static int smsc9420_ethtool_set_eeprom(struct net_device *dev,
				       struct ethtool_eeprom *eeprom, u8 *data)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	int ret;

	smsc9420_eeprom_enable_access(pd);
	smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWEN_);
	ret = smsc9420_eeprom_write_location(pd, eeprom->offset, *data);
	smsc9420_eeprom_send_cmd(pd, E2P_CMD_EPC_CMD_EWDS_);

	/* Single byte write, according to man page */
	eeprom->len = 1;

	return ret;
}
436
/* ethtool method table for the LAN9420. */
static const struct ethtool_ops smsc9420_ethtool_ops = {
	.get_settings = smsc9420_ethtool_get_settings,
	.set_settings = smsc9420_ethtool_set_settings,
	.get_drvinfo = smsc9420_ethtool_get_drvinfo,
	.get_msglevel = smsc9420_ethtool_get_msglevel,
	.set_msglevel = smsc9420_ethtool_set_msglevel,
	.nway_reset = smsc9420_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = smsc9420_ethtool_get_eeprom_len,
	.get_eeprom = smsc9420_ethtool_get_eeprom,
	.set_eeprom = smsc9420_ethtool_set_eeprom,
	.get_regs_len = smsc9420_ethtool_getregslen,
	.get_regs = smsc9420_ethtool_getregs,
};
451
452/* Sets the device MAC address to dev_addr */
453static void smsc9420_set_mac_address(struct net_device *dev)
454{
455 struct smsc9420_pdata *pd = netdev_priv(dev);
456 u8 *dev_addr = dev->dev_addr;
457 u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4];
458 u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
459 (dev_addr[1] << 8) | dev_addr[0];
460
461 smsc9420_reg_write(pd, ADDRH, mac_high16);
462 smsc9420_reg_write(pd, ADDRL, mac_low32);
463}
464
/*
 * Choose the MAC address at bring-up:
 *  1) if dev->dev_addr is already valid (set by platform/boot code), push
 *     it into the ADDRH/ADDRL registers;
 *  2) otherwise read ADDRH/ADDRL back from the chip (an attached EEPROM
 *     will already have loaded them) and accept the value if valid;
 *  3) otherwise fall back to a random address.
 */
static void smsc9420_check_mac_address(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);

	/* Check if mac address has been specified when bringing interface up */
	if (is_valid_ether_addr(dev->dev_addr)) {
		smsc9420_set_mac_address(dev);
		smsc_dbg(PROBE, "MAC Address is specified by configuration");
	} else {
		/* Try reading mac address from device. if EEPROM is present
		 * it will already have been set */
		u32 mac_high16 = smsc9420_reg_read(pd, ADDRH);
		u32 mac_low32 = smsc9420_reg_read(pd, ADDRL);
		/* ADDRL holds bytes 0-3, ADDRH bytes 4-5, LSB first */
		dev->dev_addr[0] = (u8)(mac_low32);
		dev->dev_addr[1] = (u8)(mac_low32 >> 8);
		dev->dev_addr[2] = (u8)(mac_low32 >> 16);
		dev->dev_addr[3] = (u8)(mac_low32 >> 24);
		dev->dev_addr[4] = (u8)(mac_high16);
		dev->dev_addr[5] = (u8)(mac_high16 >> 8);

		if (is_valid_ether_addr(dev->dev_addr)) {
			/* eeprom values are valid so use them */
			smsc_dbg(PROBE, "Mac Address is read from EEPROM");
		} else {
			/* eeprom values are invalid, generate random MAC */
			random_ether_addr(dev->dev_addr);
			smsc9420_set_mac_address(dev);
			smsc_dbg(PROBE,
				"MAC Address is set to random_ether_addr");
		}
	}
}
497
498static void smsc9420_stop_tx(struct smsc9420_pdata *pd)
499{
500 u32 dmac_control, mac_cr, dma_intr_ena;
501 int timeOut = 1000;
502
503 /* disable TX DMAC */
504 dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL);
505 dmac_control &= (~DMAC_CONTROL_ST_);
506 smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control);
507
508 /* Wait max 10ms for transmit process to stop */
509 while (timeOut--) {
510 if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_TS_)
511 break;
512 udelay(10);
513 }
514
515 if (!timeOut)
516 smsc_warn(IFDOWN, "TX DMAC failed to stop");
517
518 /* ACK Tx DMAC stop bit */
519 smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_TXPS_);
520
521 /* mask TX DMAC interrupts */
522 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
523 dma_intr_ena &= ~(DMAC_INTR_ENA_TX_);
524 smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
525 smsc9420_pci_flush_write(pd);
526
527 /* stop MAC TX */
528 mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_TXEN_);
529 smsc9420_reg_write(pd, MAC_CR, mac_cr);
530 smsc9420_pci_flush_write(pd);
531}
532
533static void smsc9420_free_tx_ring(struct smsc9420_pdata *pd)
534{
535 int i;
536
537 BUG_ON(!pd->tx_ring);
538
539 if (!pd->tx_buffers)
540 return;
541
542 for (i = 0; i < TX_RING_SIZE; i++) {
543 struct sk_buff *skb = pd->tx_buffers[i].skb;
544
545 if (skb) {
546 BUG_ON(!pd->tx_buffers[i].mapping);
547 pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping,
548 skb->len, PCI_DMA_TODEVICE);
549 dev_kfree_skb_any(skb);
550 }
551
552 pd->tx_ring[i].status = 0;
553 pd->tx_ring[i].length = 0;
554 pd->tx_ring[i].buffer1 = 0;
555 pd->tx_ring[i].buffer2 = 0;
556 }
557 wmb();
558
559 kfree(pd->tx_buffers);
560 pd->tx_buffers = NULL;
561
562 pd->tx_ring_head = 0;
563 pd->tx_ring_tail = 0;
564}
565
566static void smsc9420_free_rx_ring(struct smsc9420_pdata *pd)
567{
568 int i;
569
570 BUG_ON(!pd->rx_ring);
571
572 if (!pd->rx_buffers)
573 return;
574
575 for (i = 0; i < RX_RING_SIZE; i++) {
576 if (pd->rx_buffers[i].skb)
577 dev_kfree_skb_any(pd->rx_buffers[i].skb);
578
579 if (pd->rx_buffers[i].mapping)
580 pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping,
581 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
582
583 pd->rx_ring[i].status = 0;
584 pd->rx_ring[i].length = 0;
585 pd->rx_ring[i].buffer1 = 0;
586 pd->rx_ring[i].buffer2 = 0;
587 }
588 wmb();
589
590 kfree(pd->rx_buffers);
591 pd->rx_buffers = NULL;
592
593 pd->rx_ring_head = 0;
594 pd->rx_ring_tail = 0;
595}
596
597static void smsc9420_stop_rx(struct smsc9420_pdata *pd)
598{
599 int timeOut = 1000;
600 u32 mac_cr, dmac_control, dma_intr_ena;
601
602 /* mask RX DMAC interrupts */
603 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
604 dma_intr_ena &= (~DMAC_INTR_ENA_RX_);
605 smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
606 smsc9420_pci_flush_write(pd);
607
608 /* stop RX MAC prior to stoping DMA */
609 mac_cr = smsc9420_reg_read(pd, MAC_CR) & (~MAC_CR_RXEN_);
610 smsc9420_reg_write(pd, MAC_CR, mac_cr);
611 smsc9420_pci_flush_write(pd);
612
613 /* stop RX DMAC */
614 dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL);
615 dmac_control &= (~DMAC_CONTROL_SR_);
616 smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control);
617 smsc9420_pci_flush_write(pd);
618
619 /* wait up to 10ms for receive to stop */
620 while (timeOut--) {
621 if (smsc9420_reg_read(pd, DMAC_STATUS) & DMAC_STS_RS_)
622 break;
623 udelay(10);
624 }
625
626 if (!timeOut)
627 smsc_warn(IFDOWN, "RX DMAC did not stop! timeout.");
628
629 /* ACK the Rx DMAC stop bit */
630 smsc9420_reg_write(pd, DMAC_STATUS, DMAC_STS_RXPS_);
631}
632
/*
 * Interrupt handler.  dev_id is the struct smsc9420_pdata * registered
 * with request_irq() in smsc9420_open().  Handles three sources:
 * TX DMA completion (wakes the transmit queue), RX DMA activity (masks
 * further RX interrupts and schedules NAPI), and the software interrupt
 * used by the IRQ self-test in smsc9420_open().
 */
static irqreturn_t smsc9420_isr(int irq, void *dev_id)
{
	struct smsc9420_pdata *pd = dev_id;
	u32 int_cfg, int_sts, int_ctl;
	irqreturn_t ret = IRQ_NONE;
	ulong flags;

	BUG_ON(!pd);
	BUG_ON(!pd->base_addr);

	int_cfg = smsc9420_reg_read(pd, INT_CFG);

	/* check if it's our interrupt (the line is shared, so both the
	 * enable and the asserted bits must be set) */
	if ((int_cfg & (INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_)) !=
		(INT_CFG_IRQ_EN_ | INT_CFG_IRQ_INT_))
		return IRQ_NONE;

	int_sts = smsc9420_reg_read(pd, INT_STAT);

	if (likely(INT_STAT_DMAC_INT_ & int_sts)) {
		u32 status = smsc9420_reg_read(pd, DMAC_STATUS);
		u32 ints_to_clear = 0;

		if (status & DMAC_STS_TX_) {
			ints_to_clear |= (DMAC_STS_TX_ | DMAC_STS_NIS_);
			netif_wake_queue(pd->dev);
		}

		if (status & DMAC_STS_RX_) {
			/* mask RX DMAC interrupts; smsc9420_rx_poll()
			 * re-enables them when it finishes under budget */
			u32 dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
			dma_intr_ena &= (~DMAC_INTR_ENA_RX_);
			smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
			smsc9420_pci_flush_write(pd);

			ints_to_clear |= (DMAC_STS_RX_ | DMAC_STS_NIS_);
			netif_rx_schedule(&pd->napi);
		}

		/* presumably DMAC_STATUS bits are write-1-to-clear */
		if (ints_to_clear)
			smsc9420_reg_write(pd, DMAC_STATUS, ints_to_clear);

		ret = IRQ_HANDLED;
	}

	if (unlikely(INT_STAT_SW_INT_ & int_sts)) {
		/* mask software interrupt; int_lock serializes INT_CTL
		 * access with smsc9420_open() */
		spin_lock_irqsave(&pd->int_lock, flags);
		int_ctl = smsc9420_reg_read(pd, INT_CTL);
		int_ctl &= (~INT_CTL_SW_INT_EN_);
		smsc9420_reg_write(pd, INT_CTL, int_ctl);
		spin_unlock_irqrestore(&pd->int_lock, flags);

		smsc9420_reg_write(pd, INT_STAT, INT_STAT_SW_INT_);
		/* signal the IRQ self-test loop in smsc9420_open() */
		pd->software_irq_signal = true;
		smp_wmb();

		ret = IRQ_HANDLED;
	}

	/* to ensure PCI write completion, we must perform a PCI read */
	smsc9420_pci_flush_write(pd);

	return ret;
}
698
699#ifdef CONFIG_NET_POLL_CONTROLLER
700static void smsc9420_poll_controller(struct net_device *dev)
701{
702 disable_irq(dev->irq);
703 smsc9420_isr(0, dev);
704 enable_irq(dev->irq);
705}
706#endif /* CONFIG_NET_POLL_CONTROLLER */
707
/* Soft-reset the DMA engine via the BUS_MODE software-reset bit, then
 * verify the (self-clearing) bit has cleared after a short delay. */
static void smsc9420_dmac_soft_reset(struct smsc9420_pdata *pd)
{
	smsc9420_reg_write(pd, BUS_MODE, BUS_MODE_SWR_);
	smsc9420_reg_read(pd, BUS_MODE); /* read back to flush the posted write */
	udelay(2); /* allow the reset to complete */
	if (smsc9420_reg_read(pd, BUS_MODE) & BUS_MODE_SWR_)
		smsc_warn(DRV, "Software reset not cleared");
}
716
/* ndo_stop: bring the interface down.  Order matters: master interrupt
 * off first, then stop/free TX and RX paths, release the IRQ, reset the
 * DMA engine and finally tear down the PHY and MII bus created by
 * smsc9420_mii_init().  Always returns 0. */
static int smsc9420_stop(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	u32 int_cfg;
	ulong flags;

	BUG_ON(!pd);
	BUG_ON(!pd->phy_dev);

	/* disable master interrupt */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	spin_unlock_irqrestore(&pd->int_lock, flags);

	netif_tx_disable(dev);
	napi_disable(&pd->napi);

	smsc9420_stop_tx(pd);
	smsc9420_free_tx_ring(pd);

	smsc9420_stop_rx(pd);
	smsc9420_free_rx_ring(pd);

	free_irq(dev->irq, pd);

	smsc9420_dmac_soft_reset(pd);

	phy_stop(pd->phy_dev);

	phy_disconnect(pd->phy_dev);
	pd->phy_dev = NULL;
	mdiobus_unregister(pd->mii_bus);
	mdiobus_free(pd->mii_bus);

	return 0;
}
754
/* Fold the RDES0 status word of a completed RX descriptor into the netdev
 * statistics.  The else-if cascade means each error frame bumps rx_errors
 * plus at most one specific counter, in descriptor/frame/CRC priority. */
static void smsc9420_rx_count_stats(struct net_device *dev, u32 desc_status)
{
	if (unlikely(desc_status & RDES0_ERROR_SUMMARY_)) {
		dev->stats.rx_errors++;
		if (desc_status & RDES0_DESCRIPTOR_ERROR_)
			dev->stats.rx_over_errors++;
		else if (desc_status & (RDES0_FRAME_TOO_LONG_ |
			RDES0_RUNT_FRAME_ | RDES0_COLLISION_SEEN_))
			dev->stats.rx_frame_errors++;
		else if (desc_status & RDES0_CRC_ERROR_)
			dev->stats.rx_crc_errors++;
	}

	if (unlikely(desc_status & RDES0_LENGTH_ERROR_))
		dev->stats.rx_length_errors++;

	/* frames not fully contained in a single descriptor (first and
	 * last flags both set) are also counted as length errors */
	if (unlikely(!((desc_status & RDES0_LAST_DESCRIPTOR_) &&
		(desc_status & RDES0_FIRST_DESCRIPTOR_))))
		dev->stats.rx_length_errors++;

	if (desc_status & RDES0_MULTICAST_FRAME_)
		dev->stats.multicast++;
}
778
/* Detach the skb at RX ring slot "index" and hand it to the network
 * stack.  "status" is the completed RDES0 word for that slot; the slot's
 * skb and mapping entries are cleared for refill by the caller. */
static void smsc9420_rx_handoff(struct smsc9420_pdata *pd, const int index,
				const u32 status)
{
	struct net_device *dev = pd->dev;
	struct sk_buff *skb;
	u16 packet_length = (status & RDES0_FRAME_LENGTH_MASK_)
		>> RDES0_FRAME_LENGTH_SHFT_;

	/* remove crc from packet length */
	packet_length -= 4;

	/* with RX checksum offload enabled the device appends two extra
	 * checksum bytes after the payload */
	if (pd->rx_csum)
		packet_length -= 2;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += packet_length;

	pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping,
		PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
	pd->rx_buffers[index].mapping = 0;

	skb = pd->rx_buffers[index].skb;
	pd->rx_buffers[index].skb = NULL;

	if (pd->rx_csum) {
		/* the hardware checksum sits after the payload and CRC.
		 * NOTE(review): get_unaligned_le16() already yields a
		 * CPU-order value, so the cpu_to_le16() below looks like a
		 * double byte-swap on big-endian — confirm against the
		 * LAN9420 datasheet. */
		u16 hw_csum = get_unaligned_le16(skb_tail_pointer(skb) +
			NET_IP_ALIGN + packet_length + 4);
		put_unaligned_le16(cpu_to_le16(hw_csum), &skb->csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	/* data was DMAed at offset NET_IP_ALIGN (see buffer1 setup in
	 * smsc9420_alloc_rx_buffer) so the IP header ends up aligned */
	skb_reserve(skb, NET_IP_ALIGN);
	skb_put(skb, packet_length);

	skb->protocol = eth_type_trans(skb, dev);

	netif_receive_skb(skb);
	dev->last_rx = jiffies;
}
818
/* Allocate and DMA-map a fresh receive skb for ring slot "index", then
 * hand the descriptor to the DMA engine by setting its OWN bit.
 * Returns 0 on success, -ENOMEM on allocation or mapping failure. */
static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
{
	struct sk_buff *skb = netdev_alloc_skb(pd->dev, PKT_BUF_SZ);
	dma_addr_t mapping;

	/* the slot must be empty — caller only refills consumed slots */
	BUG_ON(pd->rx_buffers[index].skb);
	BUG_ON(pd->rx_buffers[index].mapping);

	if (unlikely(!skb)) {
		smsc_warn(RX_ERR, "Failed to allocate new skb!");
		return -ENOMEM;
	}

	skb->dev = pd->dev;

	mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
				 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pd->pdev, mapping)) {
		dev_kfree_skb_any(skb);
		smsc_warn(RX_ERR, "pci_map_single failed!");
		return -ENOMEM;
	}

	pd->rx_buffers[index].skb = skb;
	pd->rx_buffers[index].mapping = mapping;
	/* offset the DMA target so the IP header lands aligned; matches
	 * the skb_reserve(NET_IP_ALIGN) in smsc9420_rx_handoff() */
	pd->rx_ring[index].buffer1 = mapping + NET_IP_ALIGN;
	pd->rx_ring[index].status = RDES0_OWN_;
	wmb();	/* descriptor writes visible before the device polls it */

	return 0;
}
850
851static void smsc9420_alloc_new_rx_buffers(struct smsc9420_pdata *pd)
852{
853 while (pd->rx_ring_tail != pd->rx_ring_head) {
854 if (smsc9420_alloc_rx_buffer(pd, pd->rx_ring_tail))
855 break;
856
857 pd->rx_ring_tail = (pd->rx_ring_tail + 1) % RX_RING_SIZE;
858 }
859}
860
/* NAPI poll: process up to "budget" completed RX descriptors, refill the
 * ring, account dropped frames and kick the RX DMA engine.  If the work
 * finishes under budget, complete NAPI and re-enable RX DMA interrupts
 * (masked in smsc9420_isr()).  Returns the number of packets handled. */
static int smsc9420_rx_poll(struct napi_struct *napi, int budget)
{
	struct smsc9420_pdata *pd =
		container_of(napi, struct smsc9420_pdata, napi);
	struct net_device *dev = pd->dev;
	u32 drop_frame_cnt, dma_intr_ena, status;
	int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rmb();	/* read status after the device's descriptor write */
		status = pd->rx_ring[pd->rx_ring_head].status;

		/* stop if DMAC owns this dma descriptor */
		if (status & RDES0_OWN_)
			break;

		smsc9420_rx_count_stats(dev, status);
		smsc9420_rx_handoff(pd, pd->rx_ring_head, status);
		pd->rx_ring_head = (pd->rx_ring_head + 1) % RX_RING_SIZE;
		smsc9420_alloc_new_rx_buffers(pd);
	}

	/* fold the hardware drop counter (two fields packed in one
	 * register, bits 15:0 and 26:17) into rx_dropped */
	drop_frame_cnt = smsc9420_reg_read(pd, MISS_FRAME_CNTR);
	dev->stats.rx_dropped +=
	    (drop_frame_cnt & 0xFFFF) + ((drop_frame_cnt >> 17) & 0x3FF);

	/* Kick RXDMA */
	smsc9420_reg_write(pd, RX_POLL_DEMAND, 1);
	smsc9420_pci_flush_write(pd);

	if (work_done < budget) {
		netif_rx_complete(&pd->napi);

		/* re-enable RX DMA interrupts */
		dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
		dma_intr_ena |= (DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_);
		smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
		smsc9420_pci_flush_write(pd);
	}
	return work_done;
}
902
/* Fold a completed TX descriptor's TDES0 status word (and TDES1 length
 * word) into the netdev statistics: error frames bump tx_errors plus the
 * matching specific counters, successful frames bump packet/byte counts,
 * and the descriptor's collision count is accumulated either way. */
static void
smsc9420_tx_update_stats(struct net_device *dev, u32 status, u32 length)
{
	if (unlikely(status & TDES0_ERROR_SUMMARY_)) {
		dev->stats.tx_errors++;
		if (status & (TDES0_EXCESSIVE_DEFERRAL_ |
			TDES0_EXCESSIVE_COLLISIONS_))
			dev->stats.tx_aborted_errors++;

		if (status & (TDES0_LOSS_OF_CARRIER_ | TDES0_NO_CARRIER_))
			dev->stats.tx_carrier_errors++;
	} else {
		dev->stats.tx_packets++;
		/* length field holds the frame size in its low 11 bits,
		 * matching the mask applied in smsc9420_hard_start_xmit() */
		dev->stats.tx_bytes += (length & 0x7FF);
	}

	if (unlikely(status & TDES0_EXCESSIVE_COLLISIONS_)) {
		/* excessive-collision abort: the count field saturates,
		 * account the full 16 attempts */
		dev->stats.collisions += 16;
	} else {
		dev->stats.collisions +=
			(status & TDES0_COLLISION_COUNT_MASK_) >>
			TDES0_COLLISION_COUNT_SHFT_;
	}

	if (unlikely(status & TDES0_HEARTBEAT_FAIL_))
		dev->stats.tx_heartbeat_errors++;
}
930
/* Check for completed dma transfers, update stats and free skbs.
 * Walks the TX ring from tail to head, stopping at the first descriptor
 * the DMA engine still owns; each reclaimed slot is unmapped, its skb
 * freed and its descriptor cleared for reuse. */
static void smsc9420_complete_tx(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);

	while (pd->tx_ring_tail != pd->tx_ring_head) {
		int index = pd->tx_ring_tail;
		u32 status, length;

		rmb();	/* read descriptor after the device's writeback */
		status = pd->tx_ring[index].status;
		length = pd->tx_ring[index].length;

		/* Check if DMA still owns this descriptor */
		if (unlikely(TDES0_OWN_ & status))
			break;

		smsc9420_tx_update_stats(dev, status, length);

		/* a queued slot always has both an skb and a mapping */
		BUG_ON(!pd->tx_buffers[index].skb);
		BUG_ON(!pd->tx_buffers[index].mapping);

		pci_unmap_single(pd->pdev, pd->tx_buffers[index].mapping,
			pd->tx_buffers[index].skb->len, PCI_DMA_TODEVICE);
		pd->tx_buffers[index].mapping = 0;

		dev_kfree_skb_any(pd->tx_buffers[index].skb);
		pd->tx_buffers[index].skb = NULL;

		pd->tx_ring[index].buffer1 = 0;
		wmb();	/* descriptor clear visible before advancing tail */

		pd->tx_ring_tail = (pd->tx_ring_tail + 1) % TX_RING_SIZE;
	}
}
966
/* ndo_start_xmit: queue one skb on the TX ring.  Reclaims completed
 * descriptors first, maps the skb for DMA, fills the descriptor, hands
 * ownership to the DMA engine and kicks it.  The queue is stopped when
 * the second-to-last free slot is about to be taken; the interrupt-on-
 * completion flag set on that frame lets the ISR wake the queue again. */
static int smsc9420_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	dma_addr_t mapping;
	int index = pd->tx_ring_head;
	u32 tmp_desc1;
	bool about_to_take_last_desc =
		(((pd->tx_ring_head + 2) % TX_RING_SIZE) == pd->tx_ring_tail);

	smsc9420_complete_tx(dev);

	rmb();	/* read descriptor state after reclaiming above */
	BUG_ON(pd->tx_ring[index].status & TDES0_OWN_);
	BUG_ON(pd->tx_buffers[index].skb);
	BUG_ON(pd->tx_buffers[index].mapping);

	mapping = pci_map_single(pd->pdev, skb->data,
				 skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pd->pdev, mapping)) {
		/* NOTE(review): NETDEV_TX_BUSY makes the stack requeue the
		 * same skb; if mapping failures are persistent this can
		 * spin — dropping the packet may be preferable.  Confirm
		 * against netdev ndo_start_xmit guidelines. */
		smsc_warn(TX_ERR, "pci_map_single failed, dropping packet");
		return NETDEV_TX_BUSY;
	}

	pd->tx_buffers[index].skb = skb;
	pd->tx_buffers[index].mapping = mapping;

	/* single-descriptor frame: last-segment flag + length (11 bits) */
	tmp_desc1 = (TDES1_LS_ | ((u32)skb->len & 0x7FF));
	if (unlikely(about_to_take_last_desc)) {
		/* request a completion interrupt for this frame and stop
		 * the queue until the ISR wakes it */
		tmp_desc1 |= TDES1_IC_;
		netif_stop_queue(pd->dev);
	}

	/* check if we are at the last descriptor and need to set EOR */
	if (unlikely(index == (TX_RING_SIZE - 1)))
		tmp_desc1 |= TDES1_TER_;

	pd->tx_ring[index].buffer1 = mapping;
	pd->tx_ring[index].length = tmp_desc1;
	wmb();	/* descriptor body visible before ownership transfer */

	/* increment head */
	pd->tx_ring_head = (pd->tx_ring_head + 1) % TX_RING_SIZE;

	/* assign ownership to DMAC */
	pd->tx_ring[index].status = TDES0_OWN_;
	wmb();	/* ownership visible before the poll-demand kick */

	/* kick the DMA */
	smsc9420_reg_write(pd, TX_POLL_DEMAND, 1);
	smsc9420_pci_flush_write(pd);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
1022
1023static struct net_device_stats *smsc9420_get_stats(struct net_device *dev)
1024{
1025 struct smsc9420_pdata *pd = netdev_priv(dev);
1026 u32 counter = smsc9420_reg_read(pd, MISS_FRAME_CNTR);
1027 dev->stats.rx_dropped +=
1028 (counter & 0x0000FFFF) + ((counter >> 17) & 0x000003FF);
1029 return &dev->stats;
1030}
1031
/* ndo_set_multicast_list: program MAC_CR filtering mode and the hash
 * filter registers from the netdev flags and multicast list.  Modes, in
 * priority order: promiscuous, all-multicast, hash-filtered multicast,
 * or unicast/own-address only. */
static void smsc9420_set_multicast_list(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);

	if (dev->flags & IFF_PROMISC) {
		smsc_dbg(HW, "Promiscuous Mode Enabled");
		mac_cr |= MAC_CR_PRMS_;
		mac_cr &= (~MAC_CR_MCPAS_);
		mac_cr &= (~MAC_CR_HPFILT_);
	} else if (dev->flags & IFF_ALLMULTI) {
		smsc_dbg(HW, "Receive all Multicast Enabled");
		mac_cr &= (~MAC_CR_PRMS_);
		mac_cr |= MAC_CR_MCPAS_;
		mac_cr &= (~MAC_CR_HPFILT_);
	} else if (dev->mc_count > 0) {
		struct dev_mc_list *mc_list = dev->mc_list;
		u32 hash_lo = 0, hash_hi = 0;

		smsc_dbg(HW, "Multicast filter enabled");
		/* build the 64-bit hash filter: bit 5 of the hash selects
		 * the high/low register, bits 4:0 the bit within it */
		while (mc_list) {
			u32 bit_num = smsc9420_hash(mc_list->dmi_addr);
			u32 mask = 1 << (bit_num & 0x1F);

			if (bit_num & 0x20)
				hash_hi |= mask;
			else
				hash_lo |= mask;

			mc_list = mc_list->next;
		}
		smsc9420_reg_write(pd, HASHH, hash_hi);
		smsc9420_reg_write(pd, HASHL, hash_lo);

		mac_cr &= (~MAC_CR_PRMS_);
		mac_cr &= (~MAC_CR_MCPAS_);
		mac_cr |= MAC_CR_HPFILT_;
	} else {
		smsc_dbg(HW, "Receive own packets only.");
		/* clear the hash filter so no stale multicast gets in */
		smsc9420_reg_write(pd, HASHH, 0);
		smsc9420_reg_write(pd, HASHL, 0);

		mac_cr &= (~MAC_CR_PRMS_);
		mac_cr &= (~MAC_CR_MCPAS_);
		mac_cr &= (~MAC_CR_HPFILT_);
	}

	smsc9420_reg_write(pd, MAC_CR, mac_cr);
	smsc9420_pci_flush_write(pd);
}
1082
1083static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd)
1084{
1085 struct phy_device *phy_dev = pd->phy_dev;
1086 u32 flow;
1087
1088 if (phy_dev->duplex == DUPLEX_FULL) {
1089 u16 lcladv = phy_read(phy_dev, MII_ADVERTISE);
1090 u16 rmtadv = phy_read(phy_dev, MII_LPA);
1091 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1092
1093 if (cap & FLOW_CTRL_RX)
1094 flow = 0xFFFF0002;
1095 else
1096 flow = 0;
1097
1098 smsc_info(LINK, "rx pause %s, tx pause %s",
1099 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1100 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1101 } else {
1102 smsc_info(LINK, "half duplex");
1103 flow = 0;
1104 }
1105
1106 smsc9420_reg_write(pd, FLOW, flow);
1107}
1108
/* Update link mode if anything has changed. Called periodically when the
 * PHY is in polling mode, even if nothing has changed.  On a duplex
 * change it reprograms MAC_CR and flow control; carrier transitions are
 * only logged (phylib manages the carrier state itself). */
static void smsc9420_phy_adjust_link(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	struct phy_device *phy_dev = pd->phy_dev;
	int carrier;

	if (phy_dev->duplex != pd->last_duplex) {
		u32 mac_cr = smsc9420_reg_read(pd, MAC_CR);
		if (phy_dev->duplex) {
			smsc_dbg(LINK, "full duplex mode");
			mac_cr |= MAC_CR_FDPX_;
		} else {
			smsc_dbg(LINK, "half duplex mode");
			mac_cr &= ~MAC_CR_FDPX_;
		}
		smsc9420_reg_write(pd, MAC_CR, mac_cr);

		/* pause settings can only be resolved once duplex is known */
		smsc9420_phy_update_flowcontrol(pd);
		pd->last_duplex = phy_dev->duplex;
	}

	carrier = netif_carrier_ok(dev);
	if (carrier != pd->last_carrier) {
		if (carrier)
			smsc_dbg(LINK, "carrier OK");
		else
			smsc_dbg(LINK, "no carrier");
		pd->last_carrier = carrier;
	}
}
1141
/* Locate and attach the chip's internal PHY (fixed at MII address 1),
 * connect it to smsc9420_phy_adjust_link(), and restrict the advertised
 * features to what the MAC supports (10/100 plus pause).
 * Returns 0 on success, -ENODEV or a phy_connect() error otherwise. */
static int smsc9420_mii_probe(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	struct phy_device *phydev = NULL;

	BUG_ON(pd->phy_dev);

	/* Device only supports internal PHY at address 1 */
	if (!pd->mii_bus->phy_map[1]) {
		pr_err("%s: no PHY found at address 1\n", dev->name);
		return -ENODEV;
	}

	phydev = pd->mii_bus->phy_map[1];
	smsc_info(PROBE, "PHY addr %d, phy_id 0x%08X", phydev->addr,
		phydev->phy_id);

	/* flags = 0, MII interface; link changes call adjust_link */
	phydev = phy_connect(dev, phydev->dev.bus_id,
		&smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		pr_err("%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq);

	/* mask with MAC supported features */
	phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
			      SUPPORTED_Asym_Pause);
	phydev->advertising = phydev->supported;

	pd->phy_dev = phydev;
	/* force the first adjust_link callback to program duplex/carrier */
	pd->last_duplex = -1;
	pd->last_carrier = -1;

	return 0;
}
1181
/* Allocate, configure and register the MII bus for this NIC (bus id is
 * derived from the PCI bus/devfn so multiple NICs get unique names),
 * then probe the internal PHY.  Uses goto-chain cleanup; returns 0 on
 * success, -ENOMEM or -ENXIO on failure. */
static int smsc9420_mii_init(struct net_device *dev)
{
	struct smsc9420_pdata *pd = netdev_priv(dev);
	int err = -ENXIO, i;

	pd->mii_bus = mdiobus_alloc();
	if (!pd->mii_bus) {
		err = -ENOMEM;
		goto err_out_1;
	}
	pd->mii_bus->name = DRV_MDIONAME;
	snprintf(pd->mii_bus->id, MII_BUS_ID_SIZE, "%x",
		(pd->pdev->bus->number << 8) | pd->pdev->devfn);
	pd->mii_bus->priv = pd;
	pd->mii_bus->read = smsc9420_mii_read;
	pd->mii_bus->write = smsc9420_mii_write;
	pd->mii_bus->irq = pd->phy_irq;
	/* no PHY interrupt line is wired up; poll every PHY address */
	for (i = 0; i < PHY_MAX_ADDR; ++i)
		pd->mii_bus->irq[i] = PHY_POLL;

	/* Mask all PHYs except ID 1 (internal) */
	pd->mii_bus->phy_mask = ~(1 << 1);

	if (mdiobus_register(pd->mii_bus)) {
		smsc_warn(PROBE, "Error registering mii bus");
		goto err_out_free_bus_2;
	}

	if (smsc9420_mii_probe(dev) < 0) {
		smsc_warn(PROBE, "Error probing mii bus");
		goto err_out_unregister_bus_3;
	}

	return 0;

err_out_unregister_bus_3:
	mdiobus_unregister(pd->mii_bus);
err_out_free_bus_2:
	mdiobus_free(pd->mii_bus);
err_out_1:
	return err;
}
1224
/* Allocate the TX bookkeeping array and initialize the (already DMA-
 * allocated) TX descriptor ring to empty, then tell the device where the
 * ring lives.  Returns 0 on success, -ENOMEM on allocation failure. */
static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd)
{
	int i;

	BUG_ON(!pd->tx_ring);

	pd->tx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) *
		TX_RING_SIZE), GFP_KERNEL);
	if (!pd->tx_buffers) {
		smsc_warn(IFUP, "Failed to allocated tx_buffers");
		return -ENOMEM;
	}

	/* Initialize the TX Ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		pd->tx_buffers[i].skb = NULL;
		pd->tx_buffers[i].mapping = 0;
		pd->tx_ring[i].status = 0;
		pd->tx_ring[i].length = 0;
		pd->tx_ring[i].buffer1 = 0;
		pd->tx_ring[i].buffer2 = 0;
	}
	/* mark the final descriptor as end-of-ring so the DMA engine wraps */
	pd->tx_ring[TX_RING_SIZE - 1].length = TDES1_TER_;
	wmb();	/* ring contents visible before the device learns its address */

	pd->tx_ring_head = 0;
	pd->tx_ring_tail = 0;

	smsc9420_reg_write(pd, TX_BASE_ADDR, pd->tx_dma_addr);
	smsc9420_pci_flush_write(pd);

	return 0;
}
1258
/* Allocate the RX bookkeeping array, initialize the descriptor ring,
 * populate every slot with a mapped skb, configure VLAN tag detection
 * and (optionally) RX checksum offload, then tell the device where the
 * ring lives.  Returns 0 on success, -ENOMEM on failure (with any
 * partially allocated skbs freed). */
static int smsc9420_alloc_rx_ring(struct smsc9420_pdata *pd)
{
	int i;

	BUG_ON(!pd->rx_ring);

	pd->rx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) *
		RX_RING_SIZE), GFP_KERNEL);
	if (pd->rx_buffers == NULL) {
		smsc_warn(IFUP, "Failed to allocated rx_buffers");
		goto out;
	}

	/* initialize the rx ring */
	for (i = 0; i < RX_RING_SIZE; i++) {
		pd->rx_ring[i].status = 0;
		pd->rx_ring[i].length = PKT_BUF_SZ;
		pd->rx_ring[i].buffer2 = 0;
		pd->rx_buffers[i].skb = NULL;
		pd->rx_buffers[i].mapping = 0;
	}
	/* mark the final descriptor as end-of-ring so the DMA engine wraps */
	pd->rx_ring[RX_RING_SIZE - 1].length = (PKT_BUF_SZ | RDES1_RER_);

	/* now allocate the entire ring of skbs */
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (smsc9420_alloc_rx_buffer(pd, i)) {
			smsc_warn(IFUP, "failed to allocate rx skb %d", i);
			goto out_free_rx_skbs;
		}
	}

	pd->rx_ring_head = 0;
	pd->rx_ring_tail = 0;

	/* program the VLAN ethertype the device should recognize */
	smsc9420_reg_write(pd, VLAN1, ETH_P_8021Q);
	smsc_dbg(IFUP, "VLAN1 = 0x%08x", smsc9420_reg_read(pd, VLAN1));

	if (pd->rx_csum) {
		/* Enable RX COE */
		u32 coe = smsc9420_reg_read(pd, COE_CR) | RX_COE_EN;
		smsc9420_reg_write(pd, COE_CR, coe);
		smsc_dbg(IFUP, "COE_CR = 0x%08x", coe);
	}

	smsc9420_reg_write(pd, RX_BASE_ADDR, pd->rx_dma_addr);
	smsc9420_pci_flush_write(pd);

	return 0;

out_free_rx_skbs:
	smsc9420_free_rx_ring(pd);
out:
	return -ENOMEM;
}
1313
/* ndo_open: bring the interface up.  Sequence: quiesce all interrupts,
 * claim the IRQ, reset and configure the DMA engine, run an IRQ self-test
 * using the chip's software interrupt, allocate the TX/RX rings, bring up
 * the PHY/MII layer, then start TX/RX and enable interrupts.
 * Returns 0 on success or a negative errno (with goto-chain unwinding
 * of whatever was set up so far). */
static int smsc9420_open(struct net_device *dev)
{
	struct smsc9420_pdata *pd;
	u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl;
	unsigned long flags;
	int result = 0, timeout;

	BUG_ON(!dev);
	pd = netdev_priv(dev);
	BUG_ON(!pd);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		smsc_warn(IFUP, "dev_addr is not a valid MAC address");
		result = -EADDRNOTAVAIL;
		goto out_0;
	}

	netif_carrier_off(dev);

	/* disable, mask and acknowlege all interrupts */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	smsc9420_reg_write(pd, INT_CTL, 0);
	spin_unlock_irqrestore(&pd->int_lock, flags);
	smsc9420_reg_write(pd, DMAC_INTR_ENA, 0);
	smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
	smsc9420_pci_flush_write(pd);

	/* pd (not dev) is the dev_id; smsc9420_isr() casts it back */
	if (request_irq(dev->irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
			DRV_NAME, pd)) {
		smsc_warn(IFUP, "Unable to use IRQ = %d", dev->irq);
		result = -ENODEV;
		goto out_0;
	}

	smsc9420_dmac_soft_reset(pd);

	/* make sure MAC_CR is sane */
	smsc9420_reg_write(pd, MAC_CR, 0);

	smsc9420_set_mac_address(dev);

	/* Configure GPIO pins to drive LEDs */
	smsc9420_reg_write(pd, GPIO_CFG,
		(GPIO_CFG_LED_3_ | GPIO_CFG_LED_2_ | GPIO_CFG_LED_1_));

	bus_mode = BUS_MODE_DMA_BURST_LENGTH_16;

#ifdef __BIG_ENDIAN
	/* descriptor byte ordering must match the host CPU */
	bus_mode |= BUS_MODE_DBO_;
#endif

	smsc9420_reg_write(pd, BUS_MODE, bus_mode);

	smsc9420_pci_flush_write(pd);

	/* set bus master bridge arbitration priority for Rx and TX DMA */
	smsc9420_reg_write(pd, BUS_CFG, BUS_CFG_RXTXWEIGHT_4_1);

	/* store-and-forward TX, operate on second frame */
	smsc9420_reg_write(pd, DMAC_CONTROL,
		(DMAC_CONTROL_SF_ | DMAC_CONTROL_OSF_));

	smsc9420_pci_flush_write(pd);

	/* test the IRQ connection to the ISR */
	smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq);

	spin_lock_irqsave(&pd->int_lock, flags);
	/* configure interrupt deassertion timer and enable interrupts */
	int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_;
	int_cfg &= ~(INT_CFG_INT_DEAS_MASK);
	int_cfg |= (INT_DEAS_TIME & INT_CFG_INT_DEAS_MASK);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);

	/* unmask software interrupt */
	int_ctl = smsc9420_reg_read(pd, INT_CTL) | INT_CTL_SW_INT_EN_;
	smsc9420_reg_write(pd, INT_CTL, int_ctl);
	spin_unlock_irqrestore(&pd->int_lock, flags);
	smsc9420_pci_flush_write(pd);

	/* wait up to ~1s for the ISR to see the software interrupt and
	 * set software_irq_signal (see smsc9420_isr) */
	timeout = 1000;
	pd->software_irq_signal = false;
	smp_wmb();
	while (timeout--) {
		if (pd->software_irq_signal)
			break;
		msleep(1);
	}

	/* disable interrupts */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	spin_unlock_irqrestore(&pd->int_lock, flags);

	if (!pd->software_irq_signal) {
		smsc_warn(IFUP, "ISR failed signaling test");
		result = -ENODEV;
		goto out_free_irq_1;
	}

	smsc_dbg(IFUP, "ISR passed test using IRQ %d", dev->irq);

	result = smsc9420_alloc_tx_ring(pd);
	if (result) {
		smsc_warn(IFUP, "Failed to Initialize tx dma ring");
		result = -ENOMEM;
		goto out_free_irq_1;
	}

	result = smsc9420_alloc_rx_ring(pd);
	if (result) {
		smsc_warn(IFUP, "Failed to Initialize rx dma ring");
		result = -ENOMEM;
		goto out_free_tx_ring_2;
	}

	result = smsc9420_mii_init(dev);
	if (result) {
		smsc_warn(IFUP, "Failed to initialize Phy");
		result = -ENODEV;
		goto out_free_rx_ring_3;
	}

	/* Bring the PHY up */
	phy_start(pd->phy_dev);

	napi_enable(&pd->napi);

	/* start tx and rx */
	mac_cr = smsc9420_reg_read(pd, MAC_CR) | MAC_CR_TXEN_ | MAC_CR_RXEN_;
	smsc9420_reg_write(pd, MAC_CR, mac_cr);

	dmac_control = smsc9420_reg_read(pd, DMAC_CONTROL);
	dmac_control |= DMAC_CONTROL_ST_ | DMAC_CONTROL_SR_;
	smsc9420_reg_write(pd, DMAC_CONTROL, dmac_control);
	smsc9420_pci_flush_write(pd);

	dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
	dma_intr_ena |=
		(DMAC_INTR_ENA_TX_ | DMAC_INTR_ENA_RX_ | DMAC_INTR_ENA_NIS_);
	smsc9420_reg_write(pd, DMAC_INTR_ENA, dma_intr_ena);
	smsc9420_pci_flush_write(pd);

	netif_wake_queue(dev);

	smsc9420_reg_write(pd, RX_POLL_DEMAND, 1);

	/* enable interrupts */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) | INT_CFG_IRQ_EN_;
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	spin_unlock_irqrestore(&pd->int_lock, flags);

	return 0;

out_free_rx_ring_3:
	smsc9420_free_rx_ring(pd);
out_free_tx_ring_2:
	smsc9420_free_tx_ring(pd);
out_free_irq_1:
	free_irq(dev->irq, pd);
out_0:
	return result;
}
1480
1481#ifdef CONFIG_PM
1482
/* Power-management suspend: mask the master interrupt, and if the
 * interface is running tear down the TX/RX paths (rings are rebuilt on
 * resume via smsc9420_open), then put the PCI device into the target
 * low-power state.  Wake-on-LAN is not armed here. */
static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct smsc9420_pdata *pd = netdev_priv(dev);
	u32 int_cfg;
	ulong flags;

	/* disable interrupts */
	spin_lock_irqsave(&pd->int_lock, flags);
	int_cfg = smsc9420_reg_read(pd, INT_CFG) & (~INT_CFG_IRQ_EN_);
	smsc9420_reg_write(pd, INT_CFG, int_cfg);
	spin_unlock_irqrestore(&pd->int_lock, flags);

	if (netif_running(dev)) {
		netif_tx_disable(dev);
		smsc9420_stop_tx(pd);
		smsc9420_free_tx_ring(pd);

		napi_disable(&pd->napi);
		smsc9420_stop_rx(pd);
		smsc9420_free_rx_ring(pd);

		free_irq(dev->irq, pd);

		netif_device_detach(dev);
	}

	pci_save_state(pdev);
	/* explicitly leave wake disabled for the target sleep state */
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
1517
1518static int smsc9420_resume(struct pci_dev *pdev)
1519{
1520 struct net_device *dev = pci_get_drvdata(pdev);
1521 struct smsc9420_pdata *pd = netdev_priv(dev);
1522 int err;
1523
1524 pci_set_power_state(pdev, PCI_D0);
1525 pci_restore_state(pdev);
1526
1527 err = pci_enable_device(pdev);
1528 if (err)
1529 return err;
1530
1531 pci_set_master(pdev);
1532
1533 err = pci_enable_wake(pdev, 0, 0);
1534 if (err)
1535 smsc_warn(IFUP, "pci_enable_wake failed: %d", err);
1536
1537 if (netif_running(dev)) {
1538 err = smsc9420_open(dev);
1539 netif_device_attach(dev);
1540 }
1541 return err;
1542}
1543
1544#endif /* CONFIG_PM */
1545
/* net_device operations: open/stop lifecycle, TX path, stats, multicast
 * filtering, PHY ioctls, plus netpoll support when configured. */
static const struct net_device_ops smsc9420_netdev_ops = {
	.ndo_open = smsc9420_open,
	.ndo_stop = smsc9420_stop,
	.ndo_start_xmit = smsc9420_hard_start_xmit,
	.ndo_get_stats = smsc9420_get_stats,
	.ndo_set_multicast_list = smsc9420_set_multicast_list,
	.ndo_do_ioctl = smsc9420_do_ioctl,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = smsc9420_poll_controller,
#endif /* CONFIG_NET_POLL_CONTROLLER */
};
1558
/*
 * PCI probe: enable the device, map its BAR, allocate the netdev and the
 * coherent RX+TX descriptor rings, identify the chip via ID_REV, and
 * register the net device.  Resources are unwound via the numbered goto
 * ladder at the bottom (reverse order of acquisition).
 *
 * NOTE(review): every failure path returns -ENODEV rather than the
 * specific error in 'result'.  This is deliberate-looking: 'result' is
 * only set by pci_enable_device()/register_netdev(), so returning it
 * from paths like a failed alloc_etherdev() would wrongly return 0.
 */
static int __devinit
smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct smsc9420_pdata *pd;
	void __iomem *virt_addr;
	int result = 0;
	u32 id_rev;

	printk(KERN_INFO DRV_DESCRIPTION " version " DRV_VERSION "\n");

	/* First do the PCI initialisation */
	result = pci_enable_device(pdev);
	if (unlikely(result)) {
		printk(KERN_ERR "Cannot enable smsc9420\n");
		goto out_0;
	}

	pci_set_master(pdev);

	/* Private data (smsc9420_pdata) lives in the netdev allocation. */
	dev = alloc_etherdev(sizeof(*pd));
	if (!dev) {
		printk(KERN_ERR "ether device alloc failed\n");
		goto out_disable_pci_device_1;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* The register BAR must be memory-mapped, not I/O-port space. */
	if (!(pci_resource_flags(pdev, SMSC_BAR) & IORESOURCE_MEM)) {
		printk(KERN_ERR "Cannot find PCI device base address\n");
		goto out_free_netdev_2;
	}

	if ((pci_request_regions(pdev, DRV_NAME))) {
		printk(KERN_ERR "Cannot obtain PCI resources, aborting.\n");
		goto out_free_netdev_2;
	}

	/* Device only does 32-bit DMA. */
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_ERR "No usable DMA configuration, aborting.\n");
		goto out_free_regions_3;
	}

	virt_addr = ioremap(pci_resource_start(pdev, SMSC_BAR),
		pci_resource_len(pdev, SMSC_BAR));
	if (!virt_addr) {
		printk(KERN_ERR "Cannot map device registers, aborting.\n");
		goto out_free_regions_3;
	}

	/* registers are double mapped with 0 offset for LE and 0x200 for BE */
	virt_addr += LAN9420_CPSR_ENDIAN_OFFSET;

	dev->base_addr = (ulong)virt_addr;

	pd = netdev_priv(dev);

	/* pci descriptors are created in the PCI consistent area */
	/* One coherent allocation holds both rings: RX first, TX after. */
	pd->rx_ring = pci_alloc_consistent(pdev,
		sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE +
		sizeof(struct smsc9420_dma_desc) * TX_RING_SIZE,
		&pd->rx_dma_addr);

	if (!pd->rx_ring)
		goto out_free_io_4;

	/* descriptors are aligned due to the nature of pci_alloc_consistent */
	pd->tx_ring = (struct smsc9420_dma_desc *)
	    (pd->rx_ring + RX_RING_SIZE);
	pd->tx_dma_addr = pd->rx_dma_addr +
	    sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;

	pd->pdev = pdev;
	pd->dev = dev;
	pd->base_addr = virt_addr;
	pd->msg_enable = smsc_debug;
	pd->rx_csum = true;

	smsc_dbg(PROBE, "lan_base=0x%08lx", (ulong)virt_addr);

	/* Sanity-check the silicon ID before touching anything else. */
	id_rev = smsc9420_reg_read(pd, ID_REV);
	switch (id_rev & 0xFFFF0000) {
	case 0x94200000:
		smsc_info(PROBE, "LAN9420 identified, ID_REV=0x%08X", id_rev);
		break;
	default:
		smsc_warn(PROBE, "LAN9420 NOT identified");
		smsc_warn(PROBE, "ID_REV=0x%08X", id_rev);
		goto out_free_dmadesc_5;
	}

	smsc9420_dmac_soft_reset(pd);
	smsc9420_eeprom_reload(pd);
	smsc9420_check_mac_address(dev);

	dev->netdev_ops = &smsc9420_netdev_ops;
	dev->ethtool_ops = &smsc9420_ethtool_ops;
	dev->irq = pdev->irq;

	netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT);

	result = register_netdev(dev);
	if (result) {
		smsc_warn(PROBE, "error %i registering device", result);
		goto out_free_dmadesc_5;
	}

	pci_set_drvdata(pdev, dev);

	spin_lock_init(&pd->int_lock);
	spin_lock_init(&pd->phy_lock);

	dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr);

	return 0;

out_free_dmadesc_5:
	pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
		(RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
out_free_io_4:
	/* Undo the endian offset before unmapping the original cookie. */
	iounmap(virt_addr - LAN9420_CPSR_ENDIAN_OFFSET);
out_free_regions_3:
	pci_release_regions(pdev);
out_free_netdev_2:
	free_netdev(dev);
out_disable_pci_device_1:
	pci_disable_device(pdev);
out_0:
	return -ENODEV;
}
1689
/*
 * PCI remove: unregister the net device, then release resources in the
 * reverse order of smsc9420_probe().  The BUG_ON checks assert the
 * invariants that packet buffers were freed by smsc9420_stop() while the
 * descriptor rings (freed here) still exist.
 */
static void __devexit smsc9420_remove(struct pci_dev *pdev)
{
	struct net_device *dev;
	struct smsc9420_pdata *pd;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	pci_set_drvdata(pdev, NULL);

	pd = netdev_priv(dev);
	unregister_netdev(dev);

	/* tx_buffers and rx_buffers are freed in stop */
	BUG_ON(pd->tx_buffers);
	BUG_ON(pd->rx_buffers);

	BUG_ON(!pd->tx_ring);
	BUG_ON(!pd->rx_ring);

	/* Single coherent allocation covers both RX and TX rings. */
	pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
		(RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);

	/* base_addr was advanced by the BE/LE offset at probe time; back it
	 * out to unmap the original ioremap() cookie. */
	iounmap(pd->base_addr - LAN9420_CPSR_ENDIAN_OFFSET);
	pci_release_regions(pdev);
	free_netdev(dev);
	pci_disable_device(pdev);
}
1719
/* PCI driver registration glue; the suspend/resume handlers are only
 * compiled in when power management support is configured. */
static struct pci_driver smsc9420_driver = {
	.name = DRV_NAME,
	.id_table = smsc9420_id_table,
	.probe = smsc9420_probe,
	.remove = __devexit_p(smsc9420_remove),
#ifdef CONFIG_PM
	.suspend = smsc9420_suspend,
	.resume = smsc9420_resume,
#endif /* CONFIG_PM */
};
1730
1731static int __init smsc9420_init_module(void)
1732{
1733 smsc_debug = netif_msg_init(debug, SMSC_MSG_DEFAULT);
1734
1735 return pci_register_driver(&smsc9420_driver);
1736}
1737
/* Module exit point: deregister the PCI driver, which triggers
 * smsc9420_remove() for every bound device. */
static void __exit smsc9420_exit_module(void)
{
	pci_unregister_driver(&smsc9420_driver);
}
1742
/* Wire the init/exit functions into the kernel module lifecycle. */
module_init(smsc9420_init_module);
module_exit(smsc9420_exit_module);
diff --git a/drivers/net/smsc9420.h b/drivers/net/smsc9420.h
new file mode 100644
index 000000000000..69c351f93f86
--- /dev/null
+++ b/drivers/net/smsc9420.h
@@ -0,0 +1,275 @@
1 /***************************************************************************
2 *
3 * Copyright (C) 2007,2008 SMSC
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 *
19 ***************************************************************************
20 */
21
22#ifndef _SMSC9420_H
23#define _SMSC9420_H
24
25#define TX_RING_SIZE (32)
26#define RX_RING_SIZE (128)
27
28/* interrupt deassertion in multiples of 10us */
29#define INT_DEAS_TIME (50)
30
31#define NAPI_WEIGHT (64)
32#define SMSC_BAR (3)
33
34#ifdef __BIG_ENDIAN
35/* Register set is duplicated for BE at an offset of 0x200 */
36#define LAN9420_CPSR_ENDIAN_OFFSET (0x200)
37#else
38#define LAN9420_CPSR_ENDIAN_OFFSET (0)
39#endif
40
41#define PCI_VENDOR_ID_9420 (0x1055)
42#define PCI_DEVICE_ID_9420 (0xE420)
43
44#define LAN_REGISTER_EXTENT (0x400)
45
46#define SMSC9420_EEPROM_SIZE ((u32)11)
47
48#define PKT_BUF_SZ (VLAN_ETH_FRAME_LEN + NET_IP_ALIGN + 4)
49
50/***********************************************/
51/* DMA Controller Control and Status Registers */
52/***********************************************/
53#define BUS_MODE (0x00)
54#define BUS_MODE_SWR_ (BIT(0))
55#define BUS_MODE_DMA_BURST_LENGTH_1 (BIT(8))
56#define BUS_MODE_DMA_BURST_LENGTH_2 (BIT(9))
57#define BUS_MODE_DMA_BURST_LENGTH_4 (BIT(10))
58#define BUS_MODE_DMA_BURST_LENGTH_8 (BIT(11))
59#define BUS_MODE_DMA_BURST_LENGTH_16 (BIT(12))
60#define BUS_MODE_DMA_BURST_LENGTH_32 (BIT(13))
61#define BUS_MODE_DBO_ (BIT(20))
62
63#define TX_POLL_DEMAND (0x04)
64
65#define RX_POLL_DEMAND (0x08)
66
67#define RX_BASE_ADDR (0x0C)
68
69#define TX_BASE_ADDR (0x10)
70
71#define DMAC_STATUS (0x14)
72#define DMAC_STS_TS_ (7 << 20)
73#define DMAC_STS_RS_ (7 << 17)
74#define DMAC_STS_NIS_ (BIT(16))
75#define DMAC_STS_AIS_ (BIT(15))
76#define DMAC_STS_RWT_ (BIT(9))
77#define DMAC_STS_RXPS_ (BIT(8))
78#define DMAC_STS_RXBU_ (BIT(7))
79#define DMAC_STS_RX_ (BIT(6))
80#define DMAC_STS_TXUNF_ (BIT(5))
81#define DMAC_STS_TXBU_ (BIT(2))
82#define DMAC_STS_TXPS_ (BIT(1))
83#define DMAC_STS_TX_ (BIT(0))
84
85#define DMAC_CONTROL (0x18)
86#define DMAC_CONTROL_TTM_ (BIT(22))
87#define DMAC_CONTROL_SF_ (BIT(21))
88#define DMAC_CONTROL_ST_ (BIT(13))
89#define DMAC_CONTROL_OSF_ (BIT(2))
90#define DMAC_CONTROL_SR_ (BIT(1))
91
92#define DMAC_INTR_ENA (0x1C)
93#define DMAC_INTR_ENA_NIS_ (BIT(16))
94#define DMAC_INTR_ENA_AIS_ (BIT(15))
95#define DMAC_INTR_ENA_RWT_ (BIT(9))
96#define DMAC_INTR_ENA_RXPS_ (BIT(8))
97#define DMAC_INTR_ENA_RXBU_ (BIT(7))
98#define DMAC_INTR_ENA_RX_ (BIT(6))
99#define DMAC_INTR_ENA_TXBU_ (BIT(2))
100#define DMAC_INTR_ENA_TXPS_ (BIT(1))
101#define DMAC_INTR_ENA_TX_ (BIT(0))
102
103#define MISS_FRAME_CNTR (0x20)
104
105#define TX_BUFF_ADDR (0x50)
106
107#define RX_BUFF_ADDR (0x54)
108
109/* Transmit Descriptor Bit Defs */
110#define TDES0_OWN_ (0x80000000)
111#define TDES0_ERROR_SUMMARY_ (0x00008000)
112#define TDES0_LOSS_OF_CARRIER_ (0x00000800)
113#define TDES0_NO_CARRIER_ (0x00000400)
114#define TDES0_LATE_COLLISION_ (0x00000200)
115#define TDES0_EXCESSIVE_COLLISIONS_ (0x00000100)
116#define TDES0_HEARTBEAT_FAIL_ (0x00000080)
117#define TDES0_COLLISION_COUNT_MASK_ (0x00000078)
118#define TDES0_COLLISION_COUNT_SHFT_ (3)
119#define TDES0_EXCESSIVE_DEFERRAL_ (0x00000004)
120#define TDES0_DEFERRED_ (0x00000001)
121
122#define TDES1_IC_ 0x80000000
123#define TDES1_LS_ 0x40000000
124#define TDES1_FS_ 0x20000000
125#define TDES1_TXCSEN_ 0x08000000
126#define TDES1_TER_ (BIT(25))
127#define TDES1_TCH_ 0x01000000
128
129/* Receive Descriptor 0 Bit Defs */
130#define RDES0_OWN_ (0x80000000)
131#define RDES0_FRAME_LENGTH_MASK_ (0x07FF0000)
132#define RDES0_FRAME_LENGTH_SHFT_ (16)
133#define RDES0_ERROR_SUMMARY_ (0x00008000)
134#define RDES0_DESCRIPTOR_ERROR_ (0x00004000)
135#define RDES0_LENGTH_ERROR_ (0x00001000)
136#define RDES0_RUNT_FRAME_ (0x00000800)
137#define RDES0_MULTICAST_FRAME_ (0x00000400)
138#define RDES0_FIRST_DESCRIPTOR_ (0x00000200)
139#define RDES0_LAST_DESCRIPTOR_ (0x00000100)
140#define RDES0_FRAME_TOO_LONG_ (0x00000080)
141#define RDES0_COLLISION_SEEN_ (0x00000040)
142#define RDES0_FRAME_TYPE_ (0x00000020)
143#define RDES0_WATCHDOG_TIMEOUT_ (0x00000010)
144#define RDES0_MII_ERROR_ (0x00000008)
145#define RDES0_DRIBBLING_BIT_ (0x00000004)
146#define RDES0_CRC_ERROR_ (0x00000002)
147
148/* Receive Descriptor 1 Bit Defs */
149#define RDES1_RER_ (0x02000000)
150
151/***********************************************/
152/* MAC Control and Status Registers */
153/***********************************************/
154#define MAC_CR (0x80)
155#define MAC_CR_RXALL_ (0x80000000)
156#define MAC_CR_DIS_RXOWN_ (0x00800000)
157#define MAC_CR_LOOPBK_ (0x00200000)
158#define MAC_CR_FDPX_ (0x00100000)
159#define MAC_CR_MCPAS_ (0x00080000)
160#define MAC_CR_PRMS_ (0x00040000)
161#define MAC_CR_INVFILT_ (0x00020000)
162#define MAC_CR_PASSBAD_ (0x00010000)
163#define MAC_CR_HFILT_ (0x00008000)
164#define MAC_CR_HPFILT_ (0x00002000)
165#define MAC_CR_LCOLL_ (0x00001000)
166#define MAC_CR_DIS_BCAST_ (0x00000800)
167#define MAC_CR_DIS_RTRY_ (0x00000400)
168#define MAC_CR_PADSTR_ (0x00000100)
169#define MAC_CR_BOLMT_MSK (0x000000C0)
170#define MAC_CR_MFCHK_ (0x00000020)
171#define MAC_CR_TXEN_ (0x00000008)
172#define MAC_CR_RXEN_ (0x00000004)
173
174#define ADDRH (0x84)
175
176#define ADDRL (0x88)
177
178#define HASHH (0x8C)
179
180#define HASHL (0x90)
181
182#define MII_ACCESS (0x94)
183#define MII_ACCESS_MII_BUSY_ (0x00000001)
184#define MII_ACCESS_MII_WRITE_ (0x00000002)
185#define MII_ACCESS_MII_READ_ (0x00000000)
186#define MII_ACCESS_INDX_MSK_ (0x000007C0)
187#define MII_ACCESS_PHYADDR_MSK_ (0x0000F8C0)
188#define MII_ACCESS_INDX_SHFT_CNT (6)
189#define MII_ACCESS_PHYADDR_SHFT_CNT (11)
190
191#define MII_DATA (0x98)
192
193#define FLOW (0x9C)
194
195#define VLAN1 (0xA0)
196
197#define VLAN2 (0xA4)
198
199#define WUFF (0xA8)
200
201#define WUCSR (0xAC)
202
203#define COE_CR (0xB0)
204#define TX_COE_EN (0x00010000)
205#define RX_COE_MODE (0x00000002)
206#define RX_COE_EN (0x00000001)
207
208/***********************************************/
209/* System Control and Status Registers */
210/***********************************************/
211#define ID_REV (0xC0)
212
213#define INT_CTL (0xC4)
214#define INT_CTL_SW_INT_EN_ (0x00008000)
215#define INT_CTL_SBERR_INT_EN_ (1 << 12)
216#define INT_CTL_MBERR_INT_EN_ (1 << 13)
217#define INT_CTL_GPT_INT_EN_ (0x00000008)
218#define INT_CTL_PHY_INT_EN_ (0x00000004)
219#define INT_CTL_WAKE_INT_EN_ (0x00000002)
220
221#define INT_STAT (0xC8)
222#define INT_STAT_SW_INT_ (1 << 15)
223#define INT_STAT_MBERR_INT_ (1 << 13)
224#define INT_STAT_SBERR_INT_ (1 << 12)
225#define INT_STAT_GPT_INT_ (1 << 3)
226#define INT_STAT_PHY_INT_ (0x00000004)
227#define INT_STAT_WAKE_INT_ (0x00000002)
228#define INT_STAT_DMAC_INT_ (0x00000001)
229
230#define INT_CFG (0xCC)
231#define INT_CFG_IRQ_INT_ (0x00080000)
232#define INT_CFG_IRQ_EN_ (0x00040000)
233#define INT_CFG_INT_DEAS_CLR_ (0x00000200)
234#define INT_CFG_INT_DEAS_MASK (0x000000FF)
235
236#define GPIO_CFG (0xD0)
237#define GPIO_CFG_LED_3_ (0x40000000)
238#define GPIO_CFG_LED_2_ (0x20000000)
239#define GPIO_CFG_LED_1_ (0x10000000)
240#define GPIO_CFG_EEPR_EN_ (0x00700000)
241
242#define GPT_CFG (0xD4)
243#define GPT_CFG_TIMER_EN_ (0x20000000)
244
245#define GPT_CNT (0xD8)
246
247#define BUS_CFG (0xDC)
248#define BUS_CFG_RXTXWEIGHT_1_1 (0 << 25)
249#define BUS_CFG_RXTXWEIGHT_2_1 (1 << 25)
250#define BUS_CFG_RXTXWEIGHT_3_1 (2 << 25)
251#define BUS_CFG_RXTXWEIGHT_4_1 (3 << 25)
252
253#define PMT_CTRL (0xE0)
254
255#define FREE_RUN (0xF4)
256
257#define E2P_CMD (0xF8)
258#define E2P_CMD_EPC_BUSY_ (0x80000000)
259#define E2P_CMD_EPC_CMD_ (0x70000000)
260#define E2P_CMD_EPC_CMD_READ_ (0x00000000)
261#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000)
262#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000)
263#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000)
264#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000)
265#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000)
266#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000)
267#define E2P_CMD_EPC_CMD_RELOAD_ (0x70000000)
268#define E2P_CMD_EPC_TIMEOUT_ (0x00000200)
269#define E2P_CMD_MAC_ADDR_LOADED_ (0x00000100)
270#define E2P_CMD_EPC_ADDR_ (0x000000FF)
271
272#define E2P_DATA (0xFC)
273#define E2P_DATA_EEPROM_DATA_ (0x000000FF)
274
275#endif /* _SMSC9420_H */
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 8069f3e32d83..211e805c1223 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -450,7 +450,6 @@ static void sonic_rx(struct net_device *dev)
450 skb_trim(used_skb, pkt_len); 450 skb_trim(used_skb, pkt_len);
451 used_skb->protocol = eth_type_trans(used_skb, dev); 451 used_skb->protocol = eth_type_trans(used_skb, dev);
452 netif_rx(used_skb); 452 netif_rx(used_skb);
453 dev->last_rx = jiffies;
454 lp->stats.rx_packets++; 453 lp->stats.rx_packets++;
455 lp->stats.rx_bytes += pkt_len; 454 lp->stats.rx_bytes += pkt_len;
456 455
diff --git a/drivers/net/sonic.h b/drivers/net/sonic.h
index 7db13e4a7ea5..07091dd27e5d 100644
--- a/drivers/net/sonic.h
+++ b/drivers/net/sonic.h
@@ -371,7 +371,7 @@ static inline __u16 sonic_buf_get(void* base, int bitmode,
371static inline void sonic_cda_put(struct net_device* dev, int entry, 371static inline void sonic_cda_put(struct net_device* dev, int entry,
372 int offset, __u16 val) 372 int offset, __u16 val)
373{ 373{
374 struct sonic_local* lp = (struct sonic_local *) dev->priv; 374 struct sonic_local *lp = netdev_priv(dev);
375 sonic_buf_put(lp->cda, lp->dma_bitmode, 375 sonic_buf_put(lp->cda, lp->dma_bitmode,
376 (entry * SIZEOF_SONIC_CD) + offset, val); 376 (entry * SIZEOF_SONIC_CD) + offset, val);
377} 377}
@@ -379,27 +379,27 @@ static inline void sonic_cda_put(struct net_device* dev, int entry,
379static inline __u16 sonic_cda_get(struct net_device* dev, int entry, 379static inline __u16 sonic_cda_get(struct net_device* dev, int entry,
380 int offset) 380 int offset)
381{ 381{
382 struct sonic_local* lp = (struct sonic_local *) dev->priv; 382 struct sonic_local *lp = netdev_priv(dev);
383 return sonic_buf_get(lp->cda, lp->dma_bitmode, 383 return sonic_buf_get(lp->cda, lp->dma_bitmode,
384 (entry * SIZEOF_SONIC_CD) + offset); 384 (entry * SIZEOF_SONIC_CD) + offset);
385} 385}
386 386
387static inline void sonic_set_cam_enable(struct net_device* dev, __u16 val) 387static inline void sonic_set_cam_enable(struct net_device* dev, __u16 val)
388{ 388{
389 struct sonic_local* lp = (struct sonic_local *) dev->priv; 389 struct sonic_local *lp = netdev_priv(dev);
390 sonic_buf_put(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE, val); 390 sonic_buf_put(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE, val);
391} 391}
392 392
393static inline __u16 sonic_get_cam_enable(struct net_device* dev) 393static inline __u16 sonic_get_cam_enable(struct net_device* dev)
394{ 394{
395 struct sonic_local* lp = (struct sonic_local *) dev->priv; 395 struct sonic_local *lp = netdev_priv(dev);
396 return sonic_buf_get(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE); 396 return sonic_buf_get(lp->cda, lp->dma_bitmode, SONIC_CDA_CAM_ENABLE);
397} 397}
398 398
399static inline void sonic_tda_put(struct net_device* dev, int entry, 399static inline void sonic_tda_put(struct net_device* dev, int entry,
400 int offset, __u16 val) 400 int offset, __u16 val)
401{ 401{
402 struct sonic_local* lp = (struct sonic_local *) dev->priv; 402 struct sonic_local *lp = netdev_priv(dev);
403 sonic_buf_put(lp->tda, lp->dma_bitmode, 403 sonic_buf_put(lp->tda, lp->dma_bitmode,
404 (entry * SIZEOF_SONIC_TD) + offset, val); 404 (entry * SIZEOF_SONIC_TD) + offset, val);
405} 405}
@@ -407,7 +407,7 @@ static inline void sonic_tda_put(struct net_device* dev, int entry,
407static inline __u16 sonic_tda_get(struct net_device* dev, int entry, 407static inline __u16 sonic_tda_get(struct net_device* dev, int entry,
408 int offset) 408 int offset)
409{ 409{
410 struct sonic_local* lp = (struct sonic_local *) dev->priv; 410 struct sonic_local *lp = netdev_priv(dev);
411 return sonic_buf_get(lp->tda, lp->dma_bitmode, 411 return sonic_buf_get(lp->tda, lp->dma_bitmode,
412 (entry * SIZEOF_SONIC_TD) + offset); 412 (entry * SIZEOF_SONIC_TD) + offset);
413} 413}
@@ -415,7 +415,7 @@ static inline __u16 sonic_tda_get(struct net_device* dev, int entry,
415static inline void sonic_rda_put(struct net_device* dev, int entry, 415static inline void sonic_rda_put(struct net_device* dev, int entry,
416 int offset, __u16 val) 416 int offset, __u16 val)
417{ 417{
418 struct sonic_local* lp = (struct sonic_local *) dev->priv; 418 struct sonic_local *lp = netdev_priv(dev);
419 sonic_buf_put(lp->rda, lp->dma_bitmode, 419 sonic_buf_put(lp->rda, lp->dma_bitmode,
420 (entry * SIZEOF_SONIC_RD) + offset, val); 420 (entry * SIZEOF_SONIC_RD) + offset, val);
421} 421}
@@ -423,7 +423,7 @@ static inline void sonic_rda_put(struct net_device* dev, int entry,
423static inline __u16 sonic_rda_get(struct net_device* dev, int entry, 423static inline __u16 sonic_rda_get(struct net_device* dev, int entry,
424 int offset) 424 int offset)
425{ 425{
426 struct sonic_local* lp = (struct sonic_local *) dev->priv; 426 struct sonic_local *lp = netdev_priv(dev);
427 return sonic_buf_get(lp->rda, lp->dma_bitmode, 427 return sonic_buf_get(lp->rda, lp->dma_bitmode,
428 (entry * SIZEOF_SONIC_RD) + offset); 428 (entry * SIZEOF_SONIC_RD) + offset);
429} 429}
@@ -431,7 +431,7 @@ static inline __u16 sonic_rda_get(struct net_device* dev, int entry,
431static inline void sonic_rra_put(struct net_device* dev, int entry, 431static inline void sonic_rra_put(struct net_device* dev, int entry,
432 int offset, __u16 val) 432 int offset, __u16 val)
433{ 433{
434 struct sonic_local* lp = (struct sonic_local *) dev->priv; 434 struct sonic_local *lp = netdev_priv(dev);
435 sonic_buf_put(lp->rra, lp->dma_bitmode, 435 sonic_buf_put(lp->rra, lp->dma_bitmode,
436 (entry * SIZEOF_SONIC_RR) + offset, val); 436 (entry * SIZEOF_SONIC_RR) + offset, val);
437} 437}
@@ -439,7 +439,7 @@ static inline void sonic_rra_put(struct net_device* dev, int entry,
439static inline __u16 sonic_rra_get(struct net_device* dev, int entry, 439static inline __u16 sonic_rra_get(struct net_device* dev, int entry,
440 int offset) 440 int offset)
441{ 441{
442 struct sonic_local* lp = (struct sonic_local *) dev->priv; 442 struct sonic_local *lp = netdev_priv(dev);
443 return sonic_buf_get(lp->rra, lp->dma_bitmode, 443 return sonic_buf_get(lp->rra, lp->dma_bitmode,
444 (entry * SIZEOF_SONIC_RR) + offset); 444 (entry * SIZEOF_SONIC_RR) + offset);
445} 445}
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 07599b492359..c5c123d3af57 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -789,7 +789,7 @@ spider_net_set_low_watermark(struct spider_net_card *card)
789 * spider_net_release_tx_chain releases the tx descriptors that spider has 789 * spider_net_release_tx_chain releases the tx descriptors that spider has
790 * finished with (if non-brutal) or simply release tx descriptors (if brutal). 790 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
791 * If some other context is calling this function, we return 1 so that we're 791 * If some other context is calling this function, we return 1 so that we're
792 * scheduled again (if we were scheduled) and will not loose initiative. 792 * scheduled again (if we were scheduled) and will not lose initiative.
793 */ 793 */
794static int 794static int
795spider_net_release_tx_chain(struct spider_net_card *card, int brutal) 795spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
@@ -1302,7 +1302,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
1302 /* if all packets are in the stack, enable interrupts and return 0 */ 1302 /* if all packets are in the stack, enable interrupts and return 0 */
1303 /* if not, return 1 */ 1303 /* if not, return 1 */
1304 if (packets_done < budget) { 1304 if (packets_done < budget) {
1305 netif_rx_complete(netdev, napi); 1305 netif_rx_complete(napi);
1306 spider_net_rx_irq_on(card); 1306 spider_net_rx_irq_on(card);
1307 card->ignore_rx_ramfull = 0; 1307 card->ignore_rx_ramfull = 0;
1308 } 1308 }
@@ -1529,8 +1529,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1529 spider_net_refill_rx_chain(card); 1529 spider_net_refill_rx_chain(card);
1530 spider_net_enable_rxdmac(card); 1530 spider_net_enable_rxdmac(card);
1531 card->num_rx_ints ++; 1531 card->num_rx_ints ++;
1532 netif_rx_schedule(card->netdev, 1532 netif_rx_schedule(&card->napi);
1533 &card->napi);
1534 } 1533 }
1535 show_error = 0; 1534 show_error = 0;
1536 break; 1535 break;
@@ -1550,8 +1549,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1550 spider_net_refill_rx_chain(card); 1549 spider_net_refill_rx_chain(card);
1551 spider_net_enable_rxdmac(card); 1550 spider_net_enable_rxdmac(card);
1552 card->num_rx_ints ++; 1551 card->num_rx_ints ++;
1553 netif_rx_schedule(card->netdev, 1552 netif_rx_schedule(&card->napi);
1554 &card->napi);
1555 show_error = 0; 1553 show_error = 0;
1556 break; 1554 break;
1557 1555
@@ -1565,8 +1563,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1565 spider_net_refill_rx_chain(card); 1563 spider_net_refill_rx_chain(card);
1566 spider_net_enable_rxdmac(card); 1564 spider_net_enable_rxdmac(card);
1567 card->num_rx_ints ++; 1565 card->num_rx_ints ++;
1568 netif_rx_schedule(card->netdev, 1566 netif_rx_schedule(&card->napi);
1569 &card->napi);
1570 show_error = 0; 1567 show_error = 0;
1571 break; 1568 break;
1572 1569
@@ -1660,11 +1657,11 @@ spider_net_interrupt(int irq, void *ptr)
1660 1657
1661 if (status_reg & SPIDER_NET_RXINT ) { 1658 if (status_reg & SPIDER_NET_RXINT ) {
1662 spider_net_rx_irq_off(card); 1659 spider_net_rx_irq_off(card);
1663 netif_rx_schedule(netdev, &card->napi); 1660 netif_rx_schedule(&card->napi);
1664 card->num_rx_ints ++; 1661 card->num_rx_ints ++;
1665 } 1662 }
1666 if (status_reg & SPIDER_NET_TXINT) 1663 if (status_reg & SPIDER_NET_TXINT)
1667 netif_rx_schedule(netdev, &card->napi); 1664 netif_rx_schedule(&card->napi);
1668 1665
1669 if (status_reg & SPIDER_NET_LINKINT) 1666 if (status_reg & SPIDER_NET_LINKINT)
1670 spider_net_link_reset(netdev); 1667 spider_net_link_reset(netdev);
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index 85691d2a0be2..5bae728c3820 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -118,7 +118,7 @@ spider_net_ethtool_nway_reset(struct net_device *netdev)
118static u32 118static u32
119spider_net_ethtool_get_rx_csum(struct net_device *netdev) 119spider_net_ethtool_get_rx_csum(struct net_device *netdev)
120{ 120{
121 struct spider_net_card *card = netdev->priv; 121 struct spider_net_card *card = netdev_priv(netdev);
122 122
123 return card->options.rx_csum; 123 return card->options.rx_csum;
124} 124}
@@ -126,7 +126,7 @@ spider_net_ethtool_get_rx_csum(struct net_device *netdev)
126static int 126static int
127spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n) 127spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
128{ 128{
129 struct spider_net_card *card = netdev->priv; 129 struct spider_net_card *card = netdev_priv(netdev);
130 130
131 card->options.rx_csum = n; 131 card->options.rx_csum = n;
132 return 0; 132 return 0;
@@ -137,7 +137,7 @@ static void
137spider_net_ethtool_get_ringparam(struct net_device *netdev, 137spider_net_ethtool_get_ringparam(struct net_device *netdev,
138 struct ethtool_ringparam *ering) 138 struct ethtool_ringparam *ering)
139{ 139{
140 struct spider_net_card *card = netdev->priv; 140 struct spider_net_card *card = netdev_priv(netdev);
141 141
142 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX; 142 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
143 ering->tx_pending = card->tx_chain.num_desc; 143 ering->tx_pending = card->tx_chain.num_desc;
@@ -158,7 +158,7 @@ static int spider_net_get_sset_count(struct net_device *netdev, int sset)
158static void spider_net_get_ethtool_stats(struct net_device *netdev, 158static void spider_net_get_ethtool_stats(struct net_device *netdev,
159 struct ethtool_stats *stats, u64 *data) 159 struct ethtool_stats *stats, u64 *data)
160{ 160{
161 struct spider_net_card *card = netdev->priv; 161 struct spider_net_card *card = netdev_priv(netdev);
162 162
163 data[0] = netdev->stats.tx_packets; 163 data[0] = netdev->stats.tx_packets;
164 data[1] = netdev->stats.tx_bytes; 164 data[1] = netdev->stats.tx_bytes;
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 5a40f2d78beb..f54ac2389da2 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -653,7 +653,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
653 void __iomem *base; 653 void __iomem *base;
654 int drv_flags, io_size; 654 int drv_flags, io_size;
655 int boguscnt; 655 int boguscnt;
656 DECLARE_MAC_BUF(mac);
657 656
658/* when built into the kernel, we only print version if device is found */ 657/* when built into the kernel, we only print version if device is found */
659#ifndef MODULE 658#ifndef MODULE
@@ -823,9 +822,9 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
823 if (register_netdev(dev)) 822 if (register_netdev(dev))
824 goto err_out_cleardev; 823 goto err_out_cleardev;
825 824
826 printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", 825 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
827 dev->name, netdrv_tbl[chip_idx].name, base, 826 dev->name, netdrv_tbl[chip_idx].name, base,
828 print_mac(mac, dev->dev_addr), irq); 827 dev->dev_addr, irq);
829 828
830 if (drv_flags & CanHaveMII) { 829 if (drv_flags & CanHaveMII) {
831 int phy, phy_idx = 0; 830 int phy, phy_idx = 0;
@@ -881,9 +880,9 @@ static int mdio_read(struct net_device *dev, int phy_id, int location)
881 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2); 880 void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
882 int result, boguscnt=1000; 881 int result, boguscnt=1000;
883 /* ??? Should we add a busy-wait here? */ 882 /* ??? Should we add a busy-wait here? */
884 do 883 do {
885 result = readl(mdio_addr); 884 result = readl(mdio_addr);
886 while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0); 885 } while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
887 if (boguscnt == 0) 886 if (boguscnt == 0)
888 return 0; 887 return 0;
889 if ((result & 0xffff) == 0xffff) 888 if ((result & 0xffff) == 0xffff)
@@ -1291,8 +1290,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
1291 if (intr_status & (IntrRxDone | IntrRxEmpty)) { 1290 if (intr_status & (IntrRxDone | IntrRxEmpty)) {
1292 u32 enable; 1291 u32 enable;
1293 1292
1294 if (likely(netif_rx_schedule_prep(dev, &np->napi))) { 1293 if (likely(netif_rx_schedule_prep(&np->napi))) {
1295 __netif_rx_schedule(dev, &np->napi); 1294 __netif_rx_schedule(&np->napi);
1296 enable = readl(ioaddr + IntrEnable); 1295 enable = readl(ioaddr + IntrEnable);
1297 enable &= ~(IntrRxDone | IntrRxEmpty); 1296 enable &= ~(IntrRxDone | IntrRxEmpty);
1298 writel(enable, ioaddr + IntrEnable); 1297 writel(enable, ioaddr + IntrEnable);
@@ -1452,12 +1451,8 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1452#ifndef final_version /* Remove after testing. */ 1451#ifndef final_version /* Remove after testing. */
1453 /* You will want this info for the initial debug. */ 1452 /* You will want this info for the initial debug. */
1454 if (debug > 5) { 1453 if (debug > 5) {
1455 printk(KERN_DEBUG " Rx data " MAC_FMT " " MAC_FMT 1454 printk(KERN_DEBUG " Rx data %pM %pM %2.2x%2.2x.\n",
1456 " %2.2x%2.2x.\n", 1455 skb->data, skb->data + 6,
1457 skb->data[0], skb->data[1], skb->data[2],
1458 skb->data[3], skb->data[4], skb->data[5],
1459 skb->data[6], skb->data[7], skb->data[8],
1460 skb->data[9], skb->data[10], skb->data[11],
1461 skb->data[12], skb->data[13]); 1456 skb->data[12], skb->data[13]);
1462 } 1457 }
1463#endif 1458#endif
@@ -1501,7 +1496,6 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1501 } else 1496 } else
1502#endif /* VLAN_SUPPORT */ 1497#endif /* VLAN_SUPPORT */
1503 netif_receive_skb(skb); 1498 netif_receive_skb(skb);
1504 dev->last_rx = jiffies;
1505 np->stats.rx_packets++; 1499 np->stats.rx_packets++;
1506 1500
1507 next_rx: 1501 next_rx:
@@ -1541,7 +1535,7 @@ static int netdev_poll(struct napi_struct *napi, int budget)
1541 intr_status = readl(ioaddr + IntrStatus); 1535 intr_status = readl(ioaddr + IntrStatus);
1542 } while (intr_status & (IntrRxDone | IntrRxEmpty)); 1536 } while (intr_status & (IntrRxDone | IntrRxEmpty));
1543 1537
1544 netif_rx_complete(dev, napi); 1538 netif_rx_complete(napi);
1545 intr_status = readl(ioaddr + IntrEnable); 1539 intr_status = readl(ioaddr + IntrEnable);
1546 intr_status |= IntrRxDone | IntrRxEmpty; 1540 intr_status |= IntrRxDone | IntrRxEmpty;
1547 writel(intr_status, ioaddr + IntrEnable); 1541 writel(intr_status, ioaddr + IntrEnable);
diff --git a/drivers/net/stnic.c b/drivers/net/stnic.c
index 2ed0bd596815..87a6b8eabc67 100644
--- a/drivers/net/stnic.c
+++ b/drivers/net/stnic.c
@@ -60,8 +60,6 @@ static byte stnic_eadr[6] =
60 60
61static struct net_device *stnic_dev; 61static struct net_device *stnic_dev;
62 62
63static int stnic_open (struct net_device *dev);
64static int stnic_close (struct net_device *dev);
65static void stnic_reset (struct net_device *dev); 63static void stnic_reset (struct net_device *dev);
66static void stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr, 64static void stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr,
67 int ring_page); 65 int ring_page);
@@ -122,11 +120,7 @@ static int __init stnic_probe(void)
122 /* Set the base address to point to the NIC, not the "real" base! */ 120 /* Set the base address to point to the NIC, not the "real" base! */
123 dev->base_addr = 0x1000; 121 dev->base_addr = 0x1000;
124 dev->irq = IRQ_STNIC; 122 dev->irq = IRQ_STNIC;
125 dev->open = &stnic_open; 123 dev->netdev_ops = &ei_netdev_ops;
126 dev->stop = &stnic_close;
127#ifdef CONFIG_NET_POLL_CONTROLLER
128 dev->poll_controller = ei_poll;
129#endif
130 124
131 /* Snarf the interrupt now. There's no point in waiting since we cannot 125 /* Snarf the interrupt now. There's no point in waiting since we cannot
132 share and the board will usually be enabled. */ 126 share and the board will usually be enabled. */
@@ -168,23 +162,6 @@ static int __init stnic_probe(void)
168 return 0; 162 return 0;
169} 163}
170 164
171static int
172stnic_open (struct net_device *dev)
173{
174#if 0
175 printk (KERN_DEBUG "stnic open\n");
176#endif
177 ei_open (dev);
178 return 0;
179}
180
181static int
182stnic_close (struct net_device *dev)
183{
184 ei_close (dev);
185 return 0;
186}
187
188static void 165static void
189stnic_reset (struct net_device *dev) 166stnic_reset (struct net_device *dev)
190{ 167{
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index e531302d95f5..e0d84772771c 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -209,7 +209,7 @@ static int sun3_82586_open(struct net_device *dev)
209static int check586(struct net_device *dev,char *where,unsigned size) 209static int check586(struct net_device *dev,char *where,unsigned size)
210{ 210{
211 struct priv pb; 211 struct priv pb;
212 struct priv *p = /* (struct priv *) dev->priv*/ &pb; 212 struct priv *p = &pb;
213 char *iscp_addr; 213 char *iscp_addr;
214 int i; 214 int i;
215 215
@@ -247,7 +247,7 @@ static int check586(struct net_device *dev,char *where,unsigned size)
247 */ 247 */
248static void alloc586(struct net_device *dev) 248static void alloc586(struct net_device *dev)
249{ 249{
250 struct priv *p = (struct priv *) dev->priv; 250 struct priv *p = netdev_priv(dev);
251 251
252 sun3_reset586(); 252 sun3_reset586();
253 DELAY(1); 253 DELAY(1);
@@ -363,17 +363,21 @@ static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
363 goto out; 363 goto out;
364 } 364 }
365 365
366 ((struct priv *) (dev->priv))->memtop = (char *)dvma_btov(dev->mem_start); 366 ((struct priv *)netdev_priv(dev))->memtop =
367 ((struct priv *) (dev->priv))->base = (unsigned long) dvma_btov(0); 367 (char *)dvma_btov(dev->mem_start);
368 ((struct priv *)netdev_priv(dev))->base = (unsigned long) dvma_btov(0);
368 alloc586(dev); 369 alloc586(dev);
369 370
370 /* set number of receive-buffs according to memsize */ 371 /* set number of receive-buffs according to memsize */
371 if(size == 0x2000) 372 if(size == 0x2000)
372 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8; 373 ((struct priv *)netdev_priv(dev))->num_recv_buffs =
374 NUM_RECV_BUFFS_8;
373 else if(size == 0x4000) 375 else if(size == 0x4000)
374 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16; 376 ((struct priv *)netdev_priv(dev))->num_recv_buffs =
377 NUM_RECV_BUFFS_16;
375 else 378 else
376 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_32; 379 ((struct priv *)netdev_priv(dev))->num_recv_buffs =
380 NUM_RECV_BUFFS_32;
377 381
378 printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq); 382 printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq);
379 383
@@ -397,7 +401,7 @@ static int init586(struct net_device *dev)
397{ 401{
398 void *ptr; 402 void *ptr;
399 int i,result=0; 403 int i,result=0;
400 struct priv *p = (struct priv *) dev->priv; 404 struct priv *p = netdev_priv(dev);
401 volatile struct configure_cmd_struct *cfg_cmd; 405 volatile struct configure_cmd_struct *cfg_cmd;
402 volatile struct iasetup_cmd_struct *ias_cmd; 406 volatile struct iasetup_cmd_struct *ias_cmd;
403 volatile struct tdr_cmd_struct *tdr_cmd; 407 volatile struct tdr_cmd_struct *tdr_cmd;
@@ -631,7 +635,7 @@ static void *alloc_rfa(struct net_device *dev,void *ptr)
631 volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr; 635 volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr;
632 volatile struct rbd_struct *rbd; 636 volatile struct rbd_struct *rbd;
633 int i; 637 int i;
634 struct priv *p = (struct priv *) dev->priv; 638 struct priv *p = netdev_priv(dev);
635 639
636 memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd)); 640 memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd));
637 p->rfd_first = rfd; 641 p->rfd_first = rfd;
@@ -683,7 +687,7 @@ static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id)
683 printk ("sun3_82586-interrupt: irq %d for unknown device.\n",irq); 687 printk ("sun3_82586-interrupt: irq %d for unknown device.\n",irq);
684 return IRQ_NONE; 688 return IRQ_NONE;
685 } 689 }
686 p = (struct priv *) dev->priv; 690 p = netdev_priv(dev);
687 691
688 if(debuglevel > 1) 692 if(debuglevel > 1)
689 printk("I"); 693 printk("I");
@@ -753,7 +757,7 @@ static void sun3_82586_rcv_int(struct net_device *dev)
753 unsigned short totlen; 757 unsigned short totlen;
754 struct sk_buff *skb; 758 struct sk_buff *skb;
755 struct rbd_struct *rbd; 759 struct rbd_struct *rbd;
756 struct priv *p = (struct priv *) dev->priv; 760 struct priv *p = netdev_priv(dev);
757 761
758 if(debuglevel > 0) 762 if(debuglevel > 0)
759 printk("R"); 763 printk("R");
@@ -871,7 +875,7 @@ static void sun3_82586_rcv_int(struct net_device *dev)
871 875
872static void sun3_82586_rnr_int(struct net_device *dev) 876static void sun3_82586_rnr_int(struct net_device *dev)
873{ 877{
874 struct priv *p = (struct priv *) dev->priv; 878 struct priv *p = netdev_priv(dev);
875 879
876 p->stats.rx_errors++; 880 p->stats.rx_errors++;
877 881
@@ -895,7 +899,7 @@ static void sun3_82586_rnr_int(struct net_device *dev)
895static void sun3_82586_xmt_int(struct net_device *dev) 899static void sun3_82586_xmt_int(struct net_device *dev)
896{ 900{
897 int status; 901 int status;
898 struct priv *p = (struct priv *) dev->priv; 902 struct priv *p = netdev_priv(dev);
899 903
900 if(debuglevel > 0) 904 if(debuglevel > 0)
901 printk("X"); 905 printk("X");
@@ -945,7 +949,7 @@ static void sun3_82586_xmt_int(struct net_device *dev)
945 949
946static void startrecv586(struct net_device *dev) 950static void startrecv586(struct net_device *dev)
947{ 951{
948 struct priv *p = (struct priv *) dev->priv; 952 struct priv *p = netdev_priv(dev);
949 953
950 WAIT_4_SCB_CMD(); 954 WAIT_4_SCB_CMD();
951 WAIT_4_SCB_CMD_RUC(); 955 WAIT_4_SCB_CMD_RUC();
@@ -957,7 +961,7 @@ static void startrecv586(struct net_device *dev)
957 961
958static void sun3_82586_timeout(struct net_device *dev) 962static void sun3_82586_timeout(struct net_device *dev)
959{ 963{
960 struct priv *p = (struct priv *) dev->priv; 964 struct priv *p = netdev_priv(dev);
961#ifndef NO_NOPCOMMANDS 965#ifndef NO_NOPCOMMANDS
962 if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */ 966 if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */
963 { 967 {
@@ -999,7 +1003,7 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
999#ifndef NO_NOPCOMMANDS 1003#ifndef NO_NOPCOMMANDS
1000 int next_nop; 1004 int next_nop;
1001#endif 1005#endif
1002 struct priv *p = (struct priv *) dev->priv; 1006 struct priv *p = netdev_priv(dev);
1003 1007
1004 if(skb->len > XMIT_BUFF_SIZE) 1008 if(skb->len > XMIT_BUFF_SIZE)
1005 { 1009 {
@@ -1108,7 +1112,7 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
1108 1112
1109static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev) 1113static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev)
1110{ 1114{
1111 struct priv *p = (struct priv *) dev->priv; 1115 struct priv *p = netdev_priv(dev);
1112 unsigned short crc,aln,rsc,ovrn; 1116 unsigned short crc,aln,rsc,ovrn;
1113 1117
1114 crc = swab16(p->scb->crc_errs); /* get error-statistic from the ni82586 */ 1118 crc = swab16(p->scb->crc_errs); /* get error-statistic from the ni82586 */
@@ -1171,7 +1175,7 @@ void cleanup_module(void)
1171 */ 1175 */
1172void sun3_82586_dump(struct net_device *dev,void *ptr) 1176void sun3_82586_dump(struct net_device *dev,void *ptr)
1173{ 1177{
1174 struct priv *p = (struct priv *) dev->priv; 1178 struct priv *p = netdev_priv(dev);
1175 struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr; 1179 struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr;
1176 int i; 1180 int i;
1177 1181
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 359452a06c67..4bb8f72c65cc 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -303,7 +303,6 @@ static int __init lance_probe( struct net_device *dev)
303 static int did_version; 303 static int did_version;
304 volatile unsigned short *ioaddr_probe; 304 volatile unsigned short *ioaddr_probe;
305 unsigned short tmp1, tmp2; 305 unsigned short tmp1, tmp2;
306 DECLARE_MAC_BUF(mac);
307 306
308#ifdef CONFIG_SUN3 307#ifdef CONFIG_SUN3
309 ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE); 308 ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE);
@@ -379,7 +378,7 @@ static int __init lance_probe( struct net_device *dev)
379 MEM->init.hwaddr[4] = dev->dev_addr[5]; 378 MEM->init.hwaddr[4] = dev->dev_addr[5];
380 MEM->init.hwaddr[5] = dev->dev_addr[4]; 379 MEM->init.hwaddr[5] = dev->dev_addr[4];
381 380
382 printk("%s\n", print_mac(mac, dev->dev_addr)); 381 printk("%pM\n", dev->dev_addr);
383 382
384 MEM->init.mode = 0x0000; 383 MEM->init.mode = 0x0000;
385 MEM->init.filter[0] = 0x00000000; 384 MEM->init.filter[0] = 0x00000000;
@@ -824,12 +823,10 @@ static int lance_rx( struct net_device *dev )
824#if 0 823#if 0
825 if (lance_debug >= 3) { 824 if (lance_debug >= 3) {
826 u_char *data = PKTBUF_ADDR(head); 825 u_char *data = PKTBUF_ADDR(head);
827 DECLARE_MAC_BUF(mac);
828 DECLARE_MAC_BUF(mac2)
829 printk("%s: RX pkt %d type 0x%04x" 826 printk("%s: RX pkt %d type 0x%04x"
830 " from %s to %s", 827 " from %pM to %pM",
831 dev->name, lp->new_tx, ((u_short *)data)[6], 828 dev->name, lp->new_tx, ((u_short *)data)[6],
832 print_mac(mac, &data[6]), print_mac(mac2, data)); 829 &data[6], data);
833 830
834 printk(" data %02x %02x %02x %02x %02x %02x %02x %02x " 831 printk(" data %02x %02x %02x %02x %02x %02x %02x %02x "
835 "len %d at %08x\n", 832 "len %d at %08x\n",
@@ -852,7 +849,6 @@ static int lance_rx( struct net_device *dev )
852 849
853 skb->protocol = eth_type_trans( skb, dev ); 850 skb->protocol = eth_type_trans( skb, dev );
854 netif_rx( skb ); 851 netif_rx( skb );
855 dev->last_rx = jiffies;
856 dev->stats.rx_packets++; 852 dev->stats.rx_packets++;
857 dev->stats.rx_bytes += pkt_len; 853 dev->stats.rx_bytes += pkt_len;
858 } 854 }
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 018d0fca9422..7f69c7f176c4 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -878,7 +878,6 @@ static void bigmac_rx(struct bigmac *bp)
878 /* No checksums done by the BigMAC ;-( */ 878 /* No checksums done by the BigMAC ;-( */
879 skb->protocol = eth_type_trans(skb, bp->dev); 879 skb->protocol = eth_type_trans(skb, bp->dev);
880 netif_rx(skb); 880 netif_rx(skb);
881 bp->dev->last_rx = jiffies;
882 bp->enet_stats.rx_packets++; 881 bp->enet_stats.rx_packets++;
883 bp->enet_stats.rx_bytes += len; 882 bp->enet_stats.rx_bytes += len;
884 next: 883 next:
@@ -917,7 +916,7 @@ static irqreturn_t bigmac_interrupt(int irq, void *dev_id)
917 916
918static int bigmac_open(struct net_device *dev) 917static int bigmac_open(struct net_device *dev)
919{ 918{
920 struct bigmac *bp = (struct bigmac *) dev->priv; 919 struct bigmac *bp = netdev_priv(dev);
921 int ret; 920 int ret;
922 921
923 ret = request_irq(dev->irq, &bigmac_interrupt, IRQF_SHARED, dev->name, bp); 922 ret = request_irq(dev->irq, &bigmac_interrupt, IRQF_SHARED, dev->name, bp);
@@ -934,7 +933,7 @@ static int bigmac_open(struct net_device *dev)
934 933
935static int bigmac_close(struct net_device *dev) 934static int bigmac_close(struct net_device *dev)
936{ 935{
937 struct bigmac *bp = (struct bigmac *) dev->priv; 936 struct bigmac *bp = netdev_priv(dev);
938 937
939 del_timer(&bp->bigmac_timer); 938 del_timer(&bp->bigmac_timer);
940 bp->timer_state = asleep; 939 bp->timer_state = asleep;
@@ -948,7 +947,7 @@ static int bigmac_close(struct net_device *dev)
948 947
949static void bigmac_tx_timeout(struct net_device *dev) 948static void bigmac_tx_timeout(struct net_device *dev)
950{ 949{
951 struct bigmac *bp = (struct bigmac *) dev->priv; 950 struct bigmac *bp = netdev_priv(dev);
952 951
953 bigmac_init_hw(bp, 0); 952 bigmac_init_hw(bp, 0);
954 netif_wake_queue(dev); 953 netif_wake_queue(dev);
@@ -957,7 +956,7 @@ static void bigmac_tx_timeout(struct net_device *dev)
957/* Put a packet on the wire. */ 956/* Put a packet on the wire. */
958static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) 957static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
959{ 958{
960 struct bigmac *bp = (struct bigmac *) dev->priv; 959 struct bigmac *bp = netdev_priv(dev);
961 int len, entry; 960 int len, entry;
962 u32 mapping; 961 u32 mapping;
963 962
@@ -990,7 +989,7 @@ static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
990 989
991static struct net_device_stats *bigmac_get_stats(struct net_device *dev) 990static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
992{ 991{
993 struct bigmac *bp = (struct bigmac *) dev->priv; 992 struct bigmac *bp = netdev_priv(dev);
994 993
995 bigmac_get_counters(bp, bp->bregs); 994 bigmac_get_counters(bp, bp->bregs);
996 return &bp->enet_stats; 995 return &bp->enet_stats;
@@ -998,7 +997,7 @@ static struct net_device_stats *bigmac_get_stats(struct net_device *dev)
998 997
999static void bigmac_set_multicast(struct net_device *dev) 998static void bigmac_set_multicast(struct net_device *dev)
1000{ 999{
1001 struct bigmac *bp = (struct bigmac *) dev->priv; 1000 struct bigmac *bp = netdev_priv(dev);
1002 void __iomem *bregs = bp->bregs; 1001 void __iomem *bregs = bp->bregs;
1003 struct dev_mc_list *dmi = dev->mc_list; 1002 struct dev_mc_list *dmi = dev->mc_list;
1004 char *addrs; 1003 char *addrs;
@@ -1061,7 +1060,7 @@ static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
1061 1060
1062static u32 bigmac_get_link(struct net_device *dev) 1061static u32 bigmac_get_link(struct net_device *dev)
1063{ 1062{
1064 struct bigmac *bp = dev->priv; 1063 struct bigmac *bp = netdev_priv(dev);
1065 1064
1066 spin_lock_irq(&bp->lock); 1065 spin_lock_irq(&bp->lock);
1067 bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, BIGMAC_BMSR); 1066 bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, BIGMAC_BMSR);
@@ -1081,7 +1080,6 @@ static int __devinit bigmac_ether_init(struct of_device *op,
1081 static int version_printed; 1080 static int version_printed;
1082 struct net_device *dev; 1081 struct net_device *dev;
1083 u8 bsizes, bsizes_more; 1082 u8 bsizes, bsizes_more;
1084 DECLARE_MAC_BUF(mac);
1085 struct bigmac *bp; 1083 struct bigmac *bp;
1086 int i; 1084 int i;
1087 1085
@@ -1212,8 +1210,8 @@ static int __devinit bigmac_ether_init(struct of_device *op,
1212 1210
1213 dev_set_drvdata(&bp->bigmac_op->dev, bp); 1211 dev_set_drvdata(&bp->bigmac_op->dev, bp);
1214 1212
1215 printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %s\n", 1213 printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %pM\n",
1216 dev->name, print_mac(mac, dev->dev_addr)); 1214 dev->name, dev->dev_addr);
1217 1215
1218 return 0; 1216 return 0;
1219 1217
@@ -1235,7 +1233,7 @@ fail_and_cleanup:
1235 bp->bmac_block, 1233 bp->bmac_block,
1236 bp->bblock_dvma); 1234 bp->bblock_dvma);
1237 1235
1238 /* This also frees the co-located 'dev->priv' */ 1236 /* This also frees the co-located private data */
1239 free_netdev(dev); 1237 free_netdev(dev);
1240 return -ENODEV; 1238 return -ENODEV;
1241} 1239}
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index f860ea150395..698893b92003 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -468,7 +468,6 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
468 int bar = 1; 468 int bar = 1;
469#endif 469#endif
470 int phy, phy_end, phy_idx = 0; 470 int phy, phy_end, phy_idx = 0;
471 DECLARE_MAC_BUF(mac);
472 471
473/* when built into the kernel, we only print version if device is found */ 472/* when built into the kernel, we only print version if device is found */
474#ifndef MODULE 473#ifndef MODULE
@@ -547,9 +546,9 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
547 if (i) 546 if (i)
548 goto err_out_unmap_rx; 547 goto err_out_unmap_rx;
549 548
550 printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", 549 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
551 dev->name, pci_id_tbl[chip_idx].name, ioaddr, 550 dev->name, pci_id_tbl[chip_idx].name, ioaddr,
552 print_mac(mac, dev->dev_addr), irq); 551 dev->dev_addr, irq);
553 552
554 np->phys[0] = 1; /* Default setting */ 553 np->phys[0] = 1; /* Default setting */
555 np->mii_preamble_required++; 554 np->mii_preamble_required++;
@@ -1351,7 +1350,6 @@ static void rx_poll(unsigned long data)
1351 skb->protocol = eth_type_trans(skb, dev); 1350 skb->protocol = eth_type_trans(skb, dev);
1352 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */ 1351 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1353 netif_rx(skb); 1352 netif_rx(skb);
1354 dev->last_rx = jiffies;
1355 } 1353 }
1356 entry = (entry + 1) % RX_RING_SIZE; 1354 entry = (entry + 1) % RX_RING_SIZE;
1357 received++; 1355 received++;
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index fed7eba65ead..8a7460412482 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -164,7 +164,7 @@ static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
164 164
165static inline int _phy_read(struct net_device *dev, int mii_id, int reg) 165static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
166{ 166{
167 struct gem *gp = dev->priv; 167 struct gem *gp = netdev_priv(dev);
168 return __phy_read(gp, mii_id, reg); 168 return __phy_read(gp, mii_id, reg);
169} 169}
170 170
@@ -197,7 +197,7 @@ static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
197 197
198static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val) 198static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
199{ 199{
200 struct gem *gp = dev->priv; 200 struct gem *gp = netdev_priv(dev);
201 __phy_write(gp, mii_id, reg, val & 0xffff); 201 __phy_write(gp, mii_id, reg, val & 0xffff);
202} 202}
203 203
@@ -863,7 +863,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
863 863
864 gp->net_stats.rx_packets++; 864 gp->net_stats.rx_packets++;
865 gp->net_stats.rx_bytes += len; 865 gp->net_stats.rx_bytes += len;
866 gp->dev->last_rx = jiffies;
867 866
868 next: 867 next:
869 entry = NEXT_RX(entry); 868 entry = NEXT_RX(entry);
@@ -922,7 +921,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
922 gp->status = readl(gp->regs + GREG_STAT); 921 gp->status = readl(gp->regs + GREG_STAT);
923 } while (gp->status & GREG_STAT_NAPI); 922 } while (gp->status & GREG_STAT_NAPI);
924 923
925 __netif_rx_complete(dev, napi); 924 __netif_rx_complete(napi);
926 gem_enable_ints(gp); 925 gem_enable_ints(gp);
927 926
928 spin_unlock_irqrestore(&gp->lock, flags); 927 spin_unlock_irqrestore(&gp->lock, flags);
@@ -933,7 +932,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
933static irqreturn_t gem_interrupt(int irq, void *dev_id) 932static irqreturn_t gem_interrupt(int irq, void *dev_id)
934{ 933{
935 struct net_device *dev = dev_id; 934 struct net_device *dev = dev_id;
936 struct gem *gp = dev->priv; 935 struct gem *gp = netdev_priv(dev);
937 unsigned long flags; 936 unsigned long flags;
938 937
939 /* Swallow interrupts when shutting the chip down, though 938 /* Swallow interrupts when shutting the chip down, though
@@ -945,7 +944,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
945 944
946 spin_lock_irqsave(&gp->lock, flags); 945 spin_lock_irqsave(&gp->lock, flags);
947 946
948 if (netif_rx_schedule_prep(dev, &gp->napi)) { 947 if (netif_rx_schedule_prep(&gp->napi)) {
949 u32 gem_status = readl(gp->regs + GREG_STAT); 948 u32 gem_status = readl(gp->regs + GREG_STAT);
950 949
951 if (gem_status == 0) { 950 if (gem_status == 0) {
@@ -955,7 +954,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
955 } 954 }
956 gp->status = gem_status; 955 gp->status = gem_status;
957 gem_disable_ints(gp); 956 gem_disable_ints(gp);
958 __netif_rx_schedule(dev, &gp->napi); 957 __netif_rx_schedule(&gp->napi);
959 } 958 }
960 959
961 spin_unlock_irqrestore(&gp->lock, flags); 960 spin_unlock_irqrestore(&gp->lock, flags);
@@ -979,7 +978,7 @@ static void gem_poll_controller(struct net_device *dev)
979 978
980static void gem_tx_timeout(struct net_device *dev) 979static void gem_tx_timeout(struct net_device *dev)
981{ 980{
982 struct gem *gp = dev->priv; 981 struct gem *gp = netdev_priv(dev);
983 982
984 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 983 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
985 if (!gp->running) { 984 if (!gp->running) {
@@ -1018,7 +1017,7 @@ static __inline__ int gem_intme(int entry)
1018 1017
1019static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) 1018static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
1020{ 1019{
1021 struct gem *gp = dev->priv; 1020 struct gem *gp = netdev_priv(dev);
1022 int entry; 1021 int entry;
1023 u64 ctrl; 1022 u64 ctrl;
1024 unsigned long flags; 1023 unsigned long flags;
@@ -2208,7 +2207,7 @@ static void gem_stop_phy(struct gem *gp, int wol)
2208 2207
2209static int gem_do_start(struct net_device *dev) 2208static int gem_do_start(struct net_device *dev)
2210{ 2209{
2211 struct gem *gp = dev->priv; 2210 struct gem *gp = netdev_priv(dev);
2212 unsigned long flags; 2211 unsigned long flags;
2213 2212
2214 spin_lock_irqsave(&gp->lock, flags); 2213 spin_lock_irqsave(&gp->lock, flags);
@@ -2255,7 +2254,7 @@ static int gem_do_start(struct net_device *dev)
2255 2254
2256static void gem_do_stop(struct net_device *dev, int wol) 2255static void gem_do_stop(struct net_device *dev, int wol)
2257{ 2256{
2258 struct gem *gp = dev->priv; 2257 struct gem *gp = netdev_priv(dev);
2259 unsigned long flags; 2258 unsigned long flags;
2260 2259
2261 spin_lock_irqsave(&gp->lock, flags); 2260 spin_lock_irqsave(&gp->lock, flags);
@@ -2330,7 +2329,7 @@ static void gem_reset_task(struct work_struct *work)
2330 2329
2331static int gem_open(struct net_device *dev) 2330static int gem_open(struct net_device *dev)
2332{ 2331{
2333 struct gem *gp = dev->priv; 2332 struct gem *gp = netdev_priv(dev);
2334 int rc = 0; 2333 int rc = 0;
2335 2334
2336 mutex_lock(&gp->pm_mutex); 2335 mutex_lock(&gp->pm_mutex);
@@ -2349,7 +2348,7 @@ static int gem_open(struct net_device *dev)
2349 2348
2350static int gem_close(struct net_device *dev) 2349static int gem_close(struct net_device *dev)
2351{ 2350{
2352 struct gem *gp = dev->priv; 2351 struct gem *gp = netdev_priv(dev);
2353 2352
2354 mutex_lock(&gp->pm_mutex); 2353 mutex_lock(&gp->pm_mutex);
2355 2354
@@ -2368,7 +2367,7 @@ static int gem_close(struct net_device *dev)
2368static int gem_suspend(struct pci_dev *pdev, pm_message_t state) 2367static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2369{ 2368{
2370 struct net_device *dev = pci_get_drvdata(pdev); 2369 struct net_device *dev = pci_get_drvdata(pdev);
2371 struct gem *gp = dev->priv; 2370 struct gem *gp = netdev_priv(dev);
2372 unsigned long flags; 2371 unsigned long flags;
2373 2372
2374 mutex_lock(&gp->pm_mutex); 2373 mutex_lock(&gp->pm_mutex);
@@ -2432,7 +2431,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2432static int gem_resume(struct pci_dev *pdev) 2431static int gem_resume(struct pci_dev *pdev)
2433{ 2432{
2434 struct net_device *dev = pci_get_drvdata(pdev); 2433 struct net_device *dev = pci_get_drvdata(pdev);
2435 struct gem *gp = dev->priv; 2434 struct gem *gp = netdev_priv(dev);
2436 unsigned long flags; 2435 unsigned long flags;
2437 2436
2438 printk(KERN_INFO "%s: resuming\n", dev->name); 2437 printk(KERN_INFO "%s: resuming\n", dev->name);
@@ -2506,7 +2505,7 @@ static int gem_resume(struct pci_dev *pdev)
2506 2505
2507static struct net_device_stats *gem_get_stats(struct net_device *dev) 2506static struct net_device_stats *gem_get_stats(struct net_device *dev)
2508{ 2507{
2509 struct gem *gp = dev->priv; 2508 struct gem *gp = netdev_priv(dev);
2510 struct net_device_stats *stats = &gp->net_stats; 2509 struct net_device_stats *stats = &gp->net_stats;
2511 2510
2512 spin_lock_irq(&gp->lock); 2511 spin_lock_irq(&gp->lock);
@@ -2542,7 +2541,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
2542static int gem_set_mac_address(struct net_device *dev, void *addr) 2541static int gem_set_mac_address(struct net_device *dev, void *addr)
2543{ 2542{
2544 struct sockaddr *macaddr = (struct sockaddr *) addr; 2543 struct sockaddr *macaddr = (struct sockaddr *) addr;
2545 struct gem *gp = dev->priv; 2544 struct gem *gp = netdev_priv(dev);
2546 unsigned char *e = &dev->dev_addr[0]; 2545 unsigned char *e = &dev->dev_addr[0];
2547 2546
2548 if (!is_valid_ether_addr(macaddr->sa_data)) 2547 if (!is_valid_ether_addr(macaddr->sa_data))
@@ -2570,7 +2569,7 @@ static int gem_set_mac_address(struct net_device *dev, void *addr)
2570 2569
2571static void gem_set_multicast(struct net_device *dev) 2570static void gem_set_multicast(struct net_device *dev)
2572{ 2571{
2573 struct gem *gp = dev->priv; 2572 struct gem *gp = netdev_priv(dev);
2574 u32 rxcfg, rxcfg_new; 2573 u32 rxcfg, rxcfg_new;
2575 int limit = 10000; 2574 int limit = 10000;
2576 2575
@@ -2619,7 +2618,7 @@ static void gem_set_multicast(struct net_device *dev)
2619 2618
2620static int gem_change_mtu(struct net_device *dev, int new_mtu) 2619static int gem_change_mtu(struct net_device *dev, int new_mtu)
2621{ 2620{
2622 struct gem *gp = dev->priv; 2621 struct gem *gp = netdev_priv(dev);
2623 2622
2624 if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) 2623 if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
2625 return -EINVAL; 2624 return -EINVAL;
@@ -2650,7 +2649,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
2650 2649
2651static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2650static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2652{ 2651{
2653 struct gem *gp = dev->priv; 2652 struct gem *gp = netdev_priv(dev);
2654 2653
2655 strcpy(info->driver, DRV_NAME); 2654 strcpy(info->driver, DRV_NAME);
2656 strcpy(info->version, DRV_VERSION); 2655 strcpy(info->version, DRV_VERSION);
@@ -2659,7 +2658,7 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
2659 2658
2660static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2659static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2661{ 2660{
2662 struct gem *gp = dev->priv; 2661 struct gem *gp = netdev_priv(dev);
2663 2662
2664 if (gp->phy_type == phy_mii_mdio0 || 2663 if (gp->phy_type == phy_mii_mdio0 ||
2665 gp->phy_type == phy_mii_mdio1) { 2664 gp->phy_type == phy_mii_mdio1) {
@@ -2720,7 +2719,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2720 2719
2721static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2720static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2722{ 2721{
2723 struct gem *gp = dev->priv; 2722 struct gem *gp = netdev_priv(dev);
2724 2723
2725 /* Verify the settings we care about. */ 2724 /* Verify the settings we care about. */
2726 if (cmd->autoneg != AUTONEG_ENABLE && 2725 if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -2751,7 +2750,7 @@ static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2751 2750
2752static int gem_nway_reset(struct net_device *dev) 2751static int gem_nway_reset(struct net_device *dev)
2753{ 2752{
2754 struct gem *gp = dev->priv; 2753 struct gem *gp = netdev_priv(dev);
2755 2754
2756 if (!gp->want_autoneg) 2755 if (!gp->want_autoneg)
2757 return -EINVAL; 2756 return -EINVAL;
@@ -2768,13 +2767,13 @@ static int gem_nway_reset(struct net_device *dev)
2768 2767
2769static u32 gem_get_msglevel(struct net_device *dev) 2768static u32 gem_get_msglevel(struct net_device *dev)
2770{ 2769{
2771 struct gem *gp = dev->priv; 2770 struct gem *gp = netdev_priv(dev);
2772 return gp->msg_enable; 2771 return gp->msg_enable;
2773} 2772}
2774 2773
2775static void gem_set_msglevel(struct net_device *dev, u32 value) 2774static void gem_set_msglevel(struct net_device *dev, u32 value)
2776{ 2775{
2777 struct gem *gp = dev->priv; 2776 struct gem *gp = netdev_priv(dev);
2778 gp->msg_enable = value; 2777 gp->msg_enable = value;
2779} 2778}
2780 2779
@@ -2786,7 +2785,7 @@ static void gem_set_msglevel(struct net_device *dev, u32 value)
2786 2785
2787static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2786static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2788{ 2787{
2789 struct gem *gp = dev->priv; 2788 struct gem *gp = netdev_priv(dev);
2790 2789
2791 /* Add more when I understand how to program the chip */ 2790 /* Add more when I understand how to program the chip */
2792 if (gp->has_wol) { 2791 if (gp->has_wol) {
@@ -2800,7 +2799,7 @@ static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2800 2799
2801static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2800static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2802{ 2801{
2803 struct gem *gp = dev->priv; 2802 struct gem *gp = netdev_priv(dev);
2804 2803
2805 if (!gp->has_wol) 2804 if (!gp->has_wol)
2806 return -EOPNOTSUPP; 2805 return -EOPNOTSUPP;
@@ -2822,7 +2821,7 @@ static const struct ethtool_ops gem_ethtool_ops = {
2822 2821
2823static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2822static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2824{ 2823{
2825 struct gem *gp = dev->priv; 2824 struct gem *gp = netdev_priv(dev);
2826 struct mii_ioctl_data *data = if_mii(ifr); 2825 struct mii_ioctl_data *data = if_mii(ifr);
2827 int rc = -EOPNOTSUPP; 2826 int rc = -EOPNOTSUPP;
2828 unsigned long flags; 2827 unsigned long flags;
@@ -2954,7 +2953,7 @@ static void gem_remove_one(struct pci_dev *pdev)
2954 struct net_device *dev = pci_get_drvdata(pdev); 2953 struct net_device *dev = pci_get_drvdata(pdev);
2955 2954
2956 if (dev) { 2955 if (dev) {
2957 struct gem *gp = dev->priv; 2956 struct gem *gp = netdev_priv(dev);
2958 2957
2959 unregister_netdev(dev); 2958 unregister_netdev(dev);
2960 2959
@@ -2998,7 +2997,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
2998 struct net_device *dev; 2997 struct net_device *dev;
2999 struct gem *gp; 2998 struct gem *gp;
3000 int err, pci_using_dac; 2999 int err, pci_using_dac;
3001 DECLARE_MAC_BUF(mac);
3002 3000
3003 if (gem_version_printed++ == 0) 3001 if (gem_version_printed++ == 0)
3004 printk(KERN_INFO "%s", version); 3002 printk(KERN_INFO "%s", version);
@@ -3058,7 +3056,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3058 } 3056 }
3059 SET_NETDEV_DEV(dev, &pdev->dev); 3057 SET_NETDEV_DEV(dev, &pdev->dev);
3060 3058
3061 gp = dev->priv; 3059 gp = netdev_priv(dev);
3062 3060
3063 err = pci_request_regions(pdev, DRV_NAME); 3061 err = pci_request_regions(pdev, DRV_NAME);
3064 if (err) { 3062 if (err) {
@@ -3182,9 +3180,8 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
3182 goto err_out_free_consistent; 3180 goto err_out_free_consistent;
3183 } 3181 }
3184 3182
3185 printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet " 3183 printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
3186 "%s\n", 3184 dev->name, dev->dev_addr);
3187 dev->name, print_mac(mac, dev->dev_addr));
3188 3185
3189 if (gp->phy_type == phy_mii_mdio0 || 3186 if (gp->phy_type == phy_mii_mdio0 ||
3190 gp->phy_type == phy_mii_mdio1) 3187 gp->phy_type == phy_mii_mdio1)
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index f1ebeb5f65b2..b22d3355fb45 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2072,7 +2072,6 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2072 skb->protocol = eth_type_trans(skb, dev); 2072 skb->protocol = eth_type_trans(skb, dev);
2073 netif_rx(skb); 2073 netif_rx(skb);
2074 2074
2075 dev->last_rx = jiffies;
2076 hp->net_stats.rx_packets++; 2075 hp->net_stats.rx_packets++;
2077 hp->net_stats.rx_bytes += len; 2076 hp->net_stats.rx_bytes += len;
2078 next: 2077 next:
@@ -2131,7 +2130,7 @@ static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
2131 2130
2132 for (i = 0; i < 4; i++) { 2131 for (i = 0; i < 4; i++) {
2133 struct net_device *dev = qp->happy_meals[i]; 2132 struct net_device *dev = qp->happy_meals[i];
2134 struct happy_meal *hp = dev->priv; 2133 struct happy_meal *hp = netdev_priv(dev);
2135 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT); 2134 u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);
2136 2135
2137 HMD(("quattro_interrupt: status=%08x ", happy_status)); 2136 HMD(("quattro_interrupt: status=%08x ", happy_status));
@@ -2176,7 +2175,7 @@ static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
2176 2175
2177static int happy_meal_open(struct net_device *dev) 2176static int happy_meal_open(struct net_device *dev)
2178{ 2177{
2179 struct happy_meal *hp = dev->priv; 2178 struct happy_meal *hp = netdev_priv(dev);
2180 int res; 2179 int res;
2181 2180
2182 HMD(("happy_meal_open: ")); 2181 HMD(("happy_meal_open: "));
@@ -2208,7 +2207,7 @@ static int happy_meal_open(struct net_device *dev)
2208 2207
2209static int happy_meal_close(struct net_device *dev) 2208static int happy_meal_close(struct net_device *dev)
2210{ 2209{
2211 struct happy_meal *hp = dev->priv; 2210 struct happy_meal *hp = netdev_priv(dev);
2212 2211
2213 spin_lock_irq(&hp->happy_lock); 2212 spin_lock_irq(&hp->happy_lock);
2214 happy_meal_stop(hp, hp->gregs); 2213 happy_meal_stop(hp, hp->gregs);
@@ -2237,7 +2236,7 @@ static int happy_meal_close(struct net_device *dev)
2237 2236
2238static void happy_meal_tx_timeout(struct net_device *dev) 2237static void happy_meal_tx_timeout(struct net_device *dev)
2239{ 2238{
2240 struct happy_meal *hp = dev->priv; 2239 struct happy_meal *hp = netdev_priv(dev);
2241 2240
2242 printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name); 2241 printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
2243 tx_dump_log(); 2242 tx_dump_log();
@@ -2255,7 +2254,7 @@ static void happy_meal_tx_timeout(struct net_device *dev)
2255 2254
2256static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) 2255static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
2257{ 2256{
2258 struct happy_meal *hp = dev->priv; 2257 struct happy_meal *hp = netdev_priv(dev);
2259 int entry; 2258 int entry;
2260 u32 tx_flags; 2259 u32 tx_flags;
2261 2260
@@ -2344,7 +2343,7 @@ static int happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev)
2344 2343
2345static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) 2344static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
2346{ 2345{
2347 struct happy_meal *hp = dev->priv; 2346 struct happy_meal *hp = netdev_priv(dev);
2348 2347
2349 spin_lock_irq(&hp->happy_lock); 2348 spin_lock_irq(&hp->happy_lock);
2350 happy_meal_get_counters(hp, hp->bigmacregs); 2349 happy_meal_get_counters(hp, hp->bigmacregs);
@@ -2355,7 +2354,7 @@ static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
2355 2354
2356static void happy_meal_set_multicast(struct net_device *dev) 2355static void happy_meal_set_multicast(struct net_device *dev)
2357{ 2356{
2358 struct happy_meal *hp = dev->priv; 2357 struct happy_meal *hp = netdev_priv(dev);
2359 void __iomem *bregs = hp->bigmacregs; 2358 void __iomem *bregs = hp->bigmacregs;
2360 struct dev_mc_list *dmi = dev->mc_list; 2359 struct dev_mc_list *dmi = dev->mc_list;
2361 char *addrs; 2360 char *addrs;
@@ -2401,7 +2400,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
2401/* Ethtool support... */ 2400/* Ethtool support... */
2402static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2401static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2403{ 2402{
2404 struct happy_meal *hp = dev->priv; 2403 struct happy_meal *hp = netdev_priv(dev);
2405 2404
2406 cmd->supported = 2405 cmd->supported =
2407 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 2406 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
@@ -2446,7 +2445,7 @@ static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2446 2445
2447static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2446static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2448{ 2447{
2449 struct happy_meal *hp = dev->priv; 2448 struct happy_meal *hp = netdev_priv(dev);
2450 2449
2451 /* Verify the settings we care about. */ 2450 /* Verify the settings we care about. */
2452 if (cmd->autoneg != AUTONEG_ENABLE && 2451 if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -2470,7 +2469,7 @@ static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2470 2469
2471static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2470static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2472{ 2471{
2473 struct happy_meal *hp = dev->priv; 2472 struct happy_meal *hp = netdev_priv(dev);
2474 2473
2475 strcpy(info->driver, "sunhme"); 2474 strcpy(info->driver, "sunhme");
2476 strcpy(info->version, "2.02"); 2475 strcpy(info->version, "2.02");
@@ -2492,7 +2491,7 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
2492 2491
2493static u32 hme_get_link(struct net_device *dev) 2492static u32 hme_get_link(struct net_device *dev)
2494{ 2493{
2495 struct happy_meal *hp = dev->priv; 2494 struct happy_meal *hp = netdev_priv(dev);
2496 2495
2497 spin_lock_irq(&hp->happy_lock); 2496 spin_lock_irq(&hp->happy_lock);
2498 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR); 2497 hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
@@ -2617,7 +2616,6 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
2617 struct net_device *dev; 2616 struct net_device *dev;
2618 int i, qfe_slot = -1; 2617 int i, qfe_slot = -1;
2619 int err = -ENODEV; 2618 int err = -ENODEV;
2620 DECLARE_MAC_BUF(mac);
2621 2619
2622 if (is_qfe) { 2620 if (is_qfe) {
2623 qp = quattro_sbus_find(op); 2621 qp = quattro_sbus_find(op);
@@ -2797,7 +2795,7 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
2797 printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ", 2795 printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
2798 dev->name); 2796 dev->name);
2799 2797
2800 printk("%s\n", print_mac(mac, dev->dev_addr)); 2798 printk("%pM\n", dev->dev_addr);
2801 2799
2802 return 0; 2800 return 0;
2803 2801
@@ -2932,7 +2930,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
2932 int i, qfe_slot = -1; 2930 int i, qfe_slot = -1;
2933 char prom_name[64]; 2931 char prom_name[64];
2934 int err; 2932 int err;
2935 DECLARE_MAC_BUF(mac);
2936 2933
2937 /* Now make sure pci_dev cookie is there. */ 2934 /* Now make sure pci_dev cookie is there. */
2938#ifdef CONFIG_SPARC 2935#ifdef CONFIG_SPARC
@@ -2973,7 +2970,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
2973 2970
2974 dev->base_addr = (long) pdev; 2971 dev->base_addr = (long) pdev;
2975 2972
2976 hp = (struct happy_meal *)dev->priv; 2973 hp = netdev_priv(dev);
2977 memset(hp, 0, sizeof(*hp)); 2974 memset(hp, 0, sizeof(*hp));
2978 2975
2979 hp->happy_dev = pdev; 2976 hp->happy_dev = pdev;
@@ -3141,7 +3138,7 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3141 printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ", 3138 printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
3142 dev->name); 3139 dev->name);
3143 3140
3144 printk("%s\n", print_mac(mac, dev->dev_addr)); 3141 printk("%pM\n", dev->dev_addr);
3145 3142
3146 return 0; 3143 return 0;
3147 3144
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 704301a5a7ff..281373281756 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -555,7 +555,6 @@ static void lance_rx_dvma(struct net_device *dev)
555 len); 555 len);
556 skb->protocol = eth_type_trans(skb, dev); 556 skb->protocol = eth_type_trans(skb, dev);
557 netif_rx(skb); 557 netif_rx(skb);
558 dev->last_rx = jiffies;
559 dev->stats.rx_packets++; 558 dev->stats.rx_packets++;
560 } 559 }
561 560
@@ -726,7 +725,6 @@ static void lance_rx_pio(struct net_device *dev)
726 lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len); 725 lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len);
727 skb->protocol = eth_type_trans(skb, dev); 726 skb->protocol = eth_type_trans(skb, dev);
728 netif_rx(skb); 727 netif_rx(skb);
729 dev->last_rx = jiffies;
730 dev->stats.rx_packets++; 728 dev->stats.rx_packets++;
731 } 729 }
732 730
@@ -1321,7 +1319,6 @@ static int __devinit sparc_lance_probe_one(struct of_device *op,
1321 static unsigned version_printed; 1319 static unsigned version_printed;
1322 struct lance_private *lp; 1320 struct lance_private *lp;
1323 struct net_device *dev; 1321 struct net_device *dev;
1324 DECLARE_MAC_BUF(mac);
1325 int i; 1322 int i;
1326 1323
1327 dev = alloc_etherdev(sizeof(struct lance_private) + 8); 1324 dev = alloc_etherdev(sizeof(struct lance_private) + 8);
@@ -1491,8 +1488,8 @@ no_link_test:
1491 1488
1492 dev_set_drvdata(&op->dev, lp); 1489 dev_set_drvdata(&op->dev, lp);
1493 1490
1494 printk(KERN_INFO "%s: LANCE %s\n", 1491 printk(KERN_INFO "%s: LANCE %pM\n",
1495 dev->name, print_mac(mac, dev->dev_addr)); 1492 dev->name, dev->dev_addr);
1496 1493
1497 return 0; 1494 return 0;
1498 1495
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index f63644744ff9..6e8f377355fe 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -446,7 +446,6 @@ static void qe_rx(struct sunqe *qep)
446 len); 446 len);
447 skb->protocol = eth_type_trans(skb, qep->dev); 447 skb->protocol = eth_type_trans(skb, qep->dev);
448 netif_rx(skb); 448 netif_rx(skb);
449 qep->dev->last_rx = jiffies;
450 dev->stats.rx_packets++; 449 dev->stats.rx_packets++;
451 dev->stats.rx_bytes += len; 450 dev->stats.rx_bytes += len;
452 } 451 }
@@ -513,7 +512,7 @@ static irqreturn_t qec_interrupt(int irq, void *dev_id)
513 512
514static int qe_open(struct net_device *dev) 513static int qe_open(struct net_device *dev)
515{ 514{
516 struct sunqe *qep = (struct sunqe *) dev->priv; 515 struct sunqe *qep = netdev_priv(dev);
517 516
518 qep->mconfig = (MREGS_MCONFIG_TXENAB | 517 qep->mconfig = (MREGS_MCONFIG_TXENAB |
519 MREGS_MCONFIG_RXENAB | 518 MREGS_MCONFIG_RXENAB |
@@ -523,7 +522,7 @@ static int qe_open(struct net_device *dev)
523 522
524static int qe_close(struct net_device *dev) 523static int qe_close(struct net_device *dev)
525{ 524{
526 struct sunqe *qep = (struct sunqe *) dev->priv; 525 struct sunqe *qep = netdev_priv(dev);
527 526
528 qe_stop(qep); 527 qe_stop(qep);
529 return 0; 528 return 0;
@@ -549,7 +548,7 @@ static void qe_tx_reclaim(struct sunqe *qep)
549 548
550static void qe_tx_timeout(struct net_device *dev) 549static void qe_tx_timeout(struct net_device *dev)
551{ 550{
552 struct sunqe *qep = (struct sunqe *) dev->priv; 551 struct sunqe *qep = netdev_priv(dev);
553 int tx_full; 552 int tx_full;
554 553
555 spin_lock_irq(&qep->lock); 554 spin_lock_irq(&qep->lock);
@@ -575,7 +574,7 @@ out:
575/* Get a packet queued to go onto the wire. */ 574/* Get a packet queued to go onto the wire. */
576static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) 575static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
577{ 576{
578 struct sunqe *qep = (struct sunqe *) dev->priv; 577 struct sunqe *qep = netdev_priv(dev);
579 struct sunqe_buffers *qbufs = qep->buffers; 578 struct sunqe_buffers *qbufs = qep->buffers;
580 __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma; 579 __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
581 unsigned char *txbuf; 580 unsigned char *txbuf;
@@ -627,7 +626,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
627 626
628static void qe_set_multicast(struct net_device *dev) 627static void qe_set_multicast(struct net_device *dev)
629{ 628{
630 struct sunqe *qep = (struct sunqe *) dev->priv; 629 struct sunqe *qep = netdev_priv(dev);
631 struct dev_mc_list *dmi = dev->mc_list; 630 struct dev_mc_list *dmi = dev->mc_list;
632 u8 new_mconfig = qep->mconfig; 631 u8 new_mconfig = qep->mconfig;
633 char *addrs; 632 char *addrs;
@@ -693,7 +692,7 @@ static void qe_set_multicast(struct net_device *dev)
693static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 692static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
694{ 693{
695 const struct linux_prom_registers *regs; 694 const struct linux_prom_registers *regs;
696 struct sunqe *qep = dev->priv; 695 struct sunqe *qep = netdev_priv(dev);
697 struct of_device *op; 696 struct of_device *op;
698 697
699 strcpy(info->driver, "sunqe"); 698 strcpy(info->driver, "sunqe");
@@ -708,7 +707,7 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
708 707
709static u32 qe_get_link(struct net_device *dev) 708static u32 qe_get_link(struct net_device *dev)
710{ 709{
711 struct sunqe *qep = dev->priv; 710 struct sunqe *qep = netdev_priv(dev);
712 void __iomem *mregs = qep->mregs; 711 void __iomem *mregs = qep->mregs;
713 u8 phyconfig; 712 u8 phyconfig;
714 713
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index a720065553df..233f1cda36e5 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -1149,7 +1149,6 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
1149 struct vnet *vp; 1149 struct vnet *vp;
1150 const u64 *rmac; 1150 const u64 *rmac;
1151 int len, i, err, switch_port; 1151 int len, i, err, switch_port;
1152 DECLARE_MAC_BUF(mac);
1153 1152
1154 print_version(); 1153 print_version();
1155 1154
@@ -1214,8 +1213,8 @@ static int __devinit vnet_port_probe(struct vio_dev *vdev,
1214 1213
1215 dev_set_drvdata(&vdev->dev, port); 1214 dev_set_drvdata(&vdev->dev, port);
1216 1215
1217 printk(KERN_INFO "%s: PORT ( remote-mac %s%s )\n", 1216 printk(KERN_INFO "%s: PORT ( remote-mac %pM%s )\n",
1218 vp->dev->name, print_mac(mac, port->raddr), 1217 vp->dev->name, port->raddr,
1219 switch_port ? " switch-port" : ""); 1218 switch_port ? " switch-port" : "");
1220 1219
1221 vio_port_up(&port->vio); 1220 vio_port_up(&port->vio);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index df20cafff7dd..bcd0e60cbda9 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -37,6 +37,7 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
37#include <linux/interrupt.h> 37#include <linux/interrupt.h>
38#include <linux/ioport.h> 38#include <linux/ioport.h>
39#include <linux/in.h> 39#include <linux/in.h>
40#include <linux/if_vlan.h>
40#include <linux/slab.h> 41#include <linux/slab.h>
41#include <linux/string.h> 42#include <linux/string.h>
42#include <linux/spinlock.h> 43#include <linux/spinlock.h>
@@ -236,7 +237,7 @@ struct tc35815_regs {
236#define Rx_Halted 0x00008000 /* Rx Halted */ 237#define Rx_Halted 0x00008000 /* Rx Halted */
237#define Rx_Good 0x00004000 /* Rx Good */ 238#define Rx_Good 0x00004000 /* Rx Good */
238#define Rx_RxPar 0x00002000 /* Rx Parity Error */ 239#define Rx_RxPar 0x00002000 /* Rx Parity Error */
239 /* 0x00001000 not use */ 240#define Rx_TypePkt 0x00001000 /* Rx Type Packet */
240#define Rx_LongErr 0x00000800 /* Rx Long Error */ 241#define Rx_LongErr 0x00000800 /* Rx Long Error */
241#define Rx_Over 0x00000400 /* Rx Overflow */ 242#define Rx_Over 0x00000400 /* Rx Overflow */
242#define Rx_CRCErr 0x00000200 /* Rx CRC Error */ 243#define Rx_CRCErr 0x00000200 /* Rx CRC Error */
@@ -244,8 +245,9 @@ struct tc35815_regs {
244#define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */ 245#define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */
245#define Rx_IntRx 0x00000040 /* Rx Interrupt */ 246#define Rx_IntRx 0x00000040 /* Rx Interrupt */
246#define Rx_CtlRecd 0x00000020 /* Rx Control Receive */ 247#define Rx_CtlRecd 0x00000020 /* Rx Control Receive */
248#define Rx_InLenErr 0x00000010 /* Rx In Range Frame Length Error */
247 249
248#define Rx_Stat_Mask 0x0000EFC0 /* Rx All Status Mask */ 250#define Rx_Stat_Mask 0x0000FFF0 /* Rx All Status Mask */
249 251
250/* Int_En bit asign -------------------------------------------------------- */ 252/* Int_En bit asign -------------------------------------------------------- */
251#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */ 253#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */
@@ -340,7 +342,7 @@ struct BDesc {
340 Tx_En) /* maybe 0x7b01 */ 342 Tx_En) /* maybe 0x7b01 */
341#endif 343#endif
342#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \ 344#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
343 | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */ 345 | Rx_EnCRCErr | Rx_EnAlign | Rx_StripCRC | Rx_RxEn) /* maybe 0x6f11 */
344#define INT_EN_CMD (Int_NRAbtEn | \ 346#define INT_EN_CMD (Int_NRAbtEn | \
345 Int_DmParErrEn | Int_DParDEn | Int_DParErrEn | \ 347 Int_DmParErrEn | Int_DParDEn | Int_DParErrEn | \
346 Int_SSysErrEn | Int_RMasAbtEn | Int_RTargAbtEn | \ 348 Int_SSysErrEn | Int_RMasAbtEn | Int_RTargAbtEn | \
@@ -372,9 +374,11 @@ struct BDesc {
372#if RX_CTL_CMD & Rx_LongEn 374#if RX_CTL_CMD & Rx_LongEn
373#define RX_BUF_SIZE PAGE_SIZE 375#define RX_BUF_SIZE PAGE_SIZE
374#elif RX_CTL_CMD & Rx_StripCRC 376#elif RX_CTL_CMD & Rx_StripCRC
375#define RX_BUF_SIZE ALIGN(ETH_FRAME_LEN + 4 + 2, 32) /* +2: reserve */ 377#define RX_BUF_SIZE \
378 L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + NET_IP_ALIGN)
376#else 379#else
377#define RX_BUF_SIZE ALIGN(ETH_FRAME_LEN + 2, 32) /* +2: reserve */ 380#define RX_BUF_SIZE \
381 L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
378#endif 382#endif
379#endif /* TC35815_USE_PACKEDBUFFER */ 383#endif /* TC35815_USE_PACKEDBUFFER */
380#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */ 384#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */
@@ -865,7 +869,6 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
865 struct net_device *dev; 869 struct net_device *dev;
866 struct tc35815_local *lp; 870 struct tc35815_local *lp;
867 int rc; 871 int rc;
868 DECLARE_MAC_BUF(mac);
869 872
870 static int printed_version; 873 static int printed_version;
871 if (!printed_version++) { 874 if (!printed_version++) {
@@ -942,11 +945,11 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
942 goto err_out; 945 goto err_out;
943 946
944 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 947 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
945 printk(KERN_INFO "%s: %s at 0x%lx, %s, IRQ %d\n", 948 printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
946 dev->name, 949 dev->name,
947 chip_info[ent->driver_data].name, 950 chip_info[ent->driver_data].name,
948 dev->base_addr, 951 dev->base_addr,
949 print_mac(mac, dev->dev_addr), 952 dev->dev_addr,
950 dev->irq); 953 dev->irq);
951 954
952 rc = tc_mii_init(dev); 955 rc = tc_mii_init(dev);
@@ -1288,12 +1291,9 @@ panic_queues(struct net_device *dev)
1288 1291
1289static void print_eth(const u8 *add) 1292static void print_eth(const u8 *add)
1290{ 1293{
1291 DECLARE_MAC_BUF(mac);
1292
1293 printk(KERN_DEBUG "print_eth(%p)\n", add); 1294 printk(KERN_DEBUG "print_eth(%p)\n", add);
1294 printk(KERN_DEBUG " %s =>", print_mac(mac, add + 6)); 1295 printk(KERN_DEBUG " %pM => %pM : %02x%02x\n",
1295 printk(KERN_CONT " %s : %02x%02x\n", 1296 add + 6, add, add[12], add[13]);
1296 print_mac(mac, add), add[12], add[13]);
1297} 1297}
1298 1298
1299static int tc35815_tx_full(struct net_device *dev) 1299static int tc35815_tx_full(struct net_device *dev)
@@ -1609,8 +1609,8 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1609 if (!(dmactl & DMA_IntMask)) { 1609 if (!(dmactl & DMA_IntMask)) {
1610 /* disable interrupts */ 1610 /* disable interrupts */
1611 tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl); 1611 tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
1612 if (netif_rx_schedule_prep(dev, &lp->napi)) 1612 if (netif_rx_schedule_prep(&lp->napi))
1613 __netif_rx_schedule(dev, &lp->napi); 1613 __netif_rx_schedule(&lp->napi);
1614 else { 1614 else {
1615 printk(KERN_ERR "%s: interrupt taken in poll\n", 1615 printk(KERN_ERR "%s: interrupt taken in poll\n",
1616 dev->name); 1616 dev->name);
@@ -1669,7 +1669,7 @@ tc35815_rx(struct net_device *dev)
1669 struct RxFD *next_rfd; 1669 struct RxFD *next_rfd;
1670#endif 1670#endif
1671#if (RX_CTL_CMD & Rx_StripCRC) == 0 1671#if (RX_CTL_CMD & Rx_StripCRC) == 0
1672 pkt_len -= 4; 1672 pkt_len -= ETH_FCS_LEN;
1673#endif 1673#endif
1674 1674
1675 if (netif_msg_rx_status(lp)) 1675 if (netif_msg_rx_status(lp))
@@ -1688,14 +1688,14 @@ tc35815_rx(struct net_device *dev)
1688#endif 1688#endif
1689#ifdef TC35815_USE_PACKEDBUFFER 1689#ifdef TC35815_USE_PACKEDBUFFER
1690 BUG_ON(bd_count > 2); 1690 BUG_ON(bd_count > 2);
1691 skb = dev_alloc_skb(pkt_len + 2); /* +2: for reserve */ 1691 skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
1692 if (skb == NULL) { 1692 if (skb == NULL) {
1693 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", 1693 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
1694 dev->name); 1694 dev->name);
1695 dev->stats.rx_dropped++; 1695 dev->stats.rx_dropped++;
1696 break; 1696 break;
1697 } 1697 }
1698 skb_reserve(skb, 2); /* 16 bit alignment */ 1698 skb_reserve(skb, NET_IP_ALIGN);
1699 1699
1700 data = skb_put(skb, pkt_len); 1700 data = skb_put(skb, pkt_len);
1701 1701
@@ -1747,8 +1747,9 @@ tc35815_rx(struct net_device *dev)
1747 pci_unmap_single(lp->pci_dev, 1747 pci_unmap_single(lp->pci_dev,
1748 lp->rx_skbs[cur_bd].skb_dma, 1748 lp->rx_skbs[cur_bd].skb_dma,
1749 RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 1749 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1750 if (!HAVE_DMA_RXALIGN(lp)) 1750 if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN)
1751 memmove(skb->data, skb->data - 2, pkt_len); 1751 memmove(skb->data, skb->data - NET_IP_ALIGN,
1752 pkt_len);
1752 data = skb_put(skb, pkt_len); 1753 data = skb_put(skb, pkt_len);
1753#endif /* TC35815_USE_PACKEDBUFFER */ 1754#endif /* TC35815_USE_PACKEDBUFFER */
1754 if (netif_msg_pktdata(lp)) 1755 if (netif_msg_pktdata(lp))
@@ -1760,7 +1761,6 @@ tc35815_rx(struct net_device *dev)
1760#else 1761#else
1761 netif_rx(skb); 1762 netif_rx(skb);
1762#endif 1763#endif
1763 dev->last_rx = jiffies;
1764 dev->stats.rx_packets++; 1764 dev->stats.rx_packets++;
1765 dev->stats.rx_bytes += pkt_len; 1765 dev->stats.rx_bytes += pkt_len;
1766 } else { 1766 } else {
@@ -1919,7 +1919,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
1919 spin_unlock(&lp->lock); 1919 spin_unlock(&lp->lock);
1920 1920
1921 if (received < budget) { 1921 if (received < budget) {
1922 netif_rx_complete(dev, napi); 1922 netif_rx_complete(napi);
1923 /* enable interrupts */ 1923 /* enable interrupts */
1924 tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl); 1924 tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
1925 } 1925 }
@@ -2153,13 +2153,12 @@ static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned ch
2153 int cam_index = index * 6; 2153 int cam_index = index * 6;
2154 u32 cam_data; 2154 u32 cam_data;
2155 u32 saved_addr; 2155 u32 saved_addr;
2156 DECLARE_MAC_BUF(mac);
2157 2156
2158 saved_addr = tc_readl(&tr->CAM_Adr); 2157 saved_addr = tc_readl(&tr->CAM_Adr);
2159 2158
2160 if (netif_msg_hw(lp)) 2159 if (netif_msg_hw(lp))
2161 printk(KERN_DEBUG "%s: CAM %d: %s\n", 2160 printk(KERN_DEBUG "%s: CAM %d: %pM\n",
2162 dev->name, index, print_mac(mac, addr)); 2161 dev->name, index, addr);
2163 if (index & 1) { 2162 if (index & 1) {
2164 /* read modify write */ 2163 /* read modify write */
2165 tc_writel(cam_index - 2, &tr->CAM_Adr); 2164 tc_writel(cam_index - 2, &tr->CAM_Adr);
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 91f9054a1d95..a10a83a11d9f 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -251,7 +251,7 @@ static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
251static irqreturn_t bdx_isr_napi(int irq, void *dev) 251static irqreturn_t bdx_isr_napi(int irq, void *dev)
252{ 252{
253 struct net_device *ndev = dev; 253 struct net_device *ndev = dev;
254 struct bdx_priv *priv = ndev->priv; 254 struct bdx_priv *priv = netdev_priv(ndev);
255 u32 isr; 255 u32 isr;
256 256
257 ENTER; 257 ENTER;
@@ -265,8 +265,8 @@ static irqreturn_t bdx_isr_napi(int irq, void *dev)
265 bdx_isr_extra(priv, isr); 265 bdx_isr_extra(priv, isr);
266 266
267 if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) { 267 if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) {
268 if (likely(netif_rx_schedule_prep(ndev, &priv->napi))) { 268 if (likely(netif_rx_schedule_prep(&priv->napi))) {
269 __netif_rx_schedule(ndev, &priv->napi); 269 __netif_rx_schedule(&priv->napi);
270 RET(IRQ_HANDLED); 270 RET(IRQ_HANDLED);
271 } else { 271 } else {
272 /* NOTE: we get here if intr has slipped into window 272 /* NOTE: we get here if intr has slipped into window
@@ -289,7 +289,6 @@ static irqreturn_t bdx_isr_napi(int irq, void *dev)
289static int bdx_poll(struct napi_struct *napi, int budget) 289static int bdx_poll(struct napi_struct *napi, int budget)
290{ 290{
291 struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi); 291 struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi);
292 struct net_device *dev = priv->ndev;
293 int work_done; 292 int work_done;
294 293
295 ENTER; 294 ENTER;
@@ -303,7 +302,7 @@ static int bdx_poll(struct napi_struct *napi, int budget)
303 * device lock and allow waiting tasks (eg rmmod) to advance) */ 302 * device lock and allow waiting tasks (eg rmmod) to advance) */
304 priv->napi_stop = 0; 303 priv->napi_stop = 0;
305 304
306 netif_rx_complete(dev, napi); 305 netif_rx_complete(napi);
307 bdx_enable_interrupts(priv); 306 bdx_enable_interrupts(priv);
308 } 307 }
309 return work_done; 308 return work_done;
@@ -559,7 +558,7 @@ static int bdx_close(struct net_device *ndev)
559 struct bdx_priv *priv = NULL; 558 struct bdx_priv *priv = NULL;
560 559
561 ENTER; 560 ENTER;
562 priv = ndev->priv; 561 priv = netdev_priv(ndev);
563 562
564 napi_disable(&priv->napi); 563 napi_disable(&priv->napi);
565 564
@@ -588,7 +587,7 @@ static int bdx_open(struct net_device *ndev)
588 int rc; 587 int rc;
589 588
590 ENTER; 589 ENTER;
591 priv = ndev->priv; 590 priv = netdev_priv(ndev);
592 bdx_reset(priv); 591 bdx_reset(priv);
593 if (netif_running(ndev)) 592 if (netif_running(ndev))
594 netif_stop_queue(priv->ndev); 593 netif_stop_queue(priv->ndev);
@@ -633,7 +632,7 @@ static int bdx_range_check(struct bdx_priv *priv, u32 offset)
633 632
634static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) 633static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
635{ 634{
636 struct bdx_priv *priv = ndev->priv; 635 struct bdx_priv *priv = netdev_priv(ndev);
637 u32 data[3]; 636 u32 data[3];
638 int error; 637 int error;
639 638
@@ -698,7 +697,7 @@ static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
698 */ 697 */
699static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable) 698static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
700{ 699{
701 struct bdx_priv *priv = ndev->priv; 700 struct bdx_priv *priv = netdev_priv(ndev);
702 u32 reg, bit, val; 701 u32 reg, bit, val;
703 702
704 ENTER; 703 ENTER;
@@ -748,7 +747,7 @@ static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
748static void 747static void
749bdx_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp) 748bdx_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
750{ 749{
751 struct bdx_priv *priv = ndev->priv; 750 struct bdx_priv *priv = netdev_priv(ndev);
752 751
753 ENTER; 752 ENTER;
754 DBG("device='%s', group='%p'\n", ndev->name, grp); 753 DBG("device='%s', group='%p'\n", ndev->name, grp);
@@ -787,7 +786,7 @@ static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
787 786
788static void bdx_setmulti(struct net_device *ndev) 787static void bdx_setmulti(struct net_device *ndev)
789{ 788{
790 struct bdx_priv *priv = ndev->priv; 789 struct bdx_priv *priv = netdev_priv(ndev);
791 790
792 u32 rxf_val = 791 u32 rxf_val =
793 GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN; 792 GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB | GMAC_RX_FILTER_OSEN;
@@ -847,7 +846,7 @@ static void bdx_setmulti(struct net_device *ndev)
847 846
848static int bdx_set_mac(struct net_device *ndev, void *p) 847static int bdx_set_mac(struct net_device *ndev, void *p)
849{ 848{
850 struct bdx_priv *priv = ndev->priv; 849 struct bdx_priv *priv = netdev_priv(ndev);
851 struct sockaddr *addr = p; 850 struct sockaddr *addr = p;
852 851
853 ENTER; 852 ENTER;
@@ -929,7 +928,7 @@ static void bdx_update_stats(struct bdx_priv *priv)
929 928
930static struct net_device_stats *bdx_get_stats(struct net_device *ndev) 929static struct net_device_stats *bdx_get_stats(struct net_device *ndev)
931{ 930{
932 struct bdx_priv *priv = ndev->priv; 931 struct bdx_priv *priv = netdev_priv(ndev);
933 struct net_device_stats *net_stat = &priv->net_stats; 932 struct net_device_stats *net_stat = &priv->net_stats;
934 return net_stat; 933 return net_stat;
935} 934}
@@ -1237,7 +1236,6 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
1237 ENTER; 1236 ENTER;
1238 max_done = budget; 1237 max_done = budget;
1239 1238
1240 priv->ndev->last_rx = jiffies;
1241 f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR; 1239 f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_WR_PTR;
1242 1240
1243 size = f->m.wptr - f->m.rptr; 1241 size = f->m.wptr - f->m.rptr;
@@ -1624,7 +1622,7 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
1624 */ 1622 */
1625static int bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev) 1623static int bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev)
1626{ 1624{
1627 struct bdx_priv *priv = ndev->priv; 1625 struct bdx_priv *priv = netdev_priv(ndev);
1628 struct txd_fifo *f = &priv->txd_fifo0; 1626 struct txd_fifo *f = &priv->txd_fifo0;
1629 int txd_checksum = 7; /* full checksum */ 1627 int txd_checksum = 7; /* full checksum */
1630 int txd_lgsnd = 0; 1628 int txd_lgsnd = 0;
@@ -1886,6 +1884,21 @@ static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
1886 RET(); 1884 RET();
1887} 1885}
1888 1886
1887static const struct net_device_ops bdx_netdev_ops = {
1888 .ndo_open = bdx_open,
1889 .ndo_stop = bdx_close,
1890 .ndo_start_xmit = bdx_tx_transmit,
1891 .ndo_validate_addr = eth_validate_addr,
1892 .ndo_do_ioctl = bdx_ioctl,
1893 .ndo_set_multicast_list = bdx_setmulti,
1894 .ndo_get_stats = bdx_get_stats,
1895 .ndo_change_mtu = bdx_change_mtu,
1896 .ndo_set_mac_address = bdx_set_mac,
1897 .ndo_vlan_rx_register = bdx_vlan_rx_register,
1898 .ndo_vlan_rx_add_vid = bdx_vlan_rx_add_vid,
1899 .ndo_vlan_rx_kill_vid = bdx_vlan_rx_kill_vid,
1900};
1901
1889/** 1902/**
1890 * bdx_probe - Device Initialization Routine 1903 * bdx_probe - Device Initialization Routine
1891 * @pdev: PCI device information struct 1904 * @pdev: PCI device information struct
@@ -1995,18 +2008,8 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1995 goto err_out_iomap; 2008 goto err_out_iomap;
1996 } 2009 }
1997 2010
1998 ndev->open = bdx_open; 2011 ndev->netdev_ops = &bdx_netdev_ops;
1999 ndev->stop = bdx_close;
2000 ndev->hard_start_xmit = bdx_tx_transmit;
2001 ndev->do_ioctl = bdx_ioctl;
2002 ndev->set_multicast_list = bdx_setmulti;
2003 ndev->get_stats = bdx_get_stats;
2004 ndev->change_mtu = bdx_change_mtu;
2005 ndev->set_mac_address = bdx_set_mac;
2006 ndev->tx_queue_len = BDX_NDEV_TXQ_LEN; 2012 ndev->tx_queue_len = BDX_NDEV_TXQ_LEN;
2007 ndev->vlan_rx_register = bdx_vlan_rx_register;
2008 ndev->vlan_rx_add_vid = bdx_vlan_rx_add_vid;
2009 ndev->vlan_rx_kill_vid = bdx_vlan_rx_kill_vid;
2010 2013
2011 bdx_ethtool_ops(ndev); /* ethtool interface */ 2014 bdx_ethtool_ops(ndev); /* ethtool interface */
2012 2015
@@ -2027,7 +2030,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2027 ndev->features |= NETIF_F_HIGHDMA; 2030 ndev->features |= NETIF_F_HIGHDMA;
2028 2031
2029 /************** priv ****************/ 2032 /************** priv ****************/
2030 priv = nic->priv[port] = ndev->priv; 2033 priv = nic->priv[port] = netdev_priv(ndev);
2031 2034
2032 memset(priv, 0, sizeof(struct bdx_priv)); 2035 memset(priv, 0, sizeof(struct bdx_priv));
2033 priv->pBdxRegs = nic->regs + port * 0x8000; 2036 priv->pBdxRegs = nic->regs + port * 0x8000;
@@ -2150,7 +2153,7 @@ static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
2150{ 2153{
2151 u32 rdintcm; 2154 u32 rdintcm;
2152 u32 tdintcm; 2155 u32 tdintcm;
2153 struct bdx_priv *priv = netdev->priv; 2156 struct bdx_priv *priv = netdev_priv(netdev);
2154 2157
2155 rdintcm = priv->rdintcm; 2158 rdintcm = priv->rdintcm;
2156 tdintcm = priv->tdintcm; 2159 tdintcm = priv->tdintcm;
@@ -2181,7 +2184,7 @@ static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
2181static void 2184static void
2182bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) 2185bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
2183{ 2186{
2184 struct bdx_priv *priv = netdev->priv; 2187 struct bdx_priv *priv = netdev_priv(netdev);
2185 2188
2186 strlcat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver)); 2189 strlcat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
2187 strlcat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version)); 2190 strlcat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
@@ -2223,7 +2226,7 @@ bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2223{ 2226{
2224 u32 rdintcm; 2227 u32 rdintcm;
2225 u32 tdintcm; 2228 u32 tdintcm;
2226 struct bdx_priv *priv = netdev->priv; 2229 struct bdx_priv *priv = netdev_priv(netdev);
2227 2230
2228 rdintcm = priv->rdintcm; 2231 rdintcm = priv->rdintcm;
2229 tdintcm = priv->tdintcm; 2232 tdintcm = priv->tdintcm;
@@ -2252,7 +2255,7 @@ bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
2252{ 2255{
2253 u32 rdintcm; 2256 u32 rdintcm;
2254 u32 tdintcm; 2257 u32 tdintcm;
2255 struct bdx_priv *priv = netdev->priv; 2258 struct bdx_priv *priv = netdev_priv(netdev);
2256 int rx_coal; 2259 int rx_coal;
2257 int tx_coal; 2260 int tx_coal;
2258 int rx_max_coal; 2261 int rx_max_coal;
@@ -2310,7 +2313,7 @@ static inline int bdx_tx_fifo_size_to_packets(int tx_size)
2310static void 2313static void
2311bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) 2314bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2312{ 2315{
2313 struct bdx_priv *priv = netdev->priv; 2316 struct bdx_priv *priv = netdev_priv(netdev);
2314 2317
2315 /*max_pending - the maximum-sized FIFO we allow */ 2318 /*max_pending - the maximum-sized FIFO we allow */
2316 ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3); 2319 ring->rx_max_pending = bdx_rx_fifo_size_to_packets(3);
@@ -2327,7 +2330,7 @@ bdx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2327static int 2330static int
2328bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) 2331bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2329{ 2332{
2330 struct bdx_priv *priv = netdev->priv; 2333 struct bdx_priv *priv = netdev_priv(netdev);
2331 int rx_size = 0; 2334 int rx_size = 0;
2332 int tx_size = 0; 2335 int tx_size = 0;
2333 2336
@@ -2388,7 +2391,7 @@ static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2388 */ 2391 */
2389static int bdx_get_stats_count(struct net_device *netdev) 2392static int bdx_get_stats_count(struct net_device *netdev)
2390{ 2393{
2391 struct bdx_priv *priv = netdev->priv; 2394 struct bdx_priv *priv = netdev_priv(netdev);
2392 BDX_ASSERT(ARRAY_SIZE(bdx_stat_names) 2395 BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
2393 != sizeof(struct bdx_stats) / sizeof(u64)); 2396 != sizeof(struct bdx_stats) / sizeof(u64));
2394 return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0); 2397 return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
@@ -2403,7 +2406,7 @@ static int bdx_get_stats_count(struct net_device *netdev)
2403static void bdx_get_ethtool_stats(struct net_device *netdev, 2406static void bdx_get_ethtool_stats(struct net_device *netdev,
2404 struct ethtool_stats *stats, u64 *data) 2407 struct ethtool_stats *stats, u64 *data)
2405{ 2408{
2406 struct bdx_priv *priv = netdev->priv; 2409 struct bdx_priv *priv = netdev_priv(netdev);
2407 2410
2408 if (priv->stats_flag) { 2411 if (priv->stats_flag) {
2409 2412
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index eb9f8f3638e1..04ae1e86aeaa 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -54,20 +54,21 @@
54#include <asm/prom.h> 54#include <asm/prom.h>
55#endif 55#endif
56 56
57#define BAR_0 0
58#define BAR_2 2
59
57#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 60#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
58#define TG3_VLAN_TAG_USED 1 61#define TG3_VLAN_TAG_USED 1
59#else 62#else
60#define TG3_VLAN_TAG_USED 0 63#define TG3_VLAN_TAG_USED 0
61#endif 64#endif
62 65
63#define TG3_TSO_SUPPORT 1
64
65#include "tg3.h" 66#include "tg3.h"
66 67
67#define DRV_MODULE_NAME "tg3" 68#define DRV_MODULE_NAME "tg3"
68#define PFX DRV_MODULE_NAME ": " 69#define PFX DRV_MODULE_NAME ": "
69#define DRV_MODULE_VERSION "3.94" 70#define DRV_MODULE_VERSION "3.97"
70#define DRV_MODULE_RELDATE "August 14, 2008" 71#define DRV_MODULE_RELDATE "December 10, 2008"
71 72
72#define TG3_DEF_MAC_MODE 0 73#define TG3_DEF_MAC_MODE 0
73#define TG3_DEF_RX_MODE 0 74#define TG3_DEF_RX_MODE 0
@@ -129,6 +130,8 @@
129/* minimum number of free TX descriptors required to wake up TX process */ 130/* minimum number of free TX descriptors required to wake up TX process */
130#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4) 131#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
131 132
133#define TG3_RAW_IP_ALIGN 2
134
132/* number of ETHTOOL_GSTATS u64's */ 135/* number of ETHTOOL_GSTATS u64's */
133#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64)) 136#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
134 137
@@ -205,7 +208,13 @@ static struct pci_device_id tg3_pci_tbl[] = {
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)}, 208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)}, 209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)}, 210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)}, 213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
214 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57720)},
209 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 218 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
210 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 219 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 220 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -872,13 +881,48 @@ static int tg3_mdio_reset(struct mii_bus *bp)
872 return 0; 881 return 0;
873} 882}
874 883
875static void tg3_mdio_config(struct tg3 *tp) 884static void tg3_mdio_config_5785(struct tg3 *tp)
876{ 885{
877 u32 val; 886 u32 val;
887 struct phy_device *phydev;
878 888
879 if (tp->mdio_bus->phy_map[PHY_ADDR]->interface != 889 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
880 PHY_INTERFACE_MODE_RGMII) 890 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
891 case TG3_PHY_ID_BCM50610:
892 val = MAC_PHYCFG2_50610_LED_MODES;
893 break;
894 case TG3_PHY_ID_BCMAC131:
895 val = MAC_PHYCFG2_AC131_LED_MODES;
896 break;
897 case TG3_PHY_ID_RTL8211C:
898 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
899 break;
900 case TG3_PHY_ID_RTL8201E:
901 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
902 break;
903 default:
881 return; 904 return;
905 }
906
907 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
908 tw32(MAC_PHYCFG2, val);
909
910 val = tr32(MAC_PHYCFG1);
911 val &= ~MAC_PHYCFG1_RGMII_INT;
912 tw32(MAC_PHYCFG1, val);
913
914 return;
915 }
916
917 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
918 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
919 MAC_PHYCFG2_FMODE_MASK_MASK |
920 MAC_PHYCFG2_GMODE_MASK_MASK |
921 MAC_PHYCFG2_ACT_MASK_MASK |
922 MAC_PHYCFG2_QUAL_MASK_MASK |
923 MAC_PHYCFG2_INBAND_ENABLE;
924
925 tw32(MAC_PHYCFG2, val);
882 926
883 val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC | 927 val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
884 MAC_PHYCFG1_RGMII_SND_STAT_EN); 928 MAC_PHYCFG1_RGMII_SND_STAT_EN);
@@ -890,11 +934,6 @@ static void tg3_mdio_config(struct tg3 *tp)
890 } 934 }
891 tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV); 935 tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
892 936
893 val = tr32(MAC_PHYCFG2) & ~(MAC_PHYCFG2_INBAND_ENABLE);
894 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
895 val |= MAC_PHYCFG2_INBAND_ENABLE;
896 tw32(MAC_PHYCFG2, val);
897
898 val = tr32(MAC_EXT_RGMII_MODE); 937 val = tr32(MAC_EXT_RGMII_MODE);
899 val &= ~(MAC_RGMII_MODE_RX_INT_B | 938 val &= ~(MAC_RGMII_MODE_RX_INT_B |
900 MAC_RGMII_MODE_RX_QUALITY | 939 MAC_RGMII_MODE_RX_QUALITY |
@@ -903,7 +942,7 @@ static void tg3_mdio_config(struct tg3 *tp)
903 MAC_RGMII_MODE_TX_ENABLE | 942 MAC_RGMII_MODE_TX_ENABLE |
904 MAC_RGMII_MODE_TX_LOWPWR | 943 MAC_RGMII_MODE_TX_LOWPWR |
905 MAC_RGMII_MODE_TX_RESET); 944 MAC_RGMII_MODE_TX_RESET);
906 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) { 945 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
907 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 946 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
908 val |= MAC_RGMII_MODE_RX_INT_B | 947 val |= MAC_RGMII_MODE_RX_INT_B |
909 MAC_RGMII_MODE_RX_QUALITY | 948 MAC_RGMII_MODE_RX_QUALITY |
@@ -929,8 +968,9 @@ static void tg3_mdio_start(struct tg3 *tp)
929 tw32_f(MAC_MI_MODE, tp->mi_mode); 968 tw32_f(MAC_MI_MODE, tp->mi_mode);
930 udelay(80); 969 udelay(80);
931 970
932 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) 971 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
933 tg3_mdio_config(tp); 972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
973 tg3_mdio_config_5785(tp);
934} 974}
935 975
936static void tg3_mdio_stop(struct tg3 *tp) 976static void tg3_mdio_stop(struct tg3 *tp)
@@ -984,29 +1024,44 @@ static int tg3_mdio_init(struct tg3 *tp)
984 if (i) { 1024 if (i) {
985 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n", 1025 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
986 tp->dev->name, i); 1026 tp->dev->name, i);
1027 mdiobus_free(tp->mdio_bus);
987 return i; 1028 return i;
988 } 1029 }
989 1030
990 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
991
992 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1031 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
993 1032
994 switch (phydev->phy_id) { 1033 if (!phydev || !phydev->drv) {
1034 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1035 mdiobus_unregister(tp->mdio_bus);
1036 mdiobus_free(tp->mdio_bus);
1037 return -ENODEV;
1038 }
1039
1040 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1041 case TG3_PHY_ID_BCM57780:
1042 phydev->interface = PHY_INTERFACE_MODE_GMII;
1043 break;
995 case TG3_PHY_ID_BCM50610: 1044 case TG3_PHY_ID_BCM50610:
996 phydev->interface = PHY_INTERFACE_MODE_RGMII;
997 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) 1045 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
998 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; 1046 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
999 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1047 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1000 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; 1048 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1001 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN) 1049 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1002 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; 1050 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1051 /* fallthru */
1052 case TG3_PHY_ID_RTL8211C:
1053 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1003 break; 1054 break;
1055 case TG3_PHY_ID_RTL8201E:
1004 case TG3_PHY_ID_BCMAC131: 1056 case TG3_PHY_ID_BCMAC131:
1005 phydev->interface = PHY_INTERFACE_MODE_MII; 1057 phydev->interface = PHY_INTERFACE_MODE_MII;
1006 break; 1058 break;
1007 } 1059 }
1008 1060
1009 tg3_mdio_config(tp); 1061 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1062
1063 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1064 tg3_mdio_config_5785(tp);
1010 1065
1011 return 0; 1066 return 0;
1012} 1067}
@@ -1130,9 +1185,9 @@ static void tg3_link_report(struct tg3 *tp)
1130 printk(KERN_INFO PFX 1185 printk(KERN_INFO PFX
1131 "%s: Flow control is %s for TX and %s for RX.\n", 1186 "%s: Flow control is %s for TX and %s for RX.\n",
1132 tp->dev->name, 1187 tp->dev->name,
1133 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ? 1188 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1134 "on" : "off", 1189 "on" : "off",
1135 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ? 1190 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1136 "on" : "off"); 1191 "on" : "off");
1137 tg3_ump_link_report(tp); 1192 tg3_ump_link_report(tp);
1138 } 1193 }
@@ -1142,11 +1197,11 @@ static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1142{ 1197{
1143 u16 miireg; 1198 u16 miireg;
1144 1199
1145 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX)) 1200 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1146 miireg = ADVERTISE_PAUSE_CAP; 1201 miireg = ADVERTISE_PAUSE_CAP;
1147 else if (flow_ctrl & TG3_FLOW_CTRL_TX) 1202 else if (flow_ctrl & FLOW_CTRL_TX)
1148 miireg = ADVERTISE_PAUSE_ASYM; 1203 miireg = ADVERTISE_PAUSE_ASYM;
1149 else if (flow_ctrl & TG3_FLOW_CTRL_RX) 1204 else if (flow_ctrl & FLOW_CTRL_RX)
1150 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 1205 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1151 else 1206 else
1152 miireg = 0; 1207 miireg = 0;
@@ -1158,11 +1213,11 @@ static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1158{ 1213{
1159 u16 miireg; 1214 u16 miireg;
1160 1215
1161 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX)) 1216 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1162 miireg = ADVERTISE_1000XPAUSE; 1217 miireg = ADVERTISE_1000XPAUSE;
1163 else if (flow_ctrl & TG3_FLOW_CTRL_TX) 1218 else if (flow_ctrl & FLOW_CTRL_TX)
1164 miireg = ADVERTISE_1000XPSE_ASYM; 1219 miireg = ADVERTISE_1000XPSE_ASYM;
1165 else if (flow_ctrl & TG3_FLOW_CTRL_RX) 1220 else if (flow_ctrl & FLOW_CTRL_RX)
1166 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM; 1221 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1167 else 1222 else
1168 miireg = 0; 1223 miireg = 0;
@@ -1170,28 +1225,6 @@ static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1170 return miireg; 1225 return miireg;
1171} 1226}
1172 1227
1173static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1174{
1175 u8 cap = 0;
1176
1177 if (lcladv & ADVERTISE_PAUSE_CAP) {
1178 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1179 if (rmtadv & LPA_PAUSE_CAP)
1180 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1181 else if (rmtadv & LPA_PAUSE_ASYM)
1182 cap = TG3_FLOW_CTRL_RX;
1183 } else {
1184 if (rmtadv & LPA_PAUSE_CAP)
1185 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1186 }
1187 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1188 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1189 cap = TG3_FLOW_CTRL_TX;
1190 }
1191
1192 return cap;
1193}
1194
1195static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv) 1228static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1196{ 1229{
1197 u8 cap = 0; 1230 u8 cap = 0;
@@ -1199,16 +1232,16 @@ static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1199 if (lcladv & ADVERTISE_1000XPAUSE) { 1232 if (lcladv & ADVERTISE_1000XPAUSE) {
1200 if (lcladv & ADVERTISE_1000XPSE_ASYM) { 1233 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1201 if (rmtadv & LPA_1000XPAUSE) 1234 if (rmtadv & LPA_1000XPAUSE)
1202 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX; 1235 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1203 else if (rmtadv & LPA_1000XPAUSE_ASYM) 1236 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1204 cap = TG3_FLOW_CTRL_RX; 1237 cap = FLOW_CTRL_RX;
1205 } else { 1238 } else {
1206 if (rmtadv & LPA_1000XPAUSE) 1239 if (rmtadv & LPA_1000XPAUSE)
1207 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX; 1240 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1208 } 1241 }
1209 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) { 1242 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1210 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM)) 1243 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1211 cap = TG3_FLOW_CTRL_TX; 1244 cap = FLOW_CTRL_TX;
1212 } 1245 }
1213 1246
1214 return cap; 1247 return cap;
@@ -1231,13 +1264,13 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1231 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) 1264 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1232 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv); 1265 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1233 else 1266 else
1234 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv); 1267 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1235 } else 1268 } else
1236 flowctrl = tp->link_config.flowctrl; 1269 flowctrl = tp->link_config.flowctrl;
1237 1270
1238 tp->link_config.active_flowctrl = flowctrl; 1271 tp->link_config.active_flowctrl = flowctrl;
1239 1272
1240 if (flowctrl & TG3_FLOW_CTRL_RX) 1273 if (flowctrl & FLOW_CTRL_RX)
1241 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE; 1274 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1242 else 1275 else
1243 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE; 1276 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
@@ -1245,7 +1278,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1245 if (old_rx_mode != tp->rx_mode) 1278 if (old_rx_mode != tp->rx_mode)
1246 tw32_f(MAC_RX_MODE, tp->rx_mode); 1279 tw32_f(MAC_RX_MODE, tp->rx_mode);
1247 1280
1248 if (flowctrl & TG3_FLOW_CTRL_TX) 1281 if (flowctrl & FLOW_CTRL_TX)
1249 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE; 1282 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1250 else 1283 else
1251 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE; 1284 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
@@ -1299,6 +1332,15 @@ static void tg3_adjust_link(struct net_device *dev)
1299 udelay(40); 1332 udelay(40);
1300 } 1333 }
1301 1334
1335 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1336 if (phydev->speed == SPEED_10)
1337 tw32(MAC_MI_STAT,
1338 MAC_MI_STAT_10MBPS_MODE |
1339 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1340 else
1341 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1342 }
1343
1302 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF) 1344 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1303 tw32(MAC_TX_LENGTHS, 1345 tw32(MAC_TX_LENGTHS,
1304 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) | 1346 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
@@ -1339,25 +1381,37 @@ static int tg3_phy_init(struct tg3 *tp)
1339 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1381 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1340 1382
1341 /* Attach the MAC to the PHY. */ 1383 /* Attach the MAC to the PHY. */
1342 phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link, 1384 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1343 phydev->dev_flags, phydev->interface); 1385 phydev->dev_flags, phydev->interface);
1344 if (IS_ERR(phydev)) { 1386 if (IS_ERR(phydev)) {
1345 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name); 1387 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1346 return PTR_ERR(phydev); 1388 return PTR_ERR(phydev);
1347 } 1389 }
1348 1390
1349 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1350
1351 /* Mask with MAC supported features. */ 1391 /* Mask with MAC supported features. */
1352 phydev->supported &= (PHY_GBIT_FEATURES | 1392 switch (phydev->interface) {
1353 SUPPORTED_Pause | 1393 case PHY_INTERFACE_MODE_GMII:
1354 SUPPORTED_Asym_Pause); 1394 case PHY_INTERFACE_MODE_RGMII:
1395 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1396 phydev->supported &= (PHY_GBIT_FEATURES |
1397 SUPPORTED_Pause |
1398 SUPPORTED_Asym_Pause);
1399 break;
1400 }
1401 /* fallthru */
1402 case PHY_INTERFACE_MODE_MII:
1403 phydev->supported &= (PHY_BASIC_FEATURES |
1404 SUPPORTED_Pause |
1405 SUPPORTED_Asym_Pause);
1406 break;
1407 default:
1408 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1409 return -EINVAL;
1410 }
1355 1411
1356 phydev->advertising = phydev->supported; 1412 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1357 1413
1358 printk(KERN_INFO 1414 phydev->advertising = phydev->supported;
1359 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
1360 tp->dev->name, phydev->drv->name, phydev->dev.bus_id);
1361 1415
1362 return 0; 1416 return 0;
1363} 1417}
@@ -1406,6 +1460,34 @@ static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1406 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val); 1460 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1407} 1461}
1408 1462
1463static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1464{
1465 u32 reg;
1466
1467 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
1468 return;
1469
1470 reg = MII_TG3_MISC_SHDW_WREN |
1471 MII_TG3_MISC_SHDW_SCR5_SEL |
1472 MII_TG3_MISC_SHDW_SCR5_LPED |
1473 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1474 MII_TG3_MISC_SHDW_SCR5_SDTL |
1475 MII_TG3_MISC_SHDW_SCR5_C125OE;
1476 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1477 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1478
1479 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1480
1481
1482 reg = MII_TG3_MISC_SHDW_WREN |
1483 MII_TG3_MISC_SHDW_APD_SEL |
1484 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1485 if (enable)
1486 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1487
1488 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1489}
1490
1409static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable) 1491static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1410{ 1492{
1411 u32 phy; 1493 u32 phy;
@@ -1737,7 +1819,8 @@ static int tg3_phy_reset(struct tg3 *tp)
1737 tw32(TG3_CPMU_CTRL, cpmuctrl); 1819 tw32(TG3_CPMU_CTRL, cpmuctrl);
1738 } 1820 }
1739 1821
1740 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) { 1822 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1823 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1741 u32 val; 1824 u32 val;
1742 1825
1743 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 1826 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
@@ -1747,16 +1830,15 @@ static int tg3_phy_reset(struct tg3 *tp)
1747 udelay(40); 1830 udelay(40);
1748 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val); 1831 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1749 } 1832 }
1750
1751 /* Disable GPHY autopowerdown. */
1752 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1753 MII_TG3_MISC_SHDW_WREN |
1754 MII_TG3_MISC_SHDW_APD_SEL |
1755 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1756 } 1833 }
1757 1834
1758 tg3_phy_apply_otp(tp); 1835 tg3_phy_apply_otp(tp);
1759 1836
1837 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1838 tg3_phy_toggle_apd(tp, true);
1839 else
1840 tg3_phy_toggle_apd(tp, false);
1841
1760out: 1842out:
1761 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) { 1843 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1762 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); 1844 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
@@ -1961,7 +2043,7 @@ static int tg3_halt_cpu(struct tg3 *, u32);
1961static int tg3_nvram_lock(struct tg3 *); 2043static int tg3_nvram_lock(struct tg3 *);
1962static void tg3_nvram_unlock(struct tg3 *); 2044static void tg3_nvram_unlock(struct tg3 *);
1963 2045
1964static void tg3_power_down_phy(struct tg3 *tp) 2046static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
1965{ 2047{
1966 u32 val; 2048 u32 val;
1967 2049
@@ -1984,10 +2066,15 @@ static void tg3_power_down_phy(struct tg3 *tp)
1984 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 2066 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
1985 udelay(40); 2067 udelay(40);
1986 return; 2068 return;
1987 } else if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { 2069 } else if (do_low_power) {
1988 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2070 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1989 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 2071 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1990 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2); 2072
2073 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2074 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2075 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2076 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2077 MII_TG3_AUXCTL_PCTL_VREG_11V);
1991 } 2078 }
1992 2079
1993 /* The PHY should not be powered down on some chips because 2080 /* The PHY should not be powered down on some chips because
@@ -1999,7 +2086,8 @@ static void tg3_power_down_phy(struct tg3 *tp)
1999 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))) 2086 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2000 return; 2087 return;
2001 2088
2002 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) { 2089 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2090 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2003 val = tr32(TG3_CPMU_LSPD_1000MB_CLK); 2091 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2004 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK; 2092 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2005 val |= CPMU_LSPD_1000MB_MACCLK_12_5; 2093 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
@@ -2009,9 +2097,47 @@ static void tg3_power_down_phy(struct tg3 *tp)
2009 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN); 2097 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2010} 2098}
2011 2099
2100/* tp->lock is held. */
2101static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2102{
2103 u32 addr_high, addr_low;
2104 int i;
2105
2106 addr_high = ((tp->dev->dev_addr[0] << 8) |
2107 tp->dev->dev_addr[1]);
2108 addr_low = ((tp->dev->dev_addr[2] << 24) |
2109 (tp->dev->dev_addr[3] << 16) |
2110 (tp->dev->dev_addr[4] << 8) |
2111 (tp->dev->dev_addr[5] << 0));
2112 for (i = 0; i < 4; i++) {
2113 if (i == 1 && skip_mac_1)
2114 continue;
2115 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2116 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2117 }
2118
2119 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2120 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2121 for (i = 0; i < 12; i++) {
2122 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2123 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2124 }
2125 }
2126
2127 addr_high = (tp->dev->dev_addr[0] +
2128 tp->dev->dev_addr[1] +
2129 tp->dev->dev_addr[2] +
2130 tp->dev->dev_addr[3] +
2131 tp->dev->dev_addr[4] +
2132 tp->dev->dev_addr[5]) &
2133 TX_BACKOFF_SEED_MASK;
2134 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2135}
2136
2012static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) 2137static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2013{ 2138{
2014 u32 misc_host_ctrl; 2139 u32 misc_host_ctrl;
2140 bool device_should_wake, do_low_power;
2015 2141
2016 /* Make sure register accesses (indirect or otherwise) 2142 /* Make sure register accesses (indirect or otherwise)
2017 * will function correctly. 2143 * will function correctly.
@@ -2041,15 +2167,34 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2041 tp->dev->name, state); 2167 tp->dev->name, state);
2042 return -EINVAL; 2168 return -EINVAL;
2043 } 2169 }
2170
2171 /* Restore the CLKREQ setting. */
2172 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2173 u16 lnkctl;
2174
2175 pci_read_config_word(tp->pdev,
2176 tp->pcie_cap + PCI_EXP_LNKCTL,
2177 &lnkctl);
2178 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2179 pci_write_config_word(tp->pdev,
2180 tp->pcie_cap + PCI_EXP_LNKCTL,
2181 lnkctl);
2182 }
2183
2044 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); 2184 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2045 tw32(TG3PCI_MISC_HOST_CTRL, 2185 tw32(TG3PCI_MISC_HOST_CTRL,
2046 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 2186 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2047 2187
2188 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2189 device_may_wakeup(&tp->pdev->dev) &&
2190 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2191
2048 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 2192 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2193 do_low_power = false;
2049 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) && 2194 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2050 !tp->link_config.phy_is_low_power) { 2195 !tp->link_config.phy_is_low_power) {
2051 struct phy_device *phydev; 2196 struct phy_device *phydev;
2052 u32 advertising; 2197 u32 phyid, advertising;
2053 2198
2054 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 2199 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2055 2200
@@ -2066,7 +2211,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2066 ADVERTISED_10baseT_Half; 2211 ADVERTISED_10baseT_Half;
2067 2212
2068 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || 2213 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2069 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) { 2214 device_should_wake) {
2070 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) 2215 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2071 advertising |= 2216 advertising |=
2072 ADVERTISED_100baseT_Half | 2217 ADVERTISED_100baseT_Half |
@@ -2079,8 +2224,19 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2079 phydev->advertising = advertising; 2224 phydev->advertising = advertising;
2080 2225
2081 phy_start_aneg(phydev); 2226 phy_start_aneg(phydev);
2227
2228 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2229 if (phyid != TG3_PHY_ID_BCMAC131) {
2230 phyid &= TG3_PHY_OUI_MASK;
2231 if (phyid == TG3_PHY_OUI_1 &&
2232 phyid == TG3_PHY_OUI_2 &&
2233 phyid == TG3_PHY_OUI_3)
2234 do_low_power = true;
2235 }
2082 } 2236 }
2083 } else { 2237 } else {
2238 do_low_power = true;
2239
2084 if (tp->link_config.phy_is_low_power == 0) { 2240 if (tp->link_config.phy_is_low_power == 0) {
2085 tp->link_config.phy_is_low_power = 1; 2241 tp->link_config.phy_is_low_power = 1;
2086 tp->link_config.orig_speed = tp->link_config.speed; 2242 tp->link_config.orig_speed = tp->link_config.speed;
@@ -2096,6 +2252,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2096 } 2252 }
2097 } 2253 }
2098 2254
2255 __tg3_set_mac_addr(tp, 0);
2256
2099 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 2257 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2100 u32 val; 2258 u32 val;
2101 2259
@@ -2118,11 +2276,11 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2118 WOL_DRV_WOL | 2276 WOL_DRV_WOL |
2119 WOL_SET_MAGIC_PKT); 2277 WOL_SET_MAGIC_PKT);
2120 2278
2121 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) { 2279 if (device_should_wake) {
2122 u32 mac_mode; 2280 u32 mac_mode;
2123 2281
2124 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { 2282 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2125 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) { 2283 if (do_low_power) {
2126 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a); 2284 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2127 udelay(40); 2285 udelay(40);
2128 } 2286 }
@@ -2150,9 +2308,12 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2150 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 2308 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2151 tw32(MAC_LED_CTRL, tp->led_ctrl); 2309 tw32(MAC_LED_CTRL, tp->led_ctrl);
2152 2310
2153 if (pci_pme_capable(tp->pdev, state) && 2311 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2154 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) 2312 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2155 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; 2313 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2314 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2315 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2316 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2156 2317
2157 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 2318 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2158 mac_mode |= tp->mac_mode & 2319 mac_mode |= tp->mac_mode &
@@ -2224,10 +2385,9 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2224 } 2385 }
2225 } 2386 }
2226 2387
2227 if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) && 2388 if (!(device_should_wake) &&
2228 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && 2389 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2229 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) 2390 tg3_power_down_phy(tp, do_low_power);
2230 tg3_power_down_phy(tp);
2231 2391
2232 tg3_frob_aux_power(tp); 2392 tg3_frob_aux_power(tp);
2233 2393
@@ -2250,7 +2410,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2250 2410
2251 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 2411 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2252 2412
2253 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) 2413 if (device_should_wake)
2254 pci_enable_wake(tp->pdev, state, true); 2414 pci_enable_wake(tp->pdev, state, true);
2255 2415
2256 /* Finally, set the new power state. */ 2416 /* Finally, set the new power state. */
@@ -2789,6 +2949,24 @@ relink:
2789 NIC_SRAM_FIRMWARE_MBOX_MAGIC2); 2949 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2790 } 2950 }
2791 2951
2952 /* Prevent send BD corruption. */
2953 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2954 u16 oldlnkctl, newlnkctl;
2955
2956 pci_read_config_word(tp->pdev,
2957 tp->pcie_cap + PCI_EXP_LNKCTL,
2958 &oldlnkctl);
2959 if (tp->link_config.active_speed == SPEED_100 ||
2960 tp->link_config.active_speed == SPEED_10)
2961 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
2962 else
2963 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
2964 if (newlnkctl != oldlnkctl)
2965 pci_write_config_word(tp->pdev,
2966 tp->pcie_cap + PCI_EXP_LNKCTL,
2967 newlnkctl);
2968 }
2969
2792 if (current_link_up != netif_carrier_ok(tp->dev)) { 2970 if (current_link_up != netif_carrier_ok(tp->dev)) {
2793 if (current_link_up) 2971 if (current_link_up)
2794 netif_carrier_on(tp->dev); 2972 netif_carrier_on(tp->dev);
@@ -3765,8 +3943,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3765 err = tg3_setup_copper_phy(tp, force_reset); 3943 err = tg3_setup_copper_phy(tp, force_reset);
3766 } 3944 }
3767 3945
3768 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 || 3946 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
3769 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3770 u32 val, scale; 3947 u32 val, scale;
3771 3948
3772 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; 3949 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
@@ -4100,12 +4277,15 @@ static int tg3_rx(struct tg3 *tp, int budget)
4100 goto next_pkt; 4277 goto next_pkt;
4101 } 4278 }
4102 4279
4103 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */ 4280 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4281 ETH_FCS_LEN;
4104 4282
4105 if (len > RX_COPY_THRESHOLD 4283 if (len > RX_COPY_THRESHOLD
4106 && tp->rx_offset == 2 4284 && tp->rx_offset == NET_IP_ALIGN
4107 /* rx_offset != 2 iff this is a 5701 card running 4285 /* rx_offset will likely not equal NET_IP_ALIGN
4108 * in PCI-X mode [see tg3_get_invariants()] */ 4286 * if this is a 5701 card running in PCI-X mode
4287 * [see tg3_get_invariants()]
4288 */
4109 ) { 4289 ) {
4110 int skb_size; 4290 int skb_size;
4111 4291
@@ -4125,11 +4305,12 @@ static int tg3_rx(struct tg3 *tp, int budget)
4125 tg3_recycle_rx(tp, opaque_key, 4305 tg3_recycle_rx(tp, opaque_key,
4126 desc_idx, *post_ptr); 4306 desc_idx, *post_ptr);
4127 4307
4128 copy_skb = netdev_alloc_skb(tp->dev, len + 2); 4308 copy_skb = netdev_alloc_skb(tp->dev,
4309 len + TG3_RAW_IP_ALIGN);
4129 if (copy_skb == NULL) 4310 if (copy_skb == NULL)
4130 goto drop_it_no_recycle; 4311 goto drop_it_no_recycle;
4131 4312
4132 skb_reserve(copy_skb, 2); 4313 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4133 skb_put(copy_skb, len); 4314 skb_put(copy_skb, len);
4134 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 4315 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4135 skb_copy_from_linear_data(skb, copy_skb->data, len); 4316 skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4157,7 +4338,6 @@ static int tg3_rx(struct tg3 *tp, int budget)
4157#endif 4338#endif
4158 netif_receive_skb(skb); 4339 netif_receive_skb(skb);
4159 4340
4160 tp->dev->last_rx = jiffies;
4161 received++; 4341 received++;
4162 budget--; 4342 budget--;
4163 4343
@@ -4271,7 +4451,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
4271 sblk->status &= ~SD_STATUS_UPDATED; 4451 sblk->status &= ~SD_STATUS_UPDATED;
4272 4452
4273 if (likely(!tg3_has_work(tp))) { 4453 if (likely(!tg3_has_work(tp))) {
4274 netif_rx_complete(tp->dev, napi); 4454 netif_rx_complete(napi);
4275 tg3_restart_ints(tp); 4455 tg3_restart_ints(tp);
4276 break; 4456 break;
4277 } 4457 }
@@ -4281,7 +4461,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
4281 4461
4282tx_recovery: 4462tx_recovery:
4283 /* work_done is guaranteed to be less than budget. */ 4463 /* work_done is guaranteed to be less than budget. */
4284 netif_rx_complete(tp->dev, napi); 4464 netif_rx_complete(napi);
4285 schedule_work(&tp->reset_task); 4465 schedule_work(&tp->reset_task);
4286 return work_done; 4466 return work_done;
4287} 4467}
@@ -4330,7 +4510,7 @@ static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4330 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 4510 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4331 4511
4332 if (likely(!tg3_irq_sync(tp))) 4512 if (likely(!tg3_irq_sync(tp)))
4333 netif_rx_schedule(dev, &tp->napi); 4513 netif_rx_schedule(&tp->napi);
4334 4514
4335 return IRQ_HANDLED; 4515 return IRQ_HANDLED;
4336} 4516}
@@ -4355,7 +4535,7 @@ static irqreturn_t tg3_msi(int irq, void *dev_id)
4355 */ 4535 */
4356 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 4536 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4357 if (likely(!tg3_irq_sync(tp))) 4537 if (likely(!tg3_irq_sync(tp)))
4358 netif_rx_schedule(dev, &tp->napi); 4538 netif_rx_schedule(&tp->napi);
4359 4539
4360 return IRQ_RETVAL(1); 4540 return IRQ_RETVAL(1);
4361} 4541}
@@ -4397,7 +4577,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4397 sblk->status &= ~SD_STATUS_UPDATED; 4577 sblk->status &= ~SD_STATUS_UPDATED;
4398 if (likely(tg3_has_work(tp))) { 4578 if (likely(tg3_has_work(tp))) {
4399 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 4579 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4400 netif_rx_schedule(dev, &tp->napi); 4580 netif_rx_schedule(&tp->napi);
4401 } else { 4581 } else {
4402 /* No work, shared interrupt perhaps? re-enable 4582 /* No work, shared interrupt perhaps? re-enable
4403 * interrupts, and flush that PCI write 4583 * interrupts, and flush that PCI write
@@ -4443,7 +4623,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4443 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); 4623 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4444 if (tg3_irq_sync(tp)) 4624 if (tg3_irq_sync(tp))
4445 goto out; 4625 goto out;
4446 if (netif_rx_schedule_prep(dev, &tp->napi)) { 4626 if (netif_rx_schedule_prep(&tp->napi)) {
4447 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); 4627 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4448 /* Update last_tag to mark that this status has been 4628 /* Update last_tag to mark that this status has been
4449 * seen. Because interrupt may be shared, we may be 4629 * seen. Because interrupt may be shared, we may be
@@ -4451,7 +4631,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4451 * if tg3_poll() is not scheduled. 4631 * if tg3_poll() is not scheduled.
4452 */ 4632 */
4453 tp->last_tag = sblk->status_tag; 4633 tp->last_tag = sblk->status_tag;
4454 __netif_rx_schedule(dev, &tp->napi); 4634 __netif_rx_schedule(&tp->napi);
4455 } 4635 }
4456out: 4636out:
4457 return IRQ_RETVAL(handled); 4637 return IRQ_RETVAL(handled);
@@ -5557,6 +5737,13 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5557 event = APE_EVENT_STATUS_STATE_START; 5737 event = APE_EVENT_STATUS_STATE_START;
5558 break; 5738 break;
5559 case RESET_KIND_SHUTDOWN: 5739 case RESET_KIND_SHUTDOWN:
5740 /* With the interface we are currently using,
5741 * APE does not track driver state. Wiping
5742 * out the HOST SEGMENT SIGNATURE forces
5743 * the APE to assume OS absent status.
5744 */
5745 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5746
5560 event = APE_EVENT_STATUS_STATE_UNLOAD; 5747 event = APE_EVENT_STATUS_STATE_UNLOAD;
5561 break; 5748 break;
5562 case RESET_KIND_SUSPEND: 5749 case RESET_KIND_SUSPEND:
@@ -5721,17 +5908,19 @@ static void tg3_restore_pci_state(struct tg3 *tp)
5721 5908
5722 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd); 5909 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5723 5910
5724 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 5911 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
5725 pcie_set_readrq(tp->pdev, 4096); 5912 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5726 else { 5913 pcie_set_readrq(tp->pdev, 4096);
5727 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 5914 else {
5728 tp->pci_cacheline_sz); 5915 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5729 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, 5916 tp->pci_cacheline_sz);
5730 tp->pci_lat_timer); 5917 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5918 tp->pci_lat_timer);
5919 }
5731 } 5920 }
5732 5921
5733 /* Make sure PCI-X relaxed ordering bit is clear. */ 5922 /* Make sure PCI-X relaxed ordering bit is clear. */
5734 if (tp->pcix_cap) { 5923 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
5735 u16 pcix_cmd; 5924 u16 pcix_cmd;
5736 5925
5737 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD, 5926 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
@@ -5788,11 +5977,7 @@ static int tg3_chip_reset(struct tg3 *tp)
5788 tg3_save_pci_state(tp); 5977 tg3_save_pci_state(tp);
5789 5978
5790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 5979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5791 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 5980 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
5792 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5793 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5794 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5795 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
5796 tw32(GRC_FASTBOOT_PC, 0); 5981 tw32(GRC_FASTBOOT_PC, 0);
5797 5982
5798 /* 5983 /*
@@ -5871,7 +6056,7 @@ static int tg3_chip_reset(struct tg3 *tp)
5871 6056
5872 udelay(120); 6057 udelay(120);
5873 6058
5874 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 6059 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
5875 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) { 6060 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5876 int i; 6061 int i;
5877 u32 cfg_val; 6062 u32 cfg_val;
@@ -5884,8 +6069,23 @@ static int tg3_chip_reset(struct tg3 *tp)
5884 pci_write_config_dword(tp->pdev, 0xc4, 6069 pci_write_config_dword(tp->pdev, 0xc4,
5885 cfg_val | (1 << 15)); 6070 cfg_val | (1 << 15));
5886 } 6071 }
5887 /* Set PCIE max payload size and clear error status. */ 6072
5888 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000); 6073 /* Set PCIE max payload size to 128 bytes and
6074 * clear the "no snoop" and "relaxed ordering" bits.
6075 */
6076 pci_write_config_word(tp->pdev,
6077 tp->pcie_cap + PCI_EXP_DEVCTL,
6078 0);
6079
6080 pcie_set_readrq(tp->pdev, 4096);
6081
6082 /* Clear error status */
6083 pci_write_config_word(tp->pdev,
6084 tp->pcie_cap + PCI_EXP_DEVSTA,
6085 PCI_EXP_DEVSTA_CED |
6086 PCI_EXP_DEVSTA_NFED |
6087 PCI_EXP_DEVSTA_FED |
6088 PCI_EXP_DEVSTA_URD);
5889 } 6089 }
5890 6090
5891 tg3_restore_pci_state(tp); 6091 tg3_restore_pci_state(tp);
@@ -6883,43 +7083,6 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
6883} 7083}
6884 7084
6885 7085
6886/* tp->lock is held. */
6887static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6888{
6889 u32 addr_high, addr_low;
6890 int i;
6891
6892 addr_high = ((tp->dev->dev_addr[0] << 8) |
6893 tp->dev->dev_addr[1]);
6894 addr_low = ((tp->dev->dev_addr[2] << 24) |
6895 (tp->dev->dev_addr[3] << 16) |
6896 (tp->dev->dev_addr[4] << 8) |
6897 (tp->dev->dev_addr[5] << 0));
6898 for (i = 0; i < 4; i++) {
6899 if (i == 1 && skip_mac_1)
6900 continue;
6901 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6902 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6903 }
6904
6905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6907 for (i = 0; i < 12; i++) {
6908 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6909 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6910 }
6911 }
6912
6913 addr_high = (tp->dev->dev_addr[0] +
6914 tp->dev->dev_addr[1] +
6915 tp->dev->dev_addr[2] +
6916 tp->dev->dev_addr[3] +
6917 tp->dev->dev_addr[4] +
6918 tp->dev->dev_addr[5]) &
6919 TX_BACKOFF_SEED_MASK;
6920 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6921}
6922
6923static int tg3_set_mac_addr(struct net_device *dev, void *p) 7086static int tg3_set_mac_addr(struct net_device *dev, void *p)
6924{ 7087{
6925 struct tg3 *tp = netdev_priv(dev); 7088 struct tg3 *tp = netdev_priv(dev);
@@ -7024,8 +7187,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7024 7187
7025 tg3_write_sig_legacy(tp, RESET_KIND_INIT); 7188 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7026 7189
7027 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 || 7190 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7028 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
7029 val = tr32(TG3_CPMU_CTRL); 7191 val = tr32(TG3_CPMU_CTRL);
7030 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE); 7192 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7031 tw32(TG3_CPMU_CTRL, val); 7193 tw32(TG3_CPMU_CTRL, val);
@@ -7091,8 +7253,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7091 return err; 7253 return err;
7092 7254
7093 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && 7255 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7094 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 && 7256 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7095 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7096 /* This value is determined during the probe time DMA 7257 /* This value is determined during the probe time DMA
7097 * engine test, tg3_test_dma. 7258 * engine test, tg3_test_dma.
7098 */ 7259 */
@@ -7332,7 +7493,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7332 RDMAC_MODE_LNGREAD_ENAB); 7493 RDMAC_MODE_LNGREAD_ENAB);
7333 7494
7334 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 7495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7335 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 7496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7497 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7336 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB | 7498 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7337 RDMAC_MODE_MBUF_RBD_CRPT_ENAB | 7499 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7338 RDMAC_MODE_MBUF_SBD_CRPT_ENAB; 7500 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
@@ -7354,7 +7516,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7354 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; 7516 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7355 7517
7356 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 7518 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7357 rdmac_mode |= (1 << 27); 7519 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7520
7521 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7522 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7523 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7358 7524
7359 /* Receive/send statistics. */ 7525 /* Receive/send statistics. */
7360 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 7526 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
@@ -7501,11 +7667,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7501 } 7667 }
7502 7668
7503 /* Enable host coalescing bug fix */ 7669 /* Enable host coalescing bug fix */
7504 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) || 7670 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7505 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7506 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7507 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7508 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7509 val |= WDMAC_MODE_STATUS_TAG_FIX; 7671 val |= WDMAC_MODE_STATUS_TAG_FIX;
7510 7672
7511 tw32_f(WDMAC_MODE, val); 7673 tw32_f(WDMAC_MODE, val);
@@ -7566,10 +7728,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7566 udelay(100); 7728 udelay(100);
7567 7729
7568 tp->rx_mode = RX_MODE_ENABLE; 7730 tp->rx_mode = RX_MODE_ENABLE;
7569 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 7731 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7570 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7571 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7572 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7573 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE; 7732 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7574 7733
7575 tw32_f(MAC_RX_MODE, tp->rx_mode); 7734 tw32_f(MAC_RX_MODE, tp->rx_mode);
@@ -9066,7 +9225,8 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9066 else 9225 else
9067 wol->supported = 0; 9226 wol->supported = 0;
9068 wol->wolopts = 0; 9227 wol->wolopts = 0;
9069 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) 9228 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9229 device_can_wakeup(&tp->pdev->dev))
9070 wol->wolopts = WAKE_MAGIC; 9230 wol->wolopts = WAKE_MAGIC;
9071 memset(&wol->sopass, 0, sizeof(wol->sopass)); 9231 memset(&wol->sopass, 0, sizeof(wol->sopass));
9072} 9232}
@@ -9116,14 +9276,15 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
9116 return -EINVAL; 9276 return -EINVAL;
9117 return 0; 9277 return 0;
9118 } 9278 }
9119 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && 9279 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9120 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) { 9280 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
9121 if (value) { 9281 if (value) {
9122 dev->features |= NETIF_F_TSO6; 9282 dev->features |= NETIF_F_TSO6;
9123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 9283 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9124 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 9284 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9125 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 9285 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9126 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 9286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9287 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9127 dev->features |= NETIF_F_TSO_ECN; 9288 dev->features |= NETIF_F_TSO_ECN;
9128 } else 9289 } else
9129 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); 9290 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
@@ -9238,12 +9399,12 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
9238 9399
9239 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0; 9400 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9240 9401
9241 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) 9402 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9242 epause->rx_pause = 1; 9403 epause->rx_pause = 1;
9243 else 9404 else
9244 epause->rx_pause = 0; 9405 epause->rx_pause = 0;
9245 9406
9246 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) 9407 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9247 epause->tx_pause = 1; 9408 epause->tx_pause = 1;
9248 else 9409 else
9249 epause->tx_pause = 0; 9410 epause->tx_pause = 0;
@@ -9294,14 +9455,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
9294 } 9455 }
9295 } else { 9456 } else {
9296 if (epause->rx_pause) 9457 if (epause->rx_pause)
9297 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX; 9458 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9298 else 9459 else
9299 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX; 9460 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9300 9461
9301 if (epause->tx_pause) 9462 if (epause->tx_pause)
9302 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX; 9463 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9303 else 9464 else
9304 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX; 9465 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9305 9466
9306 if (netif_running(dev)) 9467 if (netif_running(dev))
9307 tg3_setup_flow_control(tp, 0, 0); 9468 tg3_setup_flow_control(tp, 0, 0);
@@ -9321,13 +9482,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
9321 else 9482 else
9322 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG; 9483 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9323 if (epause->rx_pause) 9484 if (epause->rx_pause)
9324 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX; 9485 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9325 else 9486 else
9326 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX; 9487 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9327 if (epause->tx_pause) 9488 if (epause->tx_pause)
9328 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX; 9489 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9329 else 9490 else
9330 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX; 9491 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9331 9492
9332 if (netif_running(dev)) { 9493 if (netif_running(dev)) {
9333 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 9494 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
@@ -9378,11 +9539,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9378 return 0; 9539 return 0;
9379 } 9540 }
9380 9541
9381 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 9542 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9382 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9383 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9384 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9386 ethtool_op_set_tx_ipv6_csum(dev, data); 9543 ethtool_op_set_tx_ipv6_csum(dev, data);
9387 else 9544 else
9388 ethtool_op_set_tx_csum(dev, data); 9545 ethtool_op_set_tx_csum(dev, data);
@@ -9899,18 +10056,13 @@ static int tg3_test_memory(struct tg3 *tp)
9899 int err = 0; 10056 int err = 0;
9900 int i; 10057 int i;
9901 10058
9902 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 10059 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 10060 mem_tbl = mem_tbl_5755;
9904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 10061 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9905 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 10062 mem_tbl = mem_tbl_5906;
9906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 10063 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9907 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 10064 mem_tbl = mem_tbl_5705;
9908 mem_tbl = mem_tbl_5755; 10065 else
9909 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9910 mem_tbl = mem_tbl_5906;
9911 else
9912 mem_tbl = mem_tbl_5705;
9913 } else
9914 mem_tbl = mem_tbl_570x; 10066 mem_tbl = mem_tbl_570x;
9915 10067
9916 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { 10068 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
@@ -10110,9 +10262,11 @@ static int tg3_test_loopback(struct tg3 *tp)
10110 if (err) 10262 if (err)
10111 return TG3_LOOPBACK_FAILED; 10263 return TG3_LOOPBACK_FAILED;
10112 10264
10113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 10265 /* Turn off gphy autopowerdown. */
10114 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 10266 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10115 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { 10267 tg3_phy_toggle_apd(tp, false);
10268
10269 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10116 int i; 10270 int i;
10117 u32 status; 10271 u32 status;
10118 10272
@@ -10139,9 +10293,7 @@ static int tg3_test_loopback(struct tg3 *tp)
10139 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) 10293 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10140 err |= TG3_MAC_LOOPBACK_FAILED; 10294 err |= TG3_MAC_LOOPBACK_FAILED;
10141 10295
10142 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 10296 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10143 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10144 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10145 tw32(TG3_CPMU_CTRL, cpmuctrl); 10297 tw32(TG3_CPMU_CTRL, cpmuctrl);
10146 10298
10147 /* Release the mutex */ 10299 /* Release the mutex */
@@ -10154,6 +10306,10 @@ static int tg3_test_loopback(struct tg3 *tp)
10154 err |= TG3_PHY_LOOPBACK_FAILED; 10306 err |= TG3_PHY_LOOPBACK_FAILED;
10155 } 10307 }
10156 10308
10309 /* Re-enable gphy autopowerdown. */
10310 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10311 tg3_phy_toggle_apd(tp, true);
10312
10157 return err; 10313 return err;
10158} 10314}
10159 10315
@@ -10756,6 +10912,102 @@ static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10756 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 10912 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10757} 10913}
10758 10914
10915static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
10916{
10917 u32 nvcfg1;
10918
10919 nvcfg1 = tr32(NVRAM_CFG1);
10920
10921 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10922 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10923 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10924 tp->nvram_jedecnum = JEDEC_ATMEL;
10925 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10926 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10927
10928 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10929 tw32(NVRAM_CFG1, nvcfg1);
10930 return;
10931 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10932 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10933 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10934 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10935 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10936 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10937 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10938 tp->nvram_jedecnum = JEDEC_ATMEL;
10939 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10940 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10941
10942 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10943 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10944 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10945 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10946 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10947 break;
10948 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10949 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10950 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10951 break;
10952 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10953 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10954 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10955 break;
10956 }
10957 break;
10958 case FLASH_5752VENDOR_ST_M45PE10:
10959 case FLASH_5752VENDOR_ST_M45PE20:
10960 case FLASH_5752VENDOR_ST_M45PE40:
10961 tp->nvram_jedecnum = JEDEC_ST;
10962 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10963 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10964
10965 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10966 case FLASH_5752VENDOR_ST_M45PE10:
10967 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10968 break;
10969 case FLASH_5752VENDOR_ST_M45PE20:
10970 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10971 break;
10972 case FLASH_5752VENDOR_ST_M45PE40:
10973 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10974 break;
10975 }
10976 break;
10977 default:
10978 return;
10979 }
10980
10981 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10982 case FLASH_5752PAGE_SIZE_256:
10983 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10984 tp->nvram_pagesize = 256;
10985 break;
10986 case FLASH_5752PAGE_SIZE_512:
10987 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10988 tp->nvram_pagesize = 512;
10989 break;
10990 case FLASH_5752PAGE_SIZE_1K:
10991 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10992 tp->nvram_pagesize = 1024;
10993 break;
10994 case FLASH_5752PAGE_SIZE_2K:
10995 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10996 tp->nvram_pagesize = 2048;
10997 break;
10998 case FLASH_5752PAGE_SIZE_4K:
10999 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11000 tp->nvram_pagesize = 4096;
11001 break;
11002 case FLASH_5752PAGE_SIZE_264:
11003 tp->nvram_pagesize = 264;
11004 break;
11005 case FLASH_5752PAGE_SIZE_528:
11006 tp->nvram_pagesize = 528;
11007 break;
11008 }
11009}
11010
10759/* Chips other than 5700/5701 use the NVRAM for fetching info. */ 11011/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10760static void __devinit tg3_nvram_init(struct tg3 *tp) 11012static void __devinit tg3_nvram_init(struct tg3 *tp)
10761{ 11013{
@@ -10796,6 +11048,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
10796 tg3_get_5761_nvram_info(tp); 11048 tg3_get_5761_nvram_info(tp);
10797 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 11049 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10798 tg3_get_5906_nvram_info(tp); 11050 tg3_get_5906_nvram_info(tp);
11051 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11052 tg3_get_57780_nvram_info(tp);
10799 else 11053 else
10800 tg3_get_nvram_info(tp); 11054 tg3_get_nvram_info(tp);
10801 11055
@@ -11116,12 +11370,8 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11116 if (i == (len - 4)) 11370 if (i == (len - 4))
11117 nvram_cmd |= NVRAM_CMD_LAST; 11371 nvram_cmd |= NVRAM_CMD_LAST;
11118 11372
11119 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) && 11373 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11120 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && 11374 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11121 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
11122 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
11123 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11124 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
11125 (tp->nvram_jedecnum == JEDEC_ST) && 11375 (tp->nvram_jedecnum == JEDEC_ST) &&
11126 (nvram_cmd & NVRAM_CMD_FIRST)) { 11376 (nvram_cmd & NVRAM_CMD_FIRST)) {
11127 11377
@@ -11296,10 +11546,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11296 if (val & VCPU_CFGSHDW_ASPM_DBNC) 11546 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11297 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND; 11547 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11298 if ((val & VCPU_CFGSHDW_WOL_ENABLE) && 11548 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11299 (val & VCPU_CFGSHDW_WOL_MAGPKT) && 11549 (val & VCPU_CFGSHDW_WOL_MAGPKT))
11300 device_may_wakeup(&tp->pdev->dev))
11301 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 11550 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11302 return; 11551 goto done;
11303 } 11552 }
11304 11553
11305 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val); 11554 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
@@ -11421,15 +11670,17 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11421 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) 11670 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11422 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE; 11671 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11423 } 11672 }
11424 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) 11673
11674 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11675 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11425 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE; 11676 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11677
11426 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES && 11678 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11427 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)) 11679 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11428 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP; 11680 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11429 11681
11430 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) && 11682 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11431 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE) && 11683 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11432 device_may_wakeup(&tp->pdev->dev))
11433 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE; 11684 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11434 11685
11435 if (cfg2 & (1 << 17)) 11686 if (cfg2 & (1 << 17))
@@ -11440,6 +11691,11 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11440 if (cfg2 & (1 << 18)) 11691 if (cfg2 & (1 << 18))
11441 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS; 11692 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11442 11693
11694 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11695 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
11696 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
11697 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
11698
11443 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 11699 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11444 u32 cfg3; 11700 u32 cfg3;
11445 11701
@@ -11455,6 +11711,10 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11455 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN) 11711 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11456 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; 11712 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11457 } 11713 }
11714done:
11715 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11716 device_set_wakeup_enable(&tp->pdev->dev,
11717 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11458} 11718}
11459 11719
11460static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) 11720static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
@@ -11751,6 +12011,51 @@ static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11751 return 1; 12011 return 1;
11752} 12012}
11753 12013
12014static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12015{
12016 u32 offset, major, minor, build;
12017
12018 tp->fw_ver[0] = 's';
12019 tp->fw_ver[1] = 'b';
12020 tp->fw_ver[2] = '\0';
12021
12022 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12023 return;
12024
12025 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12026 case TG3_EEPROM_SB_REVISION_0:
12027 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12028 break;
12029 case TG3_EEPROM_SB_REVISION_2:
12030 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12031 break;
12032 case TG3_EEPROM_SB_REVISION_3:
12033 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12034 break;
12035 default:
12036 return;
12037 }
12038
12039 if (tg3_nvram_read_swab(tp, offset, &val))
12040 return;
12041
12042 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12043 TG3_EEPROM_SB_EDH_BLD_SHFT;
12044 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12045 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12046 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12047
12048 if (minor > 99 || build > 26)
12049 return;
12050
12051 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
12052
12053 if (build > 0) {
12054 tp->fw_ver[8] = 'a' + build - 1;
12055 tp->fw_ver[9] = '\0';
12056 }
12057}
12058
11754static void __devinit tg3_read_fw_ver(struct tg3 *tp) 12059static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11755{ 12060{
11756 u32 val, offset, start; 12061 u32 val, offset, start;
@@ -11760,8 +12065,12 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11760 if (tg3_nvram_read_swab(tp, 0, &val)) 12065 if (tg3_nvram_read_swab(tp, 0, &val))
11761 return; 12066 return;
11762 12067
11763 if (val != TG3_EEPROM_MAGIC) 12068 if (val != TG3_EEPROM_MAGIC) {
12069 if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12070 tg3_read_sb_ver(tp, val);
12071
11764 return; 12072 return;
12073 }
11765 12074
11766 if (tg3_nvram_read_swab(tp, 0xc, &offset) || 12075 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11767 tg3_nvram_read_swab(tp, 0x4, &start)) 12076 tg3_nvram_read_swab(tp, 0x4, &start))
@@ -11849,11 +12158,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11849 { }, 12158 { },
11850 }; 12159 };
11851 u32 misc_ctrl_reg; 12160 u32 misc_ctrl_reg;
11852 u32 cacheline_sz_reg;
11853 u32 pci_state_reg, grc_misc_cfg; 12161 u32 pci_state_reg, grc_misc_cfg;
11854 u32 val; 12162 u32 val;
11855 u16 pci_cmd; 12163 u16 pci_cmd;
11856 int err, pcie_cap; 12164 int err;
11857 12165
11858 /* Force memory write invalidate off. If we leave it on, 12166 /* Force memory write invalidate off. If we leave it on,
11859 * then on 5700_BX chips we have to enable a workaround. 12167 * then on 5700_BX chips we have to enable a workaround.
@@ -11882,7 +12190,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
11882 12190
11883 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, 12191 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11884 &prod_id_asic_rev); 12192 &prod_id_asic_rev);
11885 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK; 12193 tp->pci_chip_rev_id = prod_id_asic_rev;
11886 } 12194 }
11887 12195
11888 /* Wrong chip ID in 5752 A0. This code can be removed later 12196 /* Wrong chip ID in 5752 A0. This code can be removed later
@@ -12019,26 +12327,23 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12019 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, 12327 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12020 tp->misc_host_ctrl); 12328 tp->misc_host_ctrl);
12021 12329
12022 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12023 &cacheline_sz_reg);
12024
12025 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12026 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12027 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12028 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12029
12030 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) || 12330 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12031 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) 12331 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12032 tp->pdev_peer = tg3_find_peer(tp); 12332 tp->pdev_peer = tg3_find_peer(tp);
12033 12333
12034 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || 12334 /* Intentionally exclude ASIC_REV_5906 */
12035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 12335 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12036 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 12336 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12038 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 12338 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12040 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 12339 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12340 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12341 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12342
12343 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12344 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12041 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || 12345 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12346 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12042 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 12347 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12043 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; 12348 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12044 12349
@@ -12046,6 +12351,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12046 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) 12351 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12047 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; 12352 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12048 12353
12354 /* 5700 B0 chips do not support checksumming correctly due
12355 * to hardware bugs.
12356 */
12357 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12358 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12359 else {
12360 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12361 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12362 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12363 tp->dev->features |= NETIF_F_IPV6_CSUM;
12364 }
12365
12049 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 12366 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12050 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; 12367 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12051 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || 12368 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
@@ -12055,11 +12372,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12055 tp->pdev_peer == tp->pdev)) 12372 tp->pdev_peer == tp->pdev))
12056 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI; 12373 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12057 12374
12058 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 12375 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12062 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12063 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 12376 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12064 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; 12377 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12065 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 12378 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
@@ -12076,21 +12389,41 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12076 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) 12389 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12077 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; 12390 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12078 12391
12079 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP); 12392 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12080 if (pcie_cap != 0) { 12393 &pci_state_reg);
12394
12395 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12396 if (tp->pcie_cap != 0) {
12397 u16 lnkctl;
12398
12081 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 12399 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12082 12400
12083 pcie_set_readrq(tp->pdev, 4096); 12401 pcie_set_readrq(tp->pdev, 4096);
12084 12402
12085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 12403 pci_read_config_word(tp->pdev,
12086 u16 lnkctl; 12404 tp->pcie_cap + PCI_EXP_LNKCTL,
12087 12405 &lnkctl);
12088 pci_read_config_word(tp->pdev, 12406 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12089 pcie_cap + PCI_EXP_LNKCTL, 12407 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12090 &lnkctl);
12091 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12092 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2; 12408 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12410 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12411 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12412 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12093 } 12413 }
12414 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12415 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12416 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12417 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12418 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12419 if (!tp->pcix_cap) {
12420 printk(KERN_ERR PFX "Cannot find PCI-X "
12421 "capability, aborting.\n");
12422 return -EIO;
12423 }
12424
12425 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12426 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12094 } 12427 }
12095 12428
12096 /* If we have an AMD 762 or VIA K8T800 chipset, write 12429 /* If we have an AMD 762 or VIA K8T800 chipset, write
@@ -12103,42 +12436,29 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12103 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) 12436 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12104 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; 12437 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12105 12438
12439 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
12440 &tp->pci_cacheline_sz);
12441 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12442 &tp->pci_lat_timer);
12106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && 12443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12107 tp->pci_lat_timer < 64) { 12444 tp->pci_lat_timer < 64) {
12108 tp->pci_lat_timer = 64; 12445 tp->pci_lat_timer = 64;
12109 12446 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12110 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0); 12447 tp->pci_lat_timer);
12111 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12112 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12113 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12114
12115 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12116 cacheline_sz_reg);
12117 }
12118
12119 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12120 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12121 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12122 if (!tp->pcix_cap) {
12123 printk(KERN_ERR PFX "Cannot find PCI-X "
12124 "capability, aborting.\n");
12125 return -EIO;
12126 }
12127 } 12448 }
12128 12449
12129 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 12450 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12130 &pci_state_reg); 12451 /* 5700 BX chips need to have their TX producer index
12131 12452 * mailboxes written twice to workaround a bug.
12132 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) { 12453 */
12133 tp->tg3_flags |= TG3_FLAG_PCIX_MODE; 12454 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12134 12455
12135 /* If this is a 5700 BX chipset, and we are in PCI-X 12456 /* If we are in PCI-X mode, enable register write workaround.
12136 * mode, enable register write workaround.
12137 * 12457 *
12138 * The workaround is to use indirect register accesses 12458 * The workaround is to use indirect register accesses
12139 * for all chip writes not to mailbox registers. 12459 * for all chip writes not to mailbox registers.
12140 */ 12460 */
12141 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) { 12461 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12142 u32 pm_reg; 12462 u32 pm_reg;
12143 12463
12144 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG; 12464 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
@@ -12163,12 +12483,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12163 } 12483 }
12164 } 12484 }
12165 12485
12166 /* 5700 BX chips need to have their TX producer index mailboxes
12167 * written twice to workaround a bug.
12168 */
12169 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12170 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12171
12172 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0) 12486 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12173 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED; 12487 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12174 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0) 12488 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
@@ -12263,16 +12577,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12263 12577
12264 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12578 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12265 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 12579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12266 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { 12580 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12581 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12267 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; 12582 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12268 12583
12269 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
12270 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
12271 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
12272 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
12273 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
12274 }
12275
12276 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). 12584 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12277 * GPIO1 driven high will bring 5700's external PHY out of reset. 12585 * GPIO1 driven high will bring 5700's external PHY out of reset.
12278 * It is also used as eeprom write protect on LOMs. 12586 * It is also used as eeprom write protect on LOMs.
@@ -12288,7 +12596,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12288 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) 12596 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12289 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; 12597 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12290 12598
12291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) 12599 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12600 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12292 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL; 12601 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12293 12602
12294 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) { 12603 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
@@ -12308,12 +12617,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12308 return err; 12617 return err;
12309 } 12618 }
12310 12619
12311 /* 5700 B0 chips do not support checksumming correctly due
12312 * to hardware bugs.
12313 */
12314 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12315 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12316
12317 /* Derive initial jumbo mode from MTU assigned in 12620 /* Derive initial jumbo mode from MTU assigned in
12318 * ether_setup() via the alloc_etherdev() call 12621 * ether_setup() via the alloc_etherdev() call
12319 */ 12622 */
@@ -12346,7 +12649,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12346 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) 12649 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12347 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; 12650 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12348 12651
12349 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { 12652 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12653 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12654 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12655 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
12350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || 12656 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12351 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || 12657 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12352 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 12658 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -12356,8 +12662,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12356 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG; 12662 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12357 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M) 12663 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12358 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM; 12664 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12359 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 && 12665 } else
12360 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12361 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; 12666 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12362 } 12667 }
12363 12668
@@ -12378,7 +12683,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12378 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX) 12683 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12379 tp->coalesce_mode |= HOSTCC_MODE_32BYTE; 12684 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12380 12685
12381 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 12686 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12382 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; 12688 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12383 12689
12384 err = tg3_mdio_init(tp); 12690 err = tg3_mdio_init(tp);
@@ -12463,6 +12769,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12463 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F || 12769 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12464 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F || 12770 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12465 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) || 12771 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12772 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
12466 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 12773 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12467 tp->tg3_flags |= TG3_FLAG_10_100_ONLY; 12774 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12468 12775
@@ -12512,20 +12819,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12512 else 12819 else
12513 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 12820 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12514 12821
12515 /* All chips before 5787 can get confused if TX buffers 12822 tp->rx_offset = NET_IP_ALIGN;
12516 * straddle the 4GB address boundary in some cases.
12517 */
12518 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12519 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12520 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12522 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12523 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12524 tp->dev->hard_start_xmit = tg3_start_xmit;
12525 else
12526 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12527
12528 tp->rx_offset = 2;
12529 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 12823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12530 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) 12824 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12531 tp->rx_offset = 0; 12825 tp->rx_offset = 0;
@@ -13241,18 +13535,53 @@ static void __devinit tg3_init_coal(struct tg3 *tp)
13241 } 13535 }
13242} 13536}
13243 13537
13538static const struct net_device_ops tg3_netdev_ops = {
13539 .ndo_open = tg3_open,
13540 .ndo_stop = tg3_close,
13541 .ndo_start_xmit = tg3_start_xmit,
13542 .ndo_get_stats = tg3_get_stats,
13543 .ndo_validate_addr = eth_validate_addr,
13544 .ndo_set_multicast_list = tg3_set_rx_mode,
13545 .ndo_set_mac_address = tg3_set_mac_addr,
13546 .ndo_do_ioctl = tg3_ioctl,
13547 .ndo_tx_timeout = tg3_tx_timeout,
13548 .ndo_change_mtu = tg3_change_mtu,
13549#if TG3_VLAN_TAG_USED
13550 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13551#endif
13552#ifdef CONFIG_NET_POLL_CONTROLLER
13553 .ndo_poll_controller = tg3_poll_controller,
13554#endif
13555};
13556
13557static const struct net_device_ops tg3_netdev_ops_dma_bug = {
13558 .ndo_open = tg3_open,
13559 .ndo_stop = tg3_close,
13560 .ndo_start_xmit = tg3_start_xmit_dma_bug,
13561 .ndo_get_stats = tg3_get_stats,
13562 .ndo_validate_addr = eth_validate_addr,
13563 .ndo_set_multicast_list = tg3_set_rx_mode,
13564 .ndo_set_mac_address = tg3_set_mac_addr,
13565 .ndo_do_ioctl = tg3_ioctl,
13566 .ndo_tx_timeout = tg3_tx_timeout,
13567 .ndo_change_mtu = tg3_change_mtu,
13568#if TG3_VLAN_TAG_USED
13569 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13570#endif
13571#ifdef CONFIG_NET_POLL_CONTROLLER
13572 .ndo_poll_controller = tg3_poll_controller,
13573#endif
13574};
13575
13244static int __devinit tg3_init_one(struct pci_dev *pdev, 13576static int __devinit tg3_init_one(struct pci_dev *pdev,
13245 const struct pci_device_id *ent) 13577 const struct pci_device_id *ent)
13246{ 13578{
13247 static int tg3_version_printed = 0; 13579 static int tg3_version_printed = 0;
13248 resource_size_t tg3reg_base;
13249 unsigned long tg3reg_len;
13250 struct net_device *dev; 13580 struct net_device *dev;
13251 struct tg3 *tp; 13581 struct tg3 *tp;
13252 int err, pm_cap; 13582 int err, pm_cap;
13253 char str[40]; 13583 char str[40];
13254 u64 dma_mask, persist_dma_mask; 13584 u64 dma_mask, persist_dma_mask;
13255 DECLARE_MAC_BUF(mac);
13256 13585
13257 if (tg3_version_printed++ == 0) 13586 if (tg3_version_printed++ == 0)
13258 printk(KERN_INFO "%s", version); 13587 printk(KERN_INFO "%s", version);
@@ -13264,13 +13593,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13264 return err; 13593 return err;
13265 } 13594 }
13266 13595
13267 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13268 printk(KERN_ERR PFX "Cannot find proper PCI device "
13269 "base address, aborting.\n");
13270 err = -ENODEV;
13271 goto err_out_disable_pdev;
13272 }
13273
13274 err = pci_request_regions(pdev, DRV_MODULE_NAME); 13596 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13275 if (err) { 13597 if (err) {
13276 printk(KERN_ERR PFX "Cannot obtain PCI resources, " 13598 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
@@ -13289,9 +13611,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13289 goto err_out_free_res; 13611 goto err_out_free_res;
13290 } 13612 }
13291 13613
13292 tg3reg_base = pci_resource_start(pdev, 0);
13293 tg3reg_len = pci_resource_len(pdev, 0);
13294
13295 dev = alloc_etherdev(sizeof(*tp)); 13614 dev = alloc_etherdev(sizeof(*tp));
13296 if (!dev) { 13615 if (!dev) {
13297 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); 13616 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
@@ -13303,7 +13622,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13303 13622
13304#if TG3_VLAN_TAG_USED 13623#if TG3_VLAN_TAG_USED
13305 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 13624 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13306 dev->vlan_rx_register = tg3_vlan_rx_register;
13307#endif 13625#endif
13308 13626
13309 tp = netdev_priv(dev); 13627 tp = netdev_priv(dev);
@@ -13343,7 +13661,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13343 spin_lock_init(&tp->indirect_lock); 13661 spin_lock_init(&tp->indirect_lock);
13344 INIT_WORK(&tp->reset_task, tg3_reset_task); 13662 INIT_WORK(&tp->reset_task, tg3_reset_task);
13345 13663
13346 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); 13664 tp->regs = pci_ioremap_bar(pdev, BAR_0);
13347 if (!tp->regs) { 13665 if (!tp->regs) {
13348 printk(KERN_ERR PFX "Cannot map device registers, " 13666 printk(KERN_ERR PFX "Cannot map device registers, "
13349 "aborting.\n"); 13667 "aborting.\n");
@@ -13357,21 +13675,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13357 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 13675 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13358 tp->tx_pending = TG3_DEF_TX_RING_PENDING; 13676 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13359 13677
13360 dev->open = tg3_open;
13361 dev->stop = tg3_close;
13362 dev->get_stats = tg3_get_stats;
13363 dev->set_multicast_list = tg3_set_rx_mode;
13364 dev->set_mac_address = tg3_set_mac_addr;
13365 dev->do_ioctl = tg3_ioctl;
13366 dev->tx_timeout = tg3_tx_timeout;
13367 netif_napi_add(dev, &tp->napi, tg3_poll, 64); 13678 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13368 dev->ethtool_ops = &tg3_ethtool_ops; 13679 dev->ethtool_ops = &tg3_ethtool_ops;
13369 dev->watchdog_timeo = TG3_TX_TIMEOUT; 13680 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13370 dev->change_mtu = tg3_change_mtu;
13371 dev->irq = pdev->irq; 13681 dev->irq = pdev->irq;
13372#ifdef CONFIG_NET_POLL_CONTROLLER
13373 dev->poll_controller = tg3_poll_controller;
13374#endif
13375 13682
13376 err = tg3_get_invariants(tp); 13683 err = tg3_get_invariants(tp);
13377 if (err) { 13684 if (err) {
@@ -13380,6 +13687,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13380 goto err_out_iounmap; 13687 goto err_out_iounmap;
13381 } 13688 }
13382 13689
13690 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13691 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13692 dev->netdev_ops = &tg3_netdev_ops;
13693 else
13694 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
13695
13696
13383 /* The EPB bridge inside 5714, 5715, and 5780 and any 13697 /* The EPB bridge inside 5714, 5715, and 5780 and any
13384 * device behind the EPB cannot support DMA addresses > 40-bit. 13698 * device behind the EPB cannot support DMA addresses > 40-bit.
13385 * On 64-bit systems with IOMMU, use 40-bit dma_mask. 13699 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
@@ -13439,14 +13753,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13439 * is off by default, but can be enabled using ethtool. 13753 * is off by default, but can be enabled using ethtool.
13440 */ 13754 */
13441 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 13755 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13442 dev->features |= NETIF_F_TSO; 13756 if (dev->features & NETIF_F_IP_CSUM)
13443 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) && 13757 dev->features |= NETIF_F_TSO;
13444 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) 13758 if ((dev->features & NETIF_F_IPV6_CSUM) &&
13759 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
13445 dev->features |= NETIF_F_TSO6; 13760 dev->features |= NETIF_F_TSO6;
13446 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 13761 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13447 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 13762 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13448 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 13763 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13449 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 13764 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13765 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13450 dev->features |= NETIF_F_TSO_ECN; 13766 dev->features |= NETIF_F_TSO_ECN;
13451 } 13767 }
13452 13768
@@ -13466,17 +13782,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13466 } 13782 }
13467 13783
13468 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 13784 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13469 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 13785 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
13470 printk(KERN_ERR PFX "Cannot find proper PCI device "
13471 "base address for APE, aborting.\n");
13472 err = -ENODEV;
13473 goto err_out_iounmap;
13474 }
13475
13476 tg3reg_base = pci_resource_start(pdev, 2);
13477 tg3reg_len = pci_resource_len(pdev, 2);
13478
13479 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
13480 if (!tp->aperegs) { 13786 if (!tp->aperegs) {
13481 printk(KERN_ERR PFX "Cannot map APE registers, " 13787 printk(KERN_ERR PFX "Cannot map APE registers, "
13482 "aborting.\n"); 13788 "aborting.\n");
@@ -13504,25 +13810,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13504 goto err_out_apeunmap; 13810 goto err_out_apeunmap;
13505 } 13811 }
13506 13812
13507 /* Tigon3 can do ipv4 only... and some chips have buggy
13508 * checksumming.
13509 */
13510 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13511 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13512 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13513 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13516 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13517 dev->features |= NETIF_F_IPV6_CSUM;
13518
13519 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13520 } else
13521 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13522
13523 /* flow control autonegotiation is default behavior */ 13813 /* flow control autonegotiation is default behavior */
13524 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 13814 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13525 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX; 13815 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13526 13816
13527 tg3_init_coal(tp); 13817 tg3_init_coal(tp);
13528 13818
@@ -13535,26 +13825,34 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13535 goto err_out_apeunmap; 13825 goto err_out_apeunmap;
13536 } 13826 }
13537 13827
13538 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] " 13828 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
13539 "(%s) %s Ethernet %s\n",
13540 dev->name, 13829 dev->name,
13541 tp->board_part_number, 13830 tp->board_part_number,
13542 tp->pci_chip_rev_id, 13831 tp->pci_chip_rev_id,
13543 tg3_phy_string(tp),
13544 tg3_bus_string(tp, str), 13832 tg3_bus_string(tp, str),
13545 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : 13833 dev->dev_addr);
13546 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13547 "10/100/1000Base-T")),
13548 print_mac(mac, dev->dev_addr));
13549 13834
13550 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] " 13835 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13551 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n", 13836 printk(KERN_INFO
13837 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13838 tp->dev->name,
13839 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13840 dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
13841 else
13842 printk(KERN_INFO
13843 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13844 tp->dev->name, tg3_phy_string(tp),
13845 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13846 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13847 "10/100/1000Base-T")),
13848 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13849
13850 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
13552 dev->name, 13851 dev->name,
13553 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0, 13852 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13554 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0, 13853 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13555 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0, 13854 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13556 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0, 13855 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13557 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13558 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0); 13856 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13559 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n", 13857 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13560 dev->name, tp->dma_rwctrl, 13858 dev->name, tp->dma_rwctrl,
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index be252abe8985..8936edfb0438 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -38,26 +38,13 @@
38#define TG3PCI_DEVICE_TIGON3_2 0x1645 /* BCM5701 */ 38#define TG3PCI_DEVICE_TIGON3_2 0x1645 /* BCM5701 */
39#define TG3PCI_DEVICE_TIGON3_3 0x1646 /* BCM5702 */ 39#define TG3PCI_DEVICE_TIGON3_3 0x1646 /* BCM5702 */
40#define TG3PCI_DEVICE_TIGON3_4 0x1647 /* BCM5703 */ 40#define TG3PCI_DEVICE_TIGON3_4 0x1647 /* BCM5703 */
41#define TG3PCI_COMMAND 0x00000004 41#define TG3PCI_DEVICE_TIGON3_5761S 0x1688
42#define TG3PCI_STATUS 0x00000006 42#define TG3PCI_DEVICE_TIGON3_5761SE 0x1689
43#define TG3PCI_CCREVID 0x00000008 43#define TG3PCI_DEVICE_TIGON3_57780 0x1692
44#define TG3PCI_CACHELINESZ 0x0000000c 44#define TG3PCI_DEVICE_TIGON3_57760 0x1690
45#define TG3PCI_LATTIMER 0x0000000d 45#define TG3PCI_DEVICE_TIGON3_57790 0x1694
46#define TG3PCI_HEADERTYPE 0x0000000e 46#define TG3PCI_DEVICE_TIGON3_57720 0x168c
47#define TG3PCI_BIST 0x0000000f 47/* 0x04 --> 0x64 unused */
48#define TG3PCI_BASE0_LOW 0x00000010
49#define TG3PCI_BASE0_HIGH 0x00000014
50/* 0x18 --> 0x2c unused */
51#define TG3PCI_SUBSYSVENID 0x0000002c
52#define TG3PCI_SUBSYSID 0x0000002e
53#define TG3PCI_ROMADDR 0x00000030
54#define TG3PCI_CAPLIST 0x00000034
55/* 0x35 --> 0x3c unused */
56#define TG3PCI_IRQ_LINE 0x0000003c
57#define TG3PCI_IRQ_PIN 0x0000003d
58#define TG3PCI_MIN_GNT 0x0000003e
59#define TG3PCI_MAX_LAT 0x0000003f
60/* 0x40 --> 0x64 unused */
61#define TG3PCI_MSI_DATA 0x00000064 48#define TG3PCI_MSI_DATA 0x00000064
62/* 0x66 --> 0x68 unused */ 49/* 0x66 --> 0x68 unused */
63#define TG3PCI_MISC_HOST_CTRL 0x00000068 50#define TG3PCI_MISC_HOST_CTRL 0x00000068
@@ -108,10 +95,6 @@
108#define CHIPREV_ID_5752_A1 0x6001 95#define CHIPREV_ID_5752_A1 0x6001
109#define CHIPREV_ID_5714_A2 0x9002 96#define CHIPREV_ID_5714_A2 0x9002
110#define CHIPREV_ID_5906_A1 0xc001 97#define CHIPREV_ID_5906_A1 0xc001
111#define CHIPREV_ID_5784_A0 0x5784000
112#define CHIPREV_ID_5784_A1 0x5784001
113#define CHIPREV_ID_5761_A0 0x5761000
114#define CHIPREV_ID_5761_A1 0x5761001
115#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) 98#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
116#define ASIC_REV_5700 0x07 99#define ASIC_REV_5700 0x07
117#define ASIC_REV_5701 0x00 100#define ASIC_REV_5701 0x00
@@ -129,6 +112,7 @@
129#define ASIC_REV_5784 0x5784 112#define ASIC_REV_5784 0x5784
130#define ASIC_REV_5761 0x5761 113#define ASIC_REV_5761 0x5761
131#define ASIC_REV_5785 0x5785 114#define ASIC_REV_5785 0x5785
115#define ASIC_REV_57780 0x57780
132#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) 116#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
133#define CHIPREV_5700_AX 0x70 117#define CHIPREV_5700_AX 0x70
134#define CHIPREV_5700_BX 0x71 118#define CHIPREV_5700_BX 0x71
@@ -325,6 +309,7 @@
325#define MAC_MODE_TDE_ENABLE 0x00200000 309#define MAC_MODE_TDE_ENABLE 0x00200000
326#define MAC_MODE_RDE_ENABLE 0x00400000 310#define MAC_MODE_RDE_ENABLE 0x00400000
327#define MAC_MODE_FHDE_ENABLE 0x00800000 311#define MAC_MODE_FHDE_ENABLE 0x00800000
312#define MAC_MODE_KEEP_FRAME_IN_WOL 0x01000000
328#define MAC_MODE_APE_RX_EN 0x08000000 313#define MAC_MODE_APE_RX_EN 0x08000000
329#define MAC_MODE_APE_TX_EN 0x10000000 314#define MAC_MODE_APE_TX_EN 0x10000000
330#define MAC_STATUS 0x00000404 315#define MAC_STATUS 0x00000404
@@ -414,6 +399,7 @@
414#define MI_COM_DATA_MASK 0x0000ffff 399#define MI_COM_DATA_MASK 0x0000ffff
415#define MAC_MI_STAT 0x00000450 400#define MAC_MI_STAT 0x00000450
416#define MAC_MI_STAT_LNKSTAT_ATTN_ENAB 0x00000001 401#define MAC_MI_STAT_LNKSTAT_ATTN_ENAB 0x00000001
402#define MAC_MI_STAT_10MBPS_MODE 0x00000002
417#define MAC_MI_MODE 0x00000454 403#define MAC_MI_MODE 0x00000454
418#define MAC_MI_MODE_CLK_10MHZ 0x00000001 404#define MAC_MI_MODE_CLK_10MHZ 0x00000001
419#define MAC_MI_MODE_SHORT_PREAMBLE 0x00000002 405#define MAC_MI_MODE_SHORT_PREAMBLE 0x00000002
@@ -539,6 +525,100 @@
539#define MAC_PHYCFG1_TXC_DRV 0x20000000 525#define MAC_PHYCFG1_TXC_DRV 0x20000000
540#define MAC_PHYCFG2 0x000005a4 526#define MAC_PHYCFG2 0x000005a4
541#define MAC_PHYCFG2_INBAND_ENABLE 0x00000001 527#define MAC_PHYCFG2_INBAND_ENABLE 0x00000001
528#define MAC_PHYCFG2_EMODE_MASK_MASK 0x000001c0
529#define MAC_PHYCFG2_EMODE_MASK_AC131 0x000000c0
530#define MAC_PHYCFG2_EMODE_MASK_50610 0x00000100
531#define MAC_PHYCFG2_EMODE_MASK_RT8211 0x00000000
532#define MAC_PHYCFG2_EMODE_MASK_RT8201 0x000001c0
533#define MAC_PHYCFG2_EMODE_COMP_MASK 0x00000e00
534#define MAC_PHYCFG2_EMODE_COMP_AC131 0x00000600
535#define MAC_PHYCFG2_EMODE_COMP_50610 0x00000400
536#define MAC_PHYCFG2_EMODE_COMP_RT8211 0x00000800
537#define MAC_PHYCFG2_EMODE_COMP_RT8201 0x00000000
538#define MAC_PHYCFG2_FMODE_MASK_MASK 0x00007000
539#define MAC_PHYCFG2_FMODE_MASK_AC131 0x00006000
540#define MAC_PHYCFG2_FMODE_MASK_50610 0x00004000
541#define MAC_PHYCFG2_FMODE_MASK_RT8211 0x00000000
542#define MAC_PHYCFG2_FMODE_MASK_RT8201 0x00007000
543#define MAC_PHYCFG2_FMODE_COMP_MASK 0x00038000
544#define MAC_PHYCFG2_FMODE_COMP_AC131 0x00030000
545#define MAC_PHYCFG2_FMODE_COMP_50610 0x00008000
546#define MAC_PHYCFG2_FMODE_COMP_RT8211 0x00038000
547#define MAC_PHYCFG2_FMODE_COMP_RT8201 0x00000000
548#define MAC_PHYCFG2_GMODE_MASK_MASK 0x001c0000
549#define MAC_PHYCFG2_GMODE_MASK_AC131 0x001c0000
550#define MAC_PHYCFG2_GMODE_MASK_50610 0x00100000
551#define MAC_PHYCFG2_GMODE_MASK_RT8211 0x00000000
552#define MAC_PHYCFG2_GMODE_MASK_RT8201 0x001c0000
553#define MAC_PHYCFG2_GMODE_COMP_MASK 0x00e00000
554#define MAC_PHYCFG2_GMODE_COMP_AC131 0x00e00000
555#define MAC_PHYCFG2_GMODE_COMP_50610 0x00000000
556#define MAC_PHYCFG2_GMODE_COMP_RT8211 0x00200000
557#define MAC_PHYCFG2_GMODE_COMP_RT8201 0x00000000
558#define MAC_PHYCFG2_ACT_MASK_MASK 0x03000000
559#define MAC_PHYCFG2_ACT_MASK_AC131 0x03000000
560#define MAC_PHYCFG2_ACT_MASK_50610 0x01000000
561#define MAC_PHYCFG2_ACT_MASK_RT8211 0x03000000
562#define MAC_PHYCFG2_ACT_MASK_RT8201 0x01000000
563#define MAC_PHYCFG2_ACT_COMP_MASK 0x0c000000
564#define MAC_PHYCFG2_ACT_COMP_AC131 0x00000000
565#define MAC_PHYCFG2_ACT_COMP_50610 0x00000000
566#define MAC_PHYCFG2_ACT_COMP_RT8211 0x00000000
567#define MAC_PHYCFG2_ACT_COMP_RT8201 0x08000000
568#define MAC_PHYCFG2_QUAL_MASK_MASK 0x30000000
569#define MAC_PHYCFG2_QUAL_MASK_AC131 0x30000000
570#define MAC_PHYCFG2_QUAL_MASK_50610 0x30000000
571#define MAC_PHYCFG2_QUAL_MASK_RT8211 0x30000000
572#define MAC_PHYCFG2_QUAL_MASK_RT8201 0x30000000
573#define MAC_PHYCFG2_QUAL_COMP_MASK 0xc0000000
574#define MAC_PHYCFG2_QUAL_COMP_AC131 0x00000000
575#define MAC_PHYCFG2_QUAL_COMP_50610 0x00000000
576#define MAC_PHYCFG2_QUAL_COMP_RT8211 0x00000000
577#define MAC_PHYCFG2_QUAL_COMP_RT8201 0x00000000
578#define MAC_PHYCFG2_50610_LED_MODES \
579 (MAC_PHYCFG2_EMODE_MASK_50610 | \
580 MAC_PHYCFG2_EMODE_COMP_50610 | \
581 MAC_PHYCFG2_FMODE_MASK_50610 | \
582 MAC_PHYCFG2_FMODE_COMP_50610 | \
583 MAC_PHYCFG2_GMODE_MASK_50610 | \
584 MAC_PHYCFG2_GMODE_COMP_50610 | \
585 MAC_PHYCFG2_ACT_MASK_50610 | \
586 MAC_PHYCFG2_ACT_COMP_50610 | \
587 MAC_PHYCFG2_QUAL_MASK_50610 | \
588 MAC_PHYCFG2_QUAL_COMP_50610)
589#define MAC_PHYCFG2_AC131_LED_MODES \
590 (MAC_PHYCFG2_EMODE_MASK_AC131 | \
591 MAC_PHYCFG2_EMODE_COMP_AC131 | \
592 MAC_PHYCFG2_FMODE_MASK_AC131 | \
593 MAC_PHYCFG2_FMODE_COMP_AC131 | \
594 MAC_PHYCFG2_GMODE_MASK_AC131 | \
595 MAC_PHYCFG2_GMODE_COMP_AC131 | \
596 MAC_PHYCFG2_ACT_MASK_AC131 | \
597 MAC_PHYCFG2_ACT_COMP_AC131 | \
598 MAC_PHYCFG2_QUAL_MASK_AC131 | \
599 MAC_PHYCFG2_QUAL_COMP_AC131)
600#define MAC_PHYCFG2_RTL8211C_LED_MODES \
601 (MAC_PHYCFG2_EMODE_MASK_RT8211 | \
602 MAC_PHYCFG2_EMODE_COMP_RT8211 | \
603 MAC_PHYCFG2_FMODE_MASK_RT8211 | \
604 MAC_PHYCFG2_FMODE_COMP_RT8211 | \
605 MAC_PHYCFG2_GMODE_MASK_RT8211 | \
606 MAC_PHYCFG2_GMODE_COMP_RT8211 | \
607 MAC_PHYCFG2_ACT_MASK_RT8211 | \
608 MAC_PHYCFG2_ACT_COMP_RT8211 | \
609 MAC_PHYCFG2_QUAL_MASK_RT8211 | \
610 MAC_PHYCFG2_QUAL_COMP_RT8211)
611#define MAC_PHYCFG2_RTL8201E_LED_MODES \
612 (MAC_PHYCFG2_EMODE_MASK_RT8201 | \
613 MAC_PHYCFG2_EMODE_COMP_RT8201 | \
614 MAC_PHYCFG2_FMODE_MASK_RT8201 | \
615 MAC_PHYCFG2_FMODE_COMP_RT8201 | \
616 MAC_PHYCFG2_GMODE_MASK_RT8201 | \
617 MAC_PHYCFG2_GMODE_COMP_RT8201 | \
618 MAC_PHYCFG2_ACT_MASK_RT8201 | \
619 MAC_PHYCFG2_ACT_COMP_RT8201 | \
620 MAC_PHYCFG2_QUAL_MASK_RT8201 | \
621 MAC_PHYCFG2_QUAL_COMP_RT8201)
542#define MAC_EXT_RGMII_MODE 0x000005a8 622#define MAC_EXT_RGMII_MODE 0x000005a8
543#define MAC_RGMII_MODE_TX_ENABLE 0x00000001 623#define MAC_RGMII_MODE_TX_ENABLE 0x00000001
544#define MAC_RGMII_MODE_TX_LOWPWR 0x00000002 624#define MAC_RGMII_MODE_TX_LOWPWR 0x00000002
@@ -1104,6 +1184,8 @@
1104#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000 1184#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000
1105#define RDMAC_MODE_FIFO_SIZE_128 0x00020000 1185#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
1106#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000 1186#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
1187#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
1188#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
1107#define RDMAC_STATUS 0x00004804 1189#define RDMAC_STATUS 0x00004804
1108#define RDMAC_STATUS_TGTABORT 0x00000004 1190#define RDMAC_STATUS_TGTABORT 0x00000004
1109#define RDMAC_STATUS_MSTABORT 0x00000008 1191#define RDMAC_STATUS_MSTABORT 0x00000008
@@ -1550,6 +1632,12 @@
1550#define FLASH_5761VENDOR_ST_A_M45PE40 0x02000000 1632#define FLASH_5761VENDOR_ST_A_M45PE40 0x02000000
1551#define FLASH_5761VENDOR_ST_A_M45PE80 0x02000002 1633#define FLASH_5761VENDOR_ST_A_M45PE80 0x02000002
1552#define FLASH_5761VENDOR_ST_A_M45PE16 0x02000003 1634#define FLASH_5761VENDOR_ST_A_M45PE16 0x02000003
1635#define FLASH_57780VENDOR_ATMEL_AT45DB011D 0x00400000
1636#define FLASH_57780VENDOR_ATMEL_AT45DB011B 0x03400000
1637#define FLASH_57780VENDOR_ATMEL_AT45DB021D 0x00400002
1638#define FLASH_57780VENDOR_ATMEL_AT45DB021B 0x03400002
1639#define FLASH_57780VENDOR_ATMEL_AT45DB041D 0x00400001
1640#define FLASH_57780VENDOR_ATMEL_AT45DB041B 0x03400001
1553#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 1641#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000
1554#define FLASH_5752PAGE_SIZE_256 0x00000000 1642#define FLASH_5752PAGE_SIZE_256 0x00000000
1555#define FLASH_5752PAGE_SIZE_512 0x10000000 1643#define FLASH_5752PAGE_SIZE_512 0x10000000
@@ -1557,6 +1645,7 @@
1557#define FLASH_5752PAGE_SIZE_2K 0x30000000 1645#define FLASH_5752PAGE_SIZE_2K 0x30000000
1558#define FLASH_5752PAGE_SIZE_4K 0x40000000 1646#define FLASH_5752PAGE_SIZE_4K 0x40000000
1559#define FLASH_5752PAGE_SIZE_264 0x50000000 1647#define FLASH_5752PAGE_SIZE_264 0x50000000
1648#define FLASH_5752PAGE_SIZE_528 0x60000000
1560#define NVRAM_CFG2 0x00007018 1649#define NVRAM_CFG2 0x00007018
1561#define NVRAM_CFG3 0x0000701c 1650#define NVRAM_CFG3 0x0000701c
1562#define NVRAM_SWARB 0x00007020 1651#define NVRAM_SWARB 0x00007020
@@ -1649,6 +1738,17 @@
1649#define TG3_NVM_DIRTYPE_SHIFT 24 1738#define TG3_NVM_DIRTYPE_SHIFT 24
1650#define TG3_NVM_DIRTYPE_ASFINI 1 1739#define TG3_NVM_DIRTYPE_ASFINI 1
1651 1740
1741#define TG3_EEPROM_SB_F1R0_EDH_OFF 0x10
1742#define TG3_EEPROM_SB_F1R2_EDH_OFF 0x14
1743#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
1744#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18
1745#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700
1746#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8
1747#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff
1748#define TG3_EEPROM_SB_EDH_BLD_MASK 0x0000f800
1749#define TG3_EEPROM_SB_EDH_BLD_SHFT 11
1750
1751
1652/* 32K Window into NIC internal memory */ 1752/* 32K Window into NIC internal memory */
1653#define NIC_SRAM_WIN_BASE 0x00008000 1753#define NIC_SRAM_WIN_BASE 0x00008000
1654 1754
@@ -1724,6 +1824,7 @@
1724 1824
1725#define NIC_SRAM_DATA_CFG_2 0x00000d38 1825#define NIC_SRAM_DATA_CFG_2 0x00000d38
1726 1826
1827#define NIC_SRAM_DATA_CFG_2_APD_EN 0x00000400
1727#define SHASTA_EXT_LED_MODE_MASK 0x00018000 1828#define SHASTA_EXT_LED_MODE_MASK 0x00018000
1728#define SHASTA_EXT_LED_LEGACY 0x00000000 1829#define SHASTA_EXT_LED_LEGACY 0x00000000
1729#define SHASTA_EXT_LED_SHARED 0x00008000 1830#define SHASTA_EXT_LED_SHARED 0x00008000
@@ -1792,6 +1893,11 @@
1792 1893
1793#define MII_TG3_AUX_CTRL 0x18 /* auxilliary control register */ 1894#define MII_TG3_AUX_CTRL 0x18 /* auxilliary control register */
1794 1895
1896#define MII_TG3_AUXCTL_PCTL_100TX_LPWR 0x0010
1897#define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE 0x0020
1898#define MII_TG3_AUXCTL_PCTL_VREG_11V 0x0180
1899#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002
1900
1795#define MII_TG3_AUXCTL_MISC_WREN 0x8000 1901#define MII_TG3_AUXCTL_MISC_WREN 0x8000
1796#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200 1902#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
1797#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000 1903#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000
@@ -1817,12 +1923,6 @@
1817#define MII_TG3_ISTAT 0x1a /* IRQ status register */ 1923#define MII_TG3_ISTAT 0x1a /* IRQ status register */
1818#define MII_TG3_IMASK 0x1b /* IRQ mask register */ 1924#define MII_TG3_IMASK 0x1b /* IRQ mask register */
1819 1925
1820#define MII_TG3_MISC_SHDW 0x1c
1821#define MII_TG3_MISC_SHDW_WREN 0x8000
1822#define MII_TG3_MISC_SHDW_APD_SEL 0x2800
1823
1824#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001
1825
1826/* ISTAT/IMASK event bits */ 1926/* ISTAT/IMASK event bits */
1827#define MII_TG3_INT_LINKCHG 0x0002 1927#define MII_TG3_INT_LINKCHG 0x0002
1828#define MII_TG3_INT_SPEEDCHG 0x0004 1928#define MII_TG3_INT_SPEEDCHG 0x0004
@@ -1831,7 +1931,9 @@
1831 1931
1832#define MII_TG3_MISC_SHDW 0x1c 1932#define MII_TG3_MISC_SHDW 0x1c
1833#define MII_TG3_MISC_SHDW_WREN 0x8000 1933#define MII_TG3_MISC_SHDW_WREN 0x8000
1834#define MII_TG3_MISC_SHDW_SCR5_SEL 0x1400 1934
1935#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001
1936#define MII_TG3_MISC_SHDW_APD_ENABLE 0x0020
1835#define MII_TG3_MISC_SHDW_APD_SEL 0x2800 1937#define MII_TG3_MISC_SHDW_APD_SEL 0x2800
1836 1938
1837#define MII_TG3_MISC_SHDW_SCR5_C125OE 0x0001 1939#define MII_TG3_MISC_SHDW_SCR5_C125OE 0x0001
@@ -1839,9 +1941,8 @@
1839#define MII_TG3_MISC_SHDW_SCR5_SDTL 0x0004 1941#define MII_TG3_MISC_SHDW_SCR5_SDTL 0x0004
1840#define MII_TG3_MISC_SHDW_SCR5_DLPTLM 0x0008 1942#define MII_TG3_MISC_SHDW_SCR5_DLPTLM 0x0008
1841#define MII_TG3_MISC_SHDW_SCR5_LPED 0x0010 1943#define MII_TG3_MISC_SHDW_SCR5_LPED 0x0010
1944#define MII_TG3_MISC_SHDW_SCR5_SEL 0x1400
1842 1945
1843#define MII_TG3_MISC_SHDW_APD_WKTM_84MS 0x0001
1844#define MII_TG3_MISC_SHDW_APD_ENABLE 0x0020
1845 1946
1846#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */ 1947#define MII_TG3_EPHY_TEST 0x1f /* 5906 PHY register */
1847#define MII_TG3_EPHY_SHADOW_EN 0x80 1948#define MII_TG3_EPHY_SHADOW_EN 0x80
@@ -2211,8 +2312,6 @@ struct tg3_link_config {
2211 u8 duplex; 2312 u8 duplex;
2212 u8 autoneg; 2313 u8 autoneg;
2213 u8 flowctrl; 2314 u8 flowctrl;
2214#define TG3_FLOW_CTRL_TX 0x01
2215#define TG3_FLOW_CTRL_RX 0x02
2216 2315
2217 /* Describes what we actually have. */ 2316 /* Describes what we actually have. */
2218 u8 active_flowctrl; 2317 u8 active_flowctrl;
@@ -2507,7 +2606,6 @@ struct tg3 {
2507 u32 tg3_flags3; 2606 u32 tg3_flags3;
2508#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001 2607#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001
2509#define TG3_FLG3_ENABLE_APE 0x00000002 2608#define TG3_FLG3_ENABLE_APE 0x00000002
2510#define TG3_FLG3_5761_5784_AX_FIXES 0x00000004
2511#define TG3_FLG3_5701_DMA_BUG 0x00000008 2609#define TG3_FLG3_5701_DMA_BUG 0x00000008
2512#define TG3_FLG3_USE_PHYLIB 0x00000010 2610#define TG3_FLG3_USE_PHYLIB 0x00000010
2513#define TG3_FLG3_MDIOBUS_INITED 0x00000020 2611#define TG3_FLG3_MDIOBUS_INITED 0x00000020
@@ -2516,6 +2614,9 @@ struct tg3 {
2516#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100 2614#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100
2517#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 2615#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
2518#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400 2616#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400
2617#define TG3_FLG3_CLKREQ_BUG 0x00000800
2618#define TG3_FLG3_PHY_ENABLE_APD 0x00001000
2619#define TG3_FLG3_5755_PLUS 0x00002000
2519 2620
2520 struct timer_list timer; 2621 struct timer_list timer;
2521 u16 timer_counter; 2622 u16 timer_counter;
@@ -2547,14 +2648,16 @@ struct tg3 {
2547 2648
2548 /* PCI block */ 2649 /* PCI block */
2549 u32 pci_chip_rev_id; 2650 u32 pci_chip_rev_id;
2651 u16 pci_cmd;
2550 u8 pci_cacheline_sz; 2652 u8 pci_cacheline_sz;
2551 u8 pci_lat_timer; 2653 u8 pci_lat_timer;
2552 u8 pci_hdr_type;
2553 u8 pci_bist;
2554 2654
2555 int pm_cap; 2655 int pm_cap;
2556 int msi_cap; 2656 int msi_cap;
2657 union {
2557 int pcix_cap; 2658 int pcix_cap;
2659 int pcie_cap;
2660 };
2558 2661
2559 struct mii_bus *mdio_bus; 2662 struct mii_bus *mdio_bus;
2560 int mdio_irq[PHY_MAX_ADDR]; 2663 int mdio_irq[PHY_MAX_ADDR];
@@ -2588,11 +2691,16 @@ struct tg3 {
2588#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ 2691#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
2589#define TG3_PHY_ID_BCM50610 0x143bd60 2692#define TG3_PHY_ID_BCM50610 0x143bd60
2590#define TG3_PHY_ID_BCMAC131 0x143bc70 2693#define TG3_PHY_ID_BCMAC131 0x143bc70
2591 2694#define TG3_PHY_ID_RTL8211C 0x001cc910
2695#define TG3_PHY_ID_RTL8201E 0x00008200
2696#define TG3_PHY_ID_BCM57780 0x03625d90
2697#define TG3_PHY_OUI_MASK 0xfffffc00
2698#define TG3_PHY_OUI_1 0x00206000
2699#define TG3_PHY_OUI_2 0x0143bc00
2700#define TG3_PHY_OUI_3 0x03625c00
2592 2701
2593 u32 led_ctrl; 2702 u32 led_ctrl;
2594 u32 phy_otp; 2703 u32 phy_otp;
2595 u16 pci_cmd;
2596 2704
2597 char board_part_number[24]; 2705 char board_part_number[24];
2598#define TG3_VER_SIZE 32 2706#define TG3_VER_SIZE 32
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index e60498232b94..85ef8b744557 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -163,6 +163,11 @@
163 * v1.15 Apr 4, 2002 - Correct operation when aui=1 to be 163 * v1.15 Apr 4, 2002 - Correct operation when aui=1 to be
164 * 10T half duplex no loopback 164 * 10T half duplex no loopback
165 * Thanks to Gunnar Eikman 165 * Thanks to Gunnar Eikman
166 *
167 * Sakari Ailus <sakari.ailus@iki.fi>:
168 *
169 * v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway.
170 *
166 *******************************************************************************/ 171 *******************************************************************************/
167 172
168#include <linux/module.h> 173#include <linux/module.h>
@@ -213,12 +218,8 @@ static int debug;
213module_param(debug, int, 0); 218module_param(debug, int, 0);
214MODULE_PARM_DESC(debug, "ThunderLAN debug mask"); 219MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
215 220
216static int bbuf;
217module_param(bbuf, int, 0);
218MODULE_PARM_DESC(bbuf, "ThunderLAN use big buffer (0-1)");
219
220static const char TLanSignature[] = "TLAN"; 221static const char TLanSignature[] = "TLAN";
221static const char tlan_banner[] = "ThunderLAN driver v1.15\n"; 222static const char tlan_banner[] = "ThunderLAN driver v1.15a\n";
222static int tlan_have_pci; 223static int tlan_have_pci;
223static int tlan_have_eisa; 224static int tlan_have_eisa;
224 225
@@ -859,13 +860,8 @@ static int TLan_Init( struct net_device *dev )
859 860
860 priv = netdev_priv(dev); 861 priv = netdev_priv(dev);
861 862
862 if ( bbuf ) { 863 dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
863 dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS ) 864 * ( sizeof(TLanList) );
864 * ( sizeof(TLanList) + TLAN_MAX_FRAME_SIZE );
865 } else {
866 dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
867 * ( sizeof(TLanList) );
868 }
869 priv->dmaStorage = pci_alloc_consistent(priv->pciDev, 865 priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
870 dma_size, &priv->dmaStorageDMA); 866 dma_size, &priv->dmaStorageDMA);
871 priv->dmaSize = dma_size; 867 priv->dmaSize = dma_size;
@@ -881,16 +877,6 @@ static int TLan_Init( struct net_device *dev )
881 priv->txList = priv->rxList + TLAN_NUM_RX_LISTS; 877 priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
882 priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS; 878 priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
883 879
884 if ( bbuf ) {
885 priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS );
886 priv->rxBufferDMA =priv->txListDMA
887 + sizeof(TLanList) * TLAN_NUM_TX_LISTS;
888 priv->txBuffer = priv->rxBuffer
889 + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
890 priv->txBufferDMA = priv->rxBufferDMA
891 + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE );
892 }
893
894 err = 0; 880 err = 0;
895 for ( i = 0; i < 6 ; i++ ) 881 for ( i = 0; i < 6 ; i++ )
896 err |= TLan_EeReadByte( dev, 882 err |= TLan_EeReadByte( dev,
@@ -1094,9 +1080,8 @@ static void TLan_tx_timeout_work(struct work_struct *work)
1094static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) 1080static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1095{ 1081{
1096 TLanPrivateInfo *priv = netdev_priv(dev); 1082 TLanPrivateInfo *priv = netdev_priv(dev);
1097 TLanList *tail_list;
1098 dma_addr_t tail_list_phys; 1083 dma_addr_t tail_list_phys;
1099 u8 *tail_buffer; 1084 TLanList *tail_list;
1100 unsigned long flags; 1085 unsigned long flags;
1101 unsigned int txlen; 1086 unsigned int txlen;
1102 1087
@@ -1125,15 +1110,10 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1125 1110
1126 tail_list->forward = 0; 1111 tail_list->forward = 0;
1127 1112
1128 if ( bbuf ) { 1113 tail_list->buffer[0].address = pci_map_single(priv->pciDev,
1129 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); 1114 skb->data, txlen,
1130 skb_copy_from_linear_data(skb, tail_buffer, txlen); 1115 PCI_DMA_TODEVICE);
1131 } else { 1116 TLan_StoreSKB(tail_list, skb);
1132 tail_list->buffer[0].address = pci_map_single(priv->pciDev,
1133 skb->data, txlen,
1134 PCI_DMA_TODEVICE);
1135 TLan_StoreSKB(tail_list, skb);
1136 }
1137 1117
1138 tail_list->frameSize = (u16) txlen; 1118 tail_list->frameSize = (u16) txlen;
1139 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen; 1119 tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
@@ -1163,9 +1143,6 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1163 1143
1164 CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS ); 1144 CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
1165 1145
1166 if ( bbuf )
1167 dev_kfree_skb_any(skb);
1168
1169 dev->trans_start = jiffies; 1146 dev->trans_start = jiffies;
1170 return 0; 1147 return 0;
1171 1148
@@ -1429,17 +1406,16 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
1429 head_list = priv->txList + priv->txHead; 1406 head_list = priv->txList + priv->txHead;
1430 1407
1431 while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1408 while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
1409 struct sk_buff *skb = TLan_GetSKB(head_list);
1410
1432 ack++; 1411 ack++;
1433 if ( ! bbuf ) { 1412 pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
1434 struct sk_buff *skb = TLan_GetSKB(head_list); 1413 max(skb->len,
1435 pci_unmap_single(priv->pciDev, head_list->buffer[0].address, 1414 (unsigned int)TLAN_MIN_FRAME_SIZE),
1436 max(skb->len, 1415 PCI_DMA_TODEVICE);
1437 (unsigned int)TLAN_MIN_FRAME_SIZE), 1416 dev_kfree_skb_any(skb);
1438 PCI_DMA_TODEVICE); 1417 head_list->buffer[8].address = 0;
1439 dev_kfree_skb_any(skb); 1418 head_list->buffer[9].address = 0;
1440 head_list->buffer[8].address = 0;
1441 head_list->buffer[9].address = 0;
1442 }
1443 1419
1444 if ( tmpCStat & TLAN_CSTAT_EOC ) 1420 if ( tmpCStat & TLAN_CSTAT_EOC )
1445 eoc = 1; 1421 eoc = 1;
@@ -1549,7 +1525,6 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1549 TLanPrivateInfo *priv = netdev_priv(dev); 1525 TLanPrivateInfo *priv = netdev_priv(dev);
1550 u32 ack = 0; 1526 u32 ack = 0;
1551 int eoc = 0; 1527 int eoc = 0;
1552 u8 *head_buffer;
1553 TLanList *head_list; 1528 TLanList *head_list;
1554 struct sk_buff *skb; 1529 struct sk_buff *skb;
1555 TLanList *tail_list; 1530 TLanList *tail_list;
@@ -1564,53 +1539,33 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1564 while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { 1539 while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
1565 dma_addr_t frameDma = head_list->buffer[0].address; 1540 dma_addr_t frameDma = head_list->buffer[0].address;
1566 u32 frameSize = head_list->frameSize; 1541 u32 frameSize = head_list->frameSize;
1542 struct sk_buff *new_skb;
1543
1567 ack++; 1544 ack++;
1568 if (tmpCStat & TLAN_CSTAT_EOC) 1545 if (tmpCStat & TLAN_CSTAT_EOC)
1569 eoc = 1; 1546 eoc = 1;
1570 1547
1571 if (bbuf) { 1548 new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
1572 skb = netdev_alloc_skb(dev, frameSize + 7); 1549 if ( !new_skb )
1573 if ( !skb ) 1550 goto drop_and_reuse;
1574 goto drop_and_reuse;
1575
1576 head_buffer = priv->rxBuffer
1577 + (priv->rxHead * TLAN_MAX_FRAME_SIZE);
1578 skb_reserve(skb, 2);
1579 pci_dma_sync_single_for_cpu(priv->pciDev,
1580 frameDma, frameSize,
1581 PCI_DMA_FROMDEVICE);
1582 skb_copy_from_linear_data(skb, head_buffer, frameSize);
1583 skb_put(skb, frameSize);
1584 dev->stats.rx_bytes += frameSize;
1585
1586 skb->protocol = eth_type_trans( skb, dev );
1587 netif_rx( skb );
1588 } else {
1589 struct sk_buff *new_skb;
1590
1591 new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
1592 if ( !new_skb )
1593 goto drop_and_reuse;
1594
1595 skb = TLan_GetSKB(head_list);
1596 pci_unmap_single(priv->pciDev, frameDma,
1597 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1598 skb_put( skb, frameSize );
1599 1551
1600 dev->stats.rx_bytes += frameSize; 1552 skb = TLan_GetSKB(head_list);
1553 pci_unmap_single(priv->pciDev, frameDma,
1554 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
1555 skb_put( skb, frameSize );
1601 1556
1602 skb->protocol = eth_type_trans( skb, dev ); 1557 dev->stats.rx_bytes += frameSize;
1603 netif_rx( skb );
1604 1558
1605 skb_reserve( new_skb, NET_IP_ALIGN ); 1559 skb->protocol = eth_type_trans( skb, dev );
1606 head_list->buffer[0].address = pci_map_single(priv->pciDev, 1560 netif_rx( skb );
1607 new_skb->data,
1608 TLAN_MAX_FRAME_SIZE,
1609 PCI_DMA_FROMDEVICE);
1610 1561
1611 TLan_StoreSKB(head_list, new_skb); 1562 skb_reserve( new_skb, NET_IP_ALIGN );
1563 head_list->buffer[0].address = pci_map_single(priv->pciDev,
1564 new_skb->data,
1565 TLAN_MAX_FRAME_SIZE,
1566 PCI_DMA_FROMDEVICE);
1612 1567
1613 } 1568 TLan_StoreSKB(head_list, new_skb);
1614drop_and_reuse: 1569drop_and_reuse:
1615 head_list->forward = 0; 1570 head_list->forward = 0;
1616 head_list->cStat = 0; 1571 head_list->cStat = 0;
@@ -1653,8 +1608,6 @@ drop_and_reuse:
1653 } 1608 }
1654 } 1609 }
1655 1610
1656 dev->last_rx = jiffies;
1657
1658 return ack; 1611 return ack;
1659 1612
1660} /* TLan_HandleRxEOF */ 1613} /* TLan_HandleRxEOF */
@@ -1995,12 +1948,7 @@ static void TLan_ResetLists( struct net_device *dev )
1995 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { 1948 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
1996 list = priv->txList + i; 1949 list = priv->txList + i;
1997 list->cStat = TLAN_CSTAT_UNUSED; 1950 list->cStat = TLAN_CSTAT_UNUSED;
1998 if ( bbuf ) { 1951 list->buffer[0].address = 0;
1999 list->buffer[0].address = priv->txBufferDMA
2000 + ( i * TLAN_MAX_FRAME_SIZE );
2001 } else {
2002 list->buffer[0].address = 0;
2003 }
2004 list->buffer[2].count = 0; 1952 list->buffer[2].count = 0;
2005 list->buffer[2].address = 0; 1953 list->buffer[2].address = 0;
2006 list->buffer[8].address = 0; 1954 list->buffer[8].address = 0;
@@ -2015,23 +1963,18 @@ static void TLan_ResetLists( struct net_device *dev )
2015 list->cStat = TLAN_CSTAT_READY; 1963 list->cStat = TLAN_CSTAT_READY;
2016 list->frameSize = TLAN_MAX_FRAME_SIZE; 1964 list->frameSize = TLAN_MAX_FRAME_SIZE;
2017 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 1965 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
2018 if ( bbuf ) { 1966 skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
2019 list->buffer[0].address = priv->rxBufferDMA 1967 if ( !skb ) {
2020 + ( i * TLAN_MAX_FRAME_SIZE ); 1968 pr_err("TLAN: out of memory for received data.\n" );
2021 } else { 1969 break;
2022 skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 );
2023 if ( !skb ) {
2024 pr_err("TLAN: out of memory for received data.\n" );
2025 break;
2026 }
2027
2028 skb_reserve( skb, NET_IP_ALIGN );
2029 list->buffer[0].address = pci_map_single(priv->pciDev,
2030 skb->data,
2031 TLAN_MAX_FRAME_SIZE,
2032 PCI_DMA_FROMDEVICE);
2033 TLan_StoreSKB(list, skb);
2034 } 1970 }
1971
1972 skb_reserve( skb, NET_IP_ALIGN );
1973 list->buffer[0].address = pci_map_single(priv->pciDev,
1974 skb->data,
1975 TLAN_MAX_FRAME_SIZE,
1976 PCI_DMA_FROMDEVICE);
1977 TLan_StoreSKB(list, skb);
2035 list->buffer[1].count = 0; 1978 list->buffer[1].count = 0;
2036 list->buffer[1].address = 0; 1979 list->buffer[1].address = 0;
2037 list->forward = list_phys + sizeof(TLanList); 1980 list->forward = list_phys + sizeof(TLanList);
@@ -2054,35 +1997,33 @@ static void TLan_FreeLists( struct net_device *dev )
2054 TLanList *list; 1997 TLanList *list;
2055 struct sk_buff *skb; 1998 struct sk_buff *skb;
2056 1999
2057 if ( ! bbuf ) { 2000 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
2058 for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { 2001 list = priv->txList + i;
2059 list = priv->txList + i; 2002 skb = TLan_GetSKB(list);
2060 skb = TLan_GetSKB(list); 2003 if ( skb ) {
2061 if ( skb ) { 2004 pci_unmap_single(
2062 pci_unmap_single( 2005 priv->pciDev,
2063 priv->pciDev, 2006 list->buffer[0].address,
2064 list->buffer[0].address, 2007 max(skb->len,
2065 max(skb->len, 2008 (unsigned int)TLAN_MIN_FRAME_SIZE),
2066 (unsigned int)TLAN_MIN_FRAME_SIZE), 2009 PCI_DMA_TODEVICE);
2067 PCI_DMA_TODEVICE); 2010 dev_kfree_skb_any( skb );
2068 dev_kfree_skb_any( skb ); 2011 list->buffer[8].address = 0;
2069 list->buffer[8].address = 0; 2012 list->buffer[9].address = 0;
2070 list->buffer[9].address = 0;
2071 }
2072 } 2013 }
2014 }
2073 2015
2074 for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { 2016 for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
2075 list = priv->rxList + i; 2017 list = priv->rxList + i;
2076 skb = TLan_GetSKB(list); 2018 skb = TLan_GetSKB(list);
2077 if ( skb ) { 2019 if ( skb ) {
2078 pci_unmap_single(priv->pciDev, 2020 pci_unmap_single(priv->pciDev,
2079 list->buffer[0].address, 2021 list->buffer[0].address,
2080 TLAN_MAX_FRAME_SIZE, 2022 TLAN_MAX_FRAME_SIZE,
2081 PCI_DMA_FROMDEVICE); 2023 PCI_DMA_FROMDEVICE);
2082 dev_kfree_skb_any( skb ); 2024 dev_kfree_skb_any( skb );
2083 list->buffer[8].address = 0; 2025 list->buffer[8].address = 0;
2084 list->buffer[9].address = 0; 2026 list->buffer[9].address = 0;
2085 }
2086 } 2027 }
2087 } 2028 }
2088} /* TLan_FreeLists */ 2029} /* TLan_FreeLists */
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index bf621328b601..43853e3b210e 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -296,8 +296,9 @@ static int __devinit xl_probe(struct pci_dev *pdev,
296 } ; 296 } ;
297 297
298 /* 298 /*
299 * Allowing init_trdev to allocate the dev->priv structure will align xl_private 299 * Allowing init_trdev to allocate the private data will align
300 * on a 32 bytes boundary which we need for the rx/tx descriptors 300 * xl_private on a 32 bytes boundary which we need for the rx/tx
301 * descriptors
301 */ 302 */
302 303
303 dev = alloc_trdev(sizeof(struct xl_private)) ; 304 dev = alloc_trdev(sizeof(struct xl_private)) ;
@@ -638,13 +639,13 @@ static int xl_open(struct net_device *dev)
638 /* These MUST be on 8 byte boundaries */ 639 /* These MUST be on 8 byte boundaries */
639 xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL); 640 xl_priv->xl_tx_ring = kzalloc((sizeof(struct xl_tx_desc) * XL_TX_RING_SIZE) + 7, GFP_DMA | GFP_KERNEL);
640 if (xl_priv->xl_tx_ring == NULL) { 641 if (xl_priv->xl_tx_ring == NULL) {
641 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n", 642 printk(KERN_WARNING "%s: Not enough memory to allocate tx buffers.\n",
642 dev->name); 643 dev->name);
643 free_irq(dev->irq,dev); 644 free_irq(dev->irq,dev);
644 return -ENOMEM; 645 return -ENOMEM;
645 } 646 }
646 xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL); 647 xl_priv->xl_rx_ring = kzalloc((sizeof(struct xl_rx_desc) * XL_RX_RING_SIZE) +7, GFP_DMA | GFP_KERNEL);
647 if (xl_priv->xl_tx_ring == NULL) { 648 if (xl_priv->xl_rx_ring == NULL) {
648 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n", 649 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers.\n",
649 dev->name); 650 dev->name);
650 free_irq(dev->irq,dev); 651 free_irq(dev->irq,dev);
@@ -669,6 +670,8 @@ static int xl_open(struct net_device *dev)
669 if (i==0) { 670 if (i==0) {
670 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled \n",dev->name) ; 671 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled \n",dev->name) ;
671 free_irq(dev->irq,dev) ; 672 free_irq(dev->irq,dev) ;
673 kfree(xl_priv->xl_tx_ring);
674 kfree(xl_priv->xl_rx_ring);
672 return -EIO ; 675 return -EIO ;
673 } 676 }
674 677
@@ -974,7 +977,6 @@ static void xl_rx(struct net_device *dev)
974 977
975 netif_rx(skb2) ; 978 netif_rx(skb2) ;
976 } /* if multiple buffers */ 979 } /* if multiple buffers */
977 dev->last_rx = jiffies ;
978 } /* while packet to do */ 980 } /* while packet to do */
979 981
980 /* Clear the updComplete interrupt */ 982 /* Clear the updComplete interrupt */
@@ -1571,7 +1573,6 @@ static void xl_arb_cmd(struct net_device *dev)
1571 * anyway. 1573 * anyway.
1572 */ 1574 */
1573 1575
1574 dev->last_rx = jiffies ;
1575 /* Acknowledge interrupt, this tells nic we are done with the arb */ 1576 /* Acknowledge interrupt, this tells nic we are done with the arb */
1576 writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 1577 writel(ACK_INTERRUPT | ARBCACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1577 1578
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
index e6b2e06493e7..c4137b0f808e 100644
--- a/drivers/net/tokenring/Kconfig
+++ b/drivers/net/tokenring/Kconfig
@@ -4,7 +4,7 @@
4 4
5# So far, we only have PCI, ISA, and MCA token ring devices 5# So far, we only have PCI, ISA, and MCA token ring devices
6menuconfig TR 6menuconfig TR
7 bool "Token Ring driver support" 7 tristate "Token Ring driver support"
8 depends on NETDEVICES && !UML 8 depends on NETDEVICES && !UML
9 depends on (PCI || ISA || MCA || CCW) 9 depends on (PCI || ISA || MCA || CCW)
10 select LLC 10 select LLC
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index 7a7de0469eae..b566d6d79ecd 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -99,7 +99,6 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_
99 struct net_local *tp; 99 struct net_local *tp;
100 int ret, pci_irq_line; 100 int ret, pci_irq_line;
101 unsigned long pci_ioaddr; 101 unsigned long pci_ioaddr;
102 DECLARE_MAC_BUF(mac);
103 102
104 if (versionprinted++ == 0) 103 if (versionprinted++ == 0)
105 printk("%s", version); 104 printk("%s", version);
@@ -147,8 +146,7 @@ static int __devinit abyss_attach(struct pci_dev *pdev, const struct pci_device_
147 146
148 abyss_read_eeprom(dev); 147 abyss_read_eeprom(dev);
149 148
150 printk("%s: Ring Station Address: %s\n", 149 printk("%s: Ring Station Address: %pM\n", dev->name, dev->dev_addr);
151 dev->name, print_mac(mac, dev->dev_addr));
152 150
153 tp = netdev_priv(dev); 151 tp = netdev_priv(dev);
154 tp->setnselout = abyss_setnselout_pins; 152 tp->setnselout = abyss_setnselout_pins;
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index e494c63bfbd9..fa7bce6e0c6d 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -389,7 +389,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
389 unsigned long timeout; 389 unsigned long timeout;
390 static int version_printed; 390 static int version_printed;
391#endif 391#endif
392 DECLARE_MAC_BUF(mac);
393 392
394 /* Query the adapter PIO base port which will return 393 /* Query the adapter PIO base port which will return
395 * indication of where MMIO was placed. We also have a 394 * indication of where MMIO was placed. We also have a
@@ -703,8 +702,7 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
703 channel_def[cardpresent - 1], adapter_def(ti->adapter_type)); 702 channel_def[cardpresent - 1], adapter_def(ti->adapter_type));
704 DPRINTK("using irq %d, PIOaddr %hx, %dK shared RAM.\n", 703 DPRINTK("using irq %d, PIOaddr %hx, %dK shared RAM.\n",
705 irq, PIOaddr, ti->mapped_ram_size / 2); 704 irq, PIOaddr, ti->mapped_ram_size / 2);
706 DPRINTK("Hardware address : %s\n", 705 DPRINTK("Hardware address : %pM\n", dev->dev_addr);
707 print_mac(mac, dev->dev_addr));
708 if (ti->page_mask) 706 if (ti->page_mask)
709 DPRINTK("Shared RAM paging enabled. " 707 DPRINTK("Shared RAM paging enabled. "
710 "Page size: %uK Shared Ram size %dK\n", 708 "Page size: %uK Shared Ram size %dK\n",
@@ -1741,8 +1739,6 @@ static void tr_rx(struct net_device *dev)
1741 void __iomem *trhhdr = rbuf + offsetof(struct rec_buf, data); 1739 void __iomem *trhhdr = rbuf + offsetof(struct rec_buf, data);
1742 u8 saddr[6]; 1740 u8 saddr[6];
1743 u8 daddr[6]; 1741 u8 daddr[6];
1744 DECLARE_MAC_BUF(mac);
1745 DECLARE_MAC_BUF(mac2);
1746 int i; 1742 int i;
1747 for (i = 0 ; i < 6 ; i++) 1743 for (i = 0 ; i < 6 ; i++)
1748 saddr[i] = readb(trhhdr + SADDR_OFST + i); 1744 saddr[i] = readb(trhhdr + SADDR_OFST + i);
@@ -1750,9 +1746,9 @@ static void tr_rx(struct net_device *dev)
1750 daddr[i] = readb(trhhdr + DADDR_OFST + i); 1746 daddr[i] = readb(trhhdr + DADDR_OFST + i);
1751 DPRINTK("Probably non-IP frame received.\n"); 1747 DPRINTK("Probably non-IP frame received.\n");
1752 DPRINTK("ssap: %02X dsap: %02X " 1748 DPRINTK("ssap: %02X dsap: %02X "
1753 "saddr: %s daddr: %$s\n", 1749 "saddr: %pM daddr: %pM\n",
1754 readb(llc + SSAP_OFST), readb(llc + DSAP_OFST), 1750 readb(llc + SSAP_OFST), readb(llc + DSAP_OFST),
1755 print_mac(mac, saddr), print_mac(mac2, daddr)); 1751 saddr, daddr);
1756 } 1752 }
1757#endif 1753#endif
1758 1754
@@ -1826,7 +1822,6 @@ static void tr_rx(struct net_device *dev)
1826 skb->ip_summed = CHECKSUM_COMPLETE; 1822 skb->ip_summed = CHECKSUM_COMPLETE;
1827 } 1823 }
1828 netif_rx(skb); 1824 netif_rx(skb);
1829 dev->last_rx = jiffies;
1830} /*tr_rx */ 1825} /*tr_rx */
1831 1826
1832/*****************************************************************************/ 1827/*****************************************************************************/
@@ -1842,8 +1837,8 @@ static void ibmtr_reset_timer(struct timer_list *tmr, struct net_device *dev)
1842 1837
1843/*****************************************************************************/ 1838/*****************************************************************************/
1844 1839
1845void tok_rerun(unsigned long dev_addr){ 1840static void tok_rerun(unsigned long dev_addr)
1846 1841{
1847 struct net_device *dev = (struct net_device *)dev_addr; 1842 struct net_device *dev = (struct net_device *)dev_addr;
1848 struct tok_info *ti = netdev_priv(dev); 1843 struct tok_info *ti = netdev_priv(dev);
1849 1844
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 59d1673f9387..239c75217b12 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -446,9 +446,6 @@ static int streamer_reset(struct net_device *dev)
446 unsigned int uaa_addr; 446 unsigned int uaa_addr;
447 struct sk_buff *skb = NULL; 447 struct sk_buff *skb = NULL;
448 __u16 misr; 448 __u16 misr;
449#if STREAMER_DEBUG
450 DECLARE_MAC_BUF(mac);
451#endif
452 449
453 streamer_priv = netdev_priv(dev); 450 streamer_priv = netdev_priv(dev);
454 streamer_mmio = streamer_priv->streamer_mmio; 451 streamer_mmio = streamer_priv->streamer_mmio;
@@ -577,8 +574,7 @@ static int streamer_reset(struct net_device *dev)
577 dev->dev_addr[i+1]= addr & 0xff; 574 dev->dev_addr[i+1]= addr & 0xff;
578 } 575 }
579#if STREAMER_DEBUG 576#if STREAMER_DEBUG
580 printk("Adapter address: %s\n", 577 printk("Adapter address: %pM\n", dev->dev_addr);
581 print_mac(mac, dev->dev_addr));
582#endif 578#endif
583 } 579 }
584 return 0; 580 return 0;
@@ -1013,7 +1009,6 @@ static void streamer_rx(struct net_device *dev)
1013 /* send up to the protocol */ 1009 /* send up to the protocol */
1014 netif_rx(skb); 1010 netif_rx(skb);
1015 } 1011 }
1016 dev->last_rx = jiffies;
1017 streamer_priv->streamer_stats.rx_packets++; 1012 streamer_priv->streamer_stats.rx_packets++;
1018 streamer_priv->streamer_stats.rx_bytes += length; 1013 streamer_priv->streamer_stats.rx_bytes += length;
1019 } /* if skb == null */ 1014 } /* if skb == null */
@@ -1538,7 +1533,6 @@ static void streamer_arb_cmd(struct net_device *dev)
1538 1533
1539#if STREAMER_NETWORK_MONITOR 1534#if STREAMER_NETWORK_MONITOR
1540 struct trh_hdr *mac_hdr; 1535 struct trh_hdr *mac_hdr;
1541 DECLARE_MAC_BUF(mac);
1542#endif 1536#endif
1543 1537
1544 writew(streamer_priv->arb, streamer_mmio + LAPA); 1538 writew(streamer_priv->arb, streamer_mmio + LAPA);
@@ -1611,11 +1605,11 @@ static void streamer_arb_cmd(struct net_device *dev)
1611 dev->name); 1605 dev->name);
1612 mac_hdr = tr_hdr(mac_frame); 1606 mac_hdr = tr_hdr(mac_frame);
1613 printk(KERN_WARNING 1607 printk(KERN_WARNING
1614 "%s: MAC Frame Dest. Addr: %s\n", 1608 "%s: MAC Frame Dest. Addr: %pM\n",
1615 dev->name, print_mac(mac, mac_hdr->daddr)); 1609 dev->name, mac_hdr->daddr);
1616 printk(KERN_WARNING 1610 printk(KERN_WARNING
1617 "%s: MAC Frame Srce. Addr: %s\n", 1611 "%s: MAC Frame Srce. Addr: %pM\n",
1618 dev->name, DEV->ADDR6(mac_hdr->saddr)); 1612 dev->name, mac_hdr->saddr);
1619#endif 1613#endif
1620 netif_rx(mac_frame); 1614 netif_rx(mac_frame);
1621 1615
@@ -1850,8 +1844,6 @@ static int sprintf_info(char *buffer, struct net_device *dev)
1850 struct streamer_parameters_table spt; 1844 struct streamer_parameters_table spt;
1851 int size = 0; 1845 int size = 0;
1852 int i; 1846 int i;
1853 DECLARE_MAC_BUF(mac);
1854 DECLARE_MAC_BUF(mac2);
1855 1847
1856 writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA); 1848 writew(streamer_priv->streamer_addr_table_addr, streamer_mmio + LAPA);
1857 for (i = 0; i < 14; i += 2) { 1849 for (i = 0; i < 14; i += 2) {
@@ -1873,9 +1865,8 @@ static int sprintf_info(char *buffer, struct net_device *dev)
1873 size = sprintf(buffer, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name); 1865 size = sprintf(buffer, "\n%6s: Adapter Address : Node Address : Functional Addr\n", dev->name);
1874 1866
1875 size += sprintf(buffer + size, 1867 size += sprintf(buffer + size,
1876 "%6s: %s : %s : %02x:%02x:%02x:%02x\n", 1868 "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
1877 dev->name, print_mac(mac, dev->dev_addr), 1869 dev->name, dev->dev_addr, sat.node_addr,
1878 print_mac(mac2, sat.node_addr),
1879 sat.func_addr[0], sat.func_addr[1], 1870 sat.func_addr[0], sat.func_addr[1],
1880 sat.func_addr[2], sat.func_addr[3]); 1871 sat.func_addr[2], sat.func_addr[3]);
1881 1872
@@ -1884,19 +1875,18 @@ static int sprintf_info(char *buffer, struct net_device *dev)
1884 size += sprintf(buffer + size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name); 1875 size += sprintf(buffer + size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n", dev->name);
1885 1876
1886 size += sprintf(buffer + size, 1877 size += sprintf(buffer + size,
1887 "%6s: %02x:%02x:%02x:%02x : %s : %s : %04x : %04x : %04x :\n", 1878 "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
1888 dev->name, spt.phys_addr[0], spt.phys_addr[1], 1879 dev->name, spt.phys_addr[0], spt.phys_addr[1],
1889 spt.phys_addr[2], spt.phys_addr[3], 1880 spt.phys_addr[2], spt.phys_addr[3],
1890 print_mac(mac, spt.up_node_addr), 1881 spt.up_node_addr, spt.poll_addr,
1891 print_mac(mac2, spt.poll_addr),
1892 ntohs(spt.acc_priority), ntohs(spt.auth_source_class), 1882 ntohs(spt.acc_priority), ntohs(spt.auth_source_class),
1893 ntohs(spt.att_code)); 1883 ntohs(spt.att_code));
1894 1884
1895 size += sprintf(buffer + size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name); 1885 size += sprintf(buffer + size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n", dev->name);
1896 1886
1897 size += sprintf(buffer + size, 1887 size += sprintf(buffer + size,
1898 "%6s: %s : %04x : %04x : %04x : %04x : %04x : %04x : \n", 1888 "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
1899 dev->name, print_mac(mac, spt.source_addr), 1889 dev->name, spt.source_addr,
1900 ntohs(spt.beacon_type), ntohs(spt.major_vector), 1890 ntohs(spt.beacon_type), ntohs(spt.major_vector),
1901 ntohs(spt.lan_status), ntohs(spt.local_ring), 1891 ntohs(spt.lan_status), ntohs(spt.local_ring),
1902 ntohs(spt.mon_error), ntohs(spt.frame_correl)); 1892 ntohs(spt.mon_error), ntohs(spt.frame_correl));
@@ -1905,10 +1895,10 @@ static int sprintf_info(char *buffer, struct net_device *dev)
1905 dev->name); 1895 dev->name);
1906 1896
1907 size += sprintf(buffer + size, 1897 size += sprintf(buffer + size,
1908 "%6s: : %02x : %02x : %s : %02x:%02x:%02x:%02x : \n", 1898 "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
1909 dev->name, ntohs(spt.beacon_transmit), 1899 dev->name, ntohs(spt.beacon_transmit),
1910 ntohs(spt.beacon_receive), 1900 ntohs(spt.beacon_receive),
1911 print_mac(mac, spt.beacon_naun), 1901 spt.beacon_naun,
1912 spt.beacon_phys[0], spt.beacon_phys[1], 1902 spt.beacon_phys[0], spt.beacon_phys[1],
1913 spt.beacon_phys[2], spt.beacon_phys[3]); 1903 spt.beacon_phys[2], spt.beacon_phys[3]);
1914 return size; 1904 return size;
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index c9c5a2b1ed9e..917b4d201e09 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -152,7 +152,6 @@ static int __devinit madgemc_probe(struct device *device)
152 struct card_info *card; 152 struct card_info *card;
153 struct mca_device *mdev = to_mca_device(device); 153 struct mca_device *mdev = to_mca_device(device);
154 int ret = 0; 154 int ret = 0;
155 DECLARE_MAC_BUF(mac);
156 155
157 if (versionprinted++ == 0) 156 if (versionprinted++ == 0)
158 printk("%s", version); 157 printk("%s", version);
@@ -323,8 +322,8 @@ static int __devinit madgemc_probe(struct device *device)
323 mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME); 322 mca_device_set_name(mdev, (card->cardtype == 0x08)?MADGEMC16_CARDNAME:MADGEMC32_CARDNAME);
324 mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev); 323 mca_set_adapter_procfn(mdev->slot, madgemc_mcaproc, dev);
325 324
326 printk("%s: Ring Station Address: %s\n", 325 printk("%s: Ring Station Address: %pM\n",
327 dev->name, print_mac(mac, dev->dev_addr)); 326 dev->name, dev->dev_addr);
328 327
329 if (tmsdev_init(dev, device)) { 328 if (tmsdev_init(dev, device)) {
330 printk("%s: unable to get memory for dev->priv.\n", 329 printk("%s: unable to get memory for dev->priv.\n",
@@ -467,7 +466,7 @@ static irqreturn_t madgemc_interrupt(int irq, void *dev_id)
467 * zero to leave the TMS NSELOUT bits unaffected. 466 * zero to leave the TMS NSELOUT bits unaffected.
468 * 467 *
469 */ 468 */
470unsigned short madgemc_setnselout_pins(struct net_device *dev) 469static unsigned short madgemc_setnselout_pins(struct net_device *dev)
471{ 470{
472 unsigned char reg1; 471 unsigned char reg1;
473 struct net_local *tp = netdev_priv(dev); 472 struct net_local *tp = netdev_priv(dev);
@@ -690,7 +689,6 @@ static int madgemc_mcaproc(char *buf, int slot, void *d)
690 struct net_local *tp = netdev_priv(dev); 689 struct net_local *tp = netdev_priv(dev);
691 struct card_info *curcard = tp->tmspriv; 690 struct card_info *curcard = tp->tmspriv;
692 int len = 0; 691 int len = 0;
693 DECLARE_MAC_BUF(mac);
694 692
695 len += sprintf(buf+len, "-------\n"); 693 len += sprintf(buf+len, "-------\n");
696 if (curcard) { 694 if (curcard) {
@@ -714,8 +712,8 @@ static int madgemc_mcaproc(char *buf, int slot, void *d)
714 } 712 }
715 len += sprintf(buf+len, " (%s)\n", (curcard->fairness)?"Unfair":"Fair"); 713 len += sprintf(buf+len, " (%s)\n", (curcard->fairness)?"Unfair":"Fair");
716 714
717 len += sprintf(buf+len, "Ring Station Address: %s\n", 715 len += sprintf(buf+len, "Ring Station Address: %pM\n",
718 print_mac(mac, dev->dev_addr)); 716 dev->dev_addr);
719 } else 717 } else
720 len += sprintf(buf+len, "Card not configured\n"); 718 len += sprintf(buf+len, "Card not configured\n");
721 719
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 0ab51a0f35fc..ecb5c7c96910 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -421,10 +421,7 @@ static int olympic_init(struct net_device *dev)
421 memcpy_fromio(&dev->dev_addr[0], adapter_addr,6); 421 memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
422 422
423#if OLYMPIC_DEBUG 423#if OLYMPIC_DEBUG
424 { 424 printk("adapter address: %pM\n", dev->dev_addr);
425 DECLARE_MAC_BUF(mac);
426 printk("adapter address: %s\n", print_mac(mac, dev->dev_addr));
427 }
428#endif 425#endif
429 426
430 olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12)); 427 olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
@@ -441,7 +438,6 @@ static int olympic_open(struct net_device *dev)
441 unsigned long flags, t; 438 unsigned long flags, t;
442 int i, open_finished = 1 ; 439 int i, open_finished = 1 ;
443 u8 resp, err; 440 u8 resp, err;
444 DECLARE_MAC_BUF(mac);
445 441
446 DECLARE_WAITQUEUE(wait,current) ; 442 DECLARE_WAITQUEUE(wait,current) ;
447 443
@@ -569,8 +565,8 @@ static int olympic_open(struct net_device *dev)
569 goto out; 565 goto out;
570 566
571 case 0x32: 567 case 0x32:
572 printk(KERN_WARNING "%s: Invalid LAA: %s\n", 568 printk(KERN_WARNING "%s: Invalid LAA: %pM\n",
573 dev->name, print_mac(mac, olympic_priv->olympic_laa)); 569 dev->name, olympic_priv->olympic_laa);
574 goto out; 570 goto out;
575 571
576 default: 572 default:
@@ -704,13 +700,12 @@ static int olympic_open(struct net_device *dev)
704 u8 __iomem *opt; 700 u8 __iomem *opt;
705 int i; 701 int i;
706 u8 addr[6]; 702 u8 addr[6];
707 DECLARE_MAC_BUF(mac);
708 oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr); 703 oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr);
709 opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr); 704 opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr);
710 705
711 for (i = 0; i < 6; i++) 706 for (i = 0; i < 6; i++)
712 addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i); 707 addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+i);
713 printk("%s: Node Address: %s\n",dev->name, print_mac(mac, addr)); 708 printk("%s: Node Address: %pM\n", dev->name, addr);
714 printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name, 709 printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
715 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), 710 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
716 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1), 711 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
@@ -719,7 +714,7 @@ static int olympic_open(struct net_device *dev)
719 714
720 for (i = 0; i < 6; i++) 715 for (i = 0; i < 6; i++)
721 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i); 716 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i);
722 printk("%s: NAUN Address: %s\n",dev->name, print_mac(mac, addr)); 717 printk("%s: NAUN Address: %pM\n", dev->name, addr);
723 } 718 }
724 719
725 netif_start_queue(dev); 720 netif_start_queue(dev);
@@ -867,7 +862,6 @@ static void olympic_rx(struct net_device *dev)
867 skb->protocol = tr_type_trans(skb,dev); 862 skb->protocol = tr_type_trans(skb,dev);
868 netif_rx(skb) ; 863 netif_rx(skb) ;
869 } 864 }
870 dev->last_rx = jiffies ;
871 olympic_priv->olympic_stats.rx_packets++ ; 865 olympic_priv->olympic_stats.rx_packets++ ;
872 olympic_priv->olympic_stats.rx_bytes += length ; 866 olympic_priv->olympic_stats.rx_bytes += length ;
873 } /* if skb == null */ 867 } /* if skb == null */
@@ -1440,19 +1434,12 @@ static void olympic_arb_cmd(struct net_device *dev)
1440 struct trh_hdr *mac_hdr; 1434 struct trh_hdr *mac_hdr;
1441 printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name); 1435 printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name);
1442 mac_hdr = tr_hdr(mac_frame); 1436 mac_hdr = tr_hdr(mac_frame);
1443 printk(KERN_WARNING "%s: MAC Frame Dest. Addr: " 1437 printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
1444 MAC_FMT " \n", dev->name, 1438 dev->name, mac_hdr->daddr);
1445 mac_hdr->daddr[0], mac_hdr->daddr[1], 1439 printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %pM\n",
1446 mac_hdr->daddr[2], mac_hdr->daddr[3], 1440 dev->name, mac_hdr->saddr);
1447 mac_hdr->daddr[4], mac_hdr->daddr[5]);
1448 printk(KERN_WARNING "%s: MAC Frame Srce. Addr: "
1449 MAC_FMT " \n", dev->name,
1450 mac_hdr->saddr[0], mac_hdr->saddr[1],
1451 mac_hdr->saddr[2], mac_hdr->saddr[3],
1452 mac_hdr->saddr[4], mac_hdr->saddr[5]);
1453 } 1441 }
1454 netif_rx(mac_frame); 1442 netif_rx(mac_frame);
1455 dev->last_rx = jiffies;
1456 1443
1457drop_frame: 1444drop_frame:
1458 /* Now tell the card we have dealt with the received frame */ 1445 /* Now tell the card we have dealt with the received frame */
@@ -1647,8 +1634,6 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
1647 u8 addr[6]; 1634 u8 addr[6];
1648 u8 addr2[6]; 1635 u8 addr2[6];
1649 int i; 1636 int i;
1650 DECLARE_MAC_BUF(mac);
1651 DECLARE_MAC_BUF(mac2);
1652 1637
1653 size = sprintf(buffer, 1638 size = sprintf(buffer,
1654 "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name); 1639 "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
@@ -1658,10 +1643,9 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
1658 for (i = 0 ; i < 6 ; i++) 1643 for (i = 0 ; i < 6 ; i++)
1659 addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i); 1644 addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i);
1660 1645
1661 size += sprintf(buffer+size, "%6s: %s : %s : %02x:%02x:%02x:%02x\n", 1646 size += sprintf(buffer+size, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
1662 dev->name, 1647 dev->name,
1663 print_mac(mac, dev->dev_addr), 1648 dev->dev_addr, addr,
1664 print_mac(mac2, addr),
1665 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)), 1649 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
1666 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1), 1650 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
1667 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2), 1651 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
@@ -1677,14 +1661,13 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
1677 for (i = 0 ; i < 6 ; i++) 1661 for (i = 0 ; i < 6 ; i++)
1678 addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i); 1662 addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);
1679 1663
1680 size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %s : %s : %04x : %04x : %04x :\n", 1664 size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
1681 dev->name, 1665 dev->name,
1682 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)), 1666 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
1683 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1), 1667 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
1684 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2), 1668 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
1685 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3), 1669 readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
1686 print_mac(mac, addr), 1670 addr, addr2,
1687 print_mac(mac2, addr2),
1688 swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))), 1671 swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
1689 swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))), 1672 swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
1690 swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code)))); 1673 swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));
@@ -1694,9 +1677,8 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
1694 1677
1695 for (i = 0 ; i < 6 ; i++) 1678 for (i = 0 ; i < 6 ; i++)
1696 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i); 1679 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
1697 size += sprintf(buffer+size, "%6s: %s : %04x : %04x : %04x : %04x : %04x : %04x : \n", 1680 size += sprintf(buffer+size, "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
1698 dev->name, 1681 dev->name, addr,
1699 print_mac(mac, addr),
1700 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))), 1682 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
1701 swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))), 1683 swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
1702 swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))), 1684 swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
@@ -1709,11 +1691,11 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
1709 1691
1710 for (i = 0 ; i < 6 ; i++) 1692 for (i = 0 ; i < 6 ; i++)
1711 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i); 1693 addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
1712 size += sprintf(buffer+size, "%6s: : %02x : %02x : %s : %02x:%02x:%02x:%02x : \n", 1694 size += sprintf(buffer+size, "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
1713 dev->name, 1695 dev->name,
1714 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))), 1696 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
1715 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))), 1697 swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
1716 print_mac(mac, addr), 1698 addr,
1717 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)), 1699 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
1718 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1), 1700 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
1719 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2), 1701 readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index 00ea94513460..b8c955f6d31a 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -122,7 +122,6 @@ static int __init setup_card(struct net_device *dev, struct device *pdev)
122 static int versionprinted; 122 static int versionprinted;
123 const unsigned *port; 123 const unsigned *port;
124 int j,err = 0; 124 int j,err = 0;
125 DECLARE_MAC_BUF(mac);
126 125
127 if (!dev) 126 if (!dev)
128 return -ENOMEM; 127 return -ENOMEM;
@@ -153,8 +152,8 @@ static int __init setup_card(struct net_device *dev, struct device *pdev)
153 152
154 proteon_read_eeprom(dev); 153 proteon_read_eeprom(dev);
155 154
156 printk(KERN_DEBUG "proteon.c: Ring Station Address: %s\n", 155 printk(KERN_DEBUG "proteon.c: Ring Station Address: %pM\n",
157 print_mac(mac, dev->dev_addr)); 156 dev->dev_addr);
158 157
159 tp = netdev_priv(dev); 158 tp = netdev_priv(dev);
160 tp->setnselout = proteon_setnselout_pins; 159 tp->setnselout = proteon_setnselout_pins;
@@ -284,7 +283,7 @@ static void proteon_read_eeprom(struct net_device *dev)
284 dev->dev_addr[i] = proteon_sifreadw(dev, SIFINC) >> 8; 283 dev->dev_addr[i] = proteon_sifreadw(dev, SIFINC) >> 8;
285} 284}
286 285
287unsigned short proteon_setnselout_pins(struct net_device *dev) 286static unsigned short proteon_setnselout_pins(struct net_device *dev)
288{ 287{
289 return 0; 288 return 0;
290} 289}
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
index 41b6999a0f33..c0f58f08782c 100644
--- a/drivers/net/tokenring/skisa.c
+++ b/drivers/net/tokenring/skisa.c
@@ -139,7 +139,6 @@ static int __init setup_card(struct net_device *dev, struct device *pdev)
139 static int versionprinted; 139 static int versionprinted;
140 const unsigned *port; 140 const unsigned *port;
141 int j, err = 0; 141 int j, err = 0;
142 DECLARE_MAC_BUF(mac);
143 142
144 if (!dev) 143 if (!dev)
145 return -ENOMEM; 144 return -ENOMEM;
@@ -170,8 +169,8 @@ static int __init setup_card(struct net_device *dev, struct device *pdev)
170 169
171 sk_isa_read_eeprom(dev); 170 sk_isa_read_eeprom(dev);
172 171
173 printk(KERN_DEBUG "skisa.c: Ring Station Address: %s\n", 172 printk(KERN_DEBUG "skisa.c: Ring Station Address: %pM\n",
174 print_mac(mac, dev->dev_addr)); 173 dev->dev_addr);
175 174
176 tp = netdev_priv(dev); 175 tp = netdev_priv(dev);
177 tp->setnselout = sk_isa_setnselout_pins; 176 tp->setnselout = sk_isa_setnselout_pins;
@@ -301,7 +300,7 @@ static void sk_isa_read_eeprom(struct net_device *dev)
301 dev->dev_addr[i] = sk_isa_sifreadw(dev, SIFINC) >> 8; 300 dev->dev_addr[i] = sk_isa_sifreadw(dev, SIFINC) >> 8;
302} 301}
303 302
304unsigned short sk_isa_setnselout_pins(struct net_device *dev) 303static unsigned short sk_isa_setnselout_pins(struct net_device *dev)
305{ 304{
306 return 0; 305 return 0;
307} 306}
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index ed50d288e494..a011666342ff 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -3910,7 +3910,6 @@ static int smctr_process_rx_packet(MAC_HEADER *rmf, __u16 size,
3910 /* Kick the packet on up. */ 3910 /* Kick the packet on up. */
3911 skb->protocol = tr_type_trans(skb, dev); 3911 skb->protocol = tr_type_trans(skb, dev);
3912 netif_rx(skb); 3912 netif_rx(skb);
3913 dev->last_rx = jiffies;
3914 err = 0; 3913 err = 0;
3915 } 3914 }
3916 3915
@@ -4496,7 +4495,6 @@ static int smctr_rx_frame(struct net_device *dev)
4496 /* Kick the packet on up. */ 4495 /* Kick the packet on up. */
4497 skb->protocol = tr_type_trans(skb, dev); 4496 skb->protocol = tr_type_trans(skb, dev);
4498 netif_rx(skb); 4497 netif_rx(skb);
4499 dev->last_rx = jiffies;
4500 } else { 4498 } else {
4501 } 4499 }
4502 } 4500 }
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index d07c4523c847..5be34c2fd483 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -180,10 +180,14 @@ void tms380tr_wait(unsigned long time);
180static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status); 180static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status);
181static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status); 181static void tms380tr_write_tpl_status(TPL *tpl, unsigned int Status);
182 182
183#define SIFREADB(reg) (((struct net_local *)dev->priv)->sifreadb(dev, reg)) 183#define SIFREADB(reg) \
184#define SIFWRITEB(val, reg) (((struct net_local *)dev->priv)->sifwriteb(dev, val, reg)) 184 (((struct net_local *)netdev_priv(dev))->sifreadb(dev, reg))
185#define SIFREADW(reg) (((struct net_local *)dev->priv)->sifreadw(dev, reg)) 185#define SIFWRITEB(val, reg) \
186#define SIFWRITEW(val, reg) (((struct net_local *)dev->priv)->sifwritew(dev, val, reg)) 186 (((struct net_local *)netdev_priv(dev))->sifwriteb(dev, val, reg))
187#define SIFREADW(reg) \
188 (((struct net_local *)netdev_priv(dev))->sifreadw(dev, reg))
189#define SIFWRITEW(val, reg) \
190 (((struct net_local *)netdev_priv(dev))->sifwritew(dev, val, reg))
187 191
188 192
189 193
@@ -2186,7 +2190,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
2186 skb_trim(skb,Length); 2190 skb_trim(skb,Length);
2187 skb->protocol = tr_type_trans(skb,dev); 2191 skb->protocol = tr_type_trans(skb,dev);
2188 netif_rx(skb); 2192 netif_rx(skb);
2189 dev->last_rx = jiffies;
2190 } 2193 }
2191 } 2194 }
2192 else /* Invalid frame */ 2195 else /* Invalid frame */
@@ -2331,7 +2334,7 @@ int tmsdev_init(struct net_device *dev, struct device *pdev)
2331{ 2334{
2332 struct net_local *tms_local; 2335 struct net_local *tms_local;
2333 2336
2334 memset(dev->priv, 0, sizeof(struct net_local)); 2337 memset(netdev_priv(dev), 0, sizeof(struct net_local));
2335 tms_local = netdev_priv(dev); 2338 tms_local = netdev_priv(dev);
2336 init_waitqueue_head(&tms_local->wait_for_tok_int); 2339 init_waitqueue_head(&tms_local->wait_for_tok_int);
2337 if (pdev->dma_mask) 2340 if (pdev->dma_mask)
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index 5f0ee880cfff..5f601773c260 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -100,7 +100,6 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
100 unsigned int pci_irq_line; 100 unsigned int pci_irq_line;
101 unsigned long pci_ioaddr; 101 unsigned long pci_ioaddr;
102 struct card_info *cardinfo = &card_info_table[ent->driver_data]; 102 struct card_info *cardinfo = &card_info_table[ent->driver_data];
103 DECLARE_MAC_BUF(mac);
104 103
105 if (versionprinted++ == 0) 104 if (versionprinted++ == 0)
106 printk("%s", version); 105 printk("%s", version);
@@ -137,8 +136,8 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
137 136
138 tms_pci_read_eeprom(dev); 137 tms_pci_read_eeprom(dev);
139 138
140 printk("%s: Ring Station Address: %s\n", 139 printk("%s: Ring Station Address: %pM\n",
141 dev->name, print_mac(mac, dev->dev_addr)); 140 dev->name, dev->dev_addr);
142 141
143 ret = tmsdev_init(dev, &pdev->dev); 142 ret = tmsdev_init(dev, &pdev->dev);
144 if (ret) { 143 if (ret) {
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index eb1da6f0b086..75461dbd4876 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -788,7 +788,6 @@ static int tsi108_complete_rx(struct net_device *dev, int budget)
788 skb_put(skb, data->rxring[rx].len); 788 skb_put(skb, data->rxring[rx].len);
789 skb->protocol = eth_type_trans(skb, dev); 789 skb->protocol = eth_type_trans(skb, dev);
790 netif_receive_skb(skb); 790 netif_receive_skb(skb);
791 dev->last_rx = jiffies;
792 } 791 }
793 792
794 return done; 793 return done;
@@ -889,7 +888,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget)
889 888
890 if (num_received < budget) { 889 if (num_received < budget) {
891 data->rxpending = 0; 890 data->rxpending = 0;
892 netif_rx_complete(dev, napi); 891 netif_rx_complete(napi);
893 892
894 TSI_WRITE(TSI108_EC_INTMASK, 893 TSI_WRITE(TSI108_EC_INTMASK,
895 TSI_READ(TSI108_EC_INTMASK) 894 TSI_READ(TSI108_EC_INTMASK)
@@ -920,7 +919,7 @@ static void tsi108_rx_int(struct net_device *dev)
920 * from tsi108_check_rxring(). 919 * from tsi108_check_rxring().
921 */ 920 */
922 921
923 if (netif_rx_schedule_prep(dev, &data->napi)) { 922 if (netif_rx_schedule_prep(&data->napi)) {
924 /* Mask, rather than ack, the receive interrupts. The ack 923 /* Mask, rather than ack, the receive interrupts. The ack
925 * will happen in tsi108_poll(). 924 * will happen in tsi108_poll().
926 */ 925 */
@@ -931,7 +930,7 @@ static void tsi108_rx_int(struct net_device *dev)
931 | TSI108_INT_RXTHRESH | 930 | TSI108_INT_RXTHRESH |
932 TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR | 931 TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
933 TSI108_INT_RXWAIT); 932 TSI108_INT_RXWAIT);
934 __netif_rx_schedule(dev, &data->napi); 933 __netif_rx_schedule(&data->napi);
935 } else { 934 } else {
936 if (!netif_running(dev)) { 935 if (!netif_running(dev)) {
937 /* This can happen if an interrupt occurs while the 936 /* This can happen if an interrupt occurs while the
@@ -1569,7 +1568,6 @@ tsi108_init_one(struct platform_device *pdev)
1569 struct tsi108_prv_data *data = NULL; 1568 struct tsi108_prv_data *data = NULL;
1570 hw_info *einfo; 1569 hw_info *einfo;
1571 int err = 0; 1570 int err = 0;
1572 DECLARE_MAC_BUF(mac);
1573 1571
1574 einfo = pdev->dev.platform_data; 1572 einfo = pdev->dev.platform_data;
1575 1573
@@ -1659,8 +1657,8 @@ tsi108_init_one(struct platform_device *pdev)
1659 } 1657 }
1660 1658
1661 platform_set_drvdata(pdev, dev); 1659 platform_set_drvdata(pdev, dev);
1662 printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %s\n", 1660 printk(KERN_INFO "%s: Tsi108 Gigabit Ethernet, MAC: %pM\n",
1663 dev->name, print_mac(mac, dev->dev_addr)); 1661 dev->name, dev->dev_addr);
1664#ifdef DEBUG 1662#ifdef DEBUG
1665 data->msg_enable = DEBUG; 1663 data->msg_enable = DEBUG;
1666 dump_eth_one(dev); 1664 dump_eth_one(dev);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 124d5d690dde..5166be930a52 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -459,7 +459,6 @@ static void de_rx (struct de_private *de)
459 459
460 de->net_stats.rx_packets++; 460 de->net_stats.rx_packets++;
461 de->net_stats.rx_bytes += skb->len; 461 de->net_stats.rx_bytes += skb->len;
462 de->dev->last_rx = jiffies;
463 rc = netif_rx (skb); 462 rc = netif_rx (skb);
464 if (rc == NET_RX_DROP) 463 if (rc == NET_RX_DROP)
465 drop = 1; 464 drop = 1;
@@ -484,7 +483,7 @@ rx_next:
484static irqreturn_t de_interrupt (int irq, void *dev_instance) 483static irqreturn_t de_interrupt (int irq, void *dev_instance)
485{ 484{
486 struct net_device *dev = dev_instance; 485 struct net_device *dev = dev_instance;
487 struct de_private *de = dev->priv; 486 struct de_private *de = netdev_priv(dev);
488 u32 status; 487 u32 status;
489 488
490 status = dr32(MacStatus); 489 status = dr32(MacStatus);
@@ -590,7 +589,7 @@ next:
590 589
591static int de_start_xmit (struct sk_buff *skb, struct net_device *dev) 590static int de_start_xmit (struct sk_buff *skb, struct net_device *dev)
592{ 591{
593 struct de_private *de = dev->priv; 592 struct de_private *de = netdev_priv(dev);
594 unsigned int entry, tx_free; 593 unsigned int entry, tx_free;
595 u32 mapping, len, flags = FirstFrag | LastFrag; 594 u32 mapping, len, flags = FirstFrag | LastFrag;
596 struct de_desc *txd; 595 struct de_desc *txd;
@@ -653,7 +652,7 @@ static int de_start_xmit (struct sk_buff *skb, struct net_device *dev)
653 652
654static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) 653static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
655{ 654{
656 struct de_private *de = dev->priv; 655 struct de_private *de = netdev_priv(dev);
657 u16 hash_table[32]; 656 u16 hash_table[32];
658 struct dev_mc_list *mclist; 657 struct dev_mc_list *mclist;
659 int i; 658 int i;
@@ -684,7 +683,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
684 683
685static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) 684static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
686{ 685{
687 struct de_private *de = dev->priv; 686 struct de_private *de = netdev_priv(dev);
688 struct dev_mc_list *mclist; 687 struct dev_mc_list *mclist;
689 int i; 688 int i;
690 u16 *eaddrs; 689 u16 *eaddrs;
@@ -712,7 +711,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
712 711
713static void __de_set_rx_mode (struct net_device *dev) 712static void __de_set_rx_mode (struct net_device *dev)
714{ 713{
715 struct de_private *de = dev->priv; 714 struct de_private *de = netdev_priv(dev);
716 u32 macmode; 715 u32 macmode;
717 unsigned int entry; 716 unsigned int entry;
718 u32 mapping; 717 u32 mapping;
@@ -797,7 +796,7 @@ out:
797static void de_set_rx_mode (struct net_device *dev) 796static void de_set_rx_mode (struct net_device *dev)
798{ 797{
799 unsigned long flags; 798 unsigned long flags;
800 struct de_private *de = dev->priv; 799 struct de_private *de = netdev_priv(dev);
801 800
802 spin_lock_irqsave (&de->lock, flags); 801 spin_lock_irqsave (&de->lock, flags);
803 __de_set_rx_mode(dev); 802 __de_set_rx_mode(dev);
@@ -821,7 +820,7 @@ static void __de_get_stats(struct de_private *de)
821 820
822static struct net_device_stats *de_get_stats(struct net_device *dev) 821static struct net_device_stats *de_get_stats(struct net_device *dev)
823{ 822{
824 struct de_private *de = dev->priv; 823 struct de_private *de = netdev_priv(dev);
825 824
826 /* The chip only need report frame silently dropped. */ 825 /* The chip only need report frame silently dropped. */
827 spin_lock_irq(&de->lock); 826 spin_lock_irq(&de->lock);
@@ -1355,7 +1354,7 @@ static void de_free_rings (struct de_private *de)
1355 1354
1356static int de_open (struct net_device *dev) 1355static int de_open (struct net_device *dev)
1357{ 1356{
1358 struct de_private *de = dev->priv; 1357 struct de_private *de = netdev_priv(dev);
1359 int rc; 1358 int rc;
1360 1359
1361 if (netif_msg_ifup(de)) 1360 if (netif_msg_ifup(de))
@@ -1400,7 +1399,7 @@ err_out_free:
1400 1399
1401static int de_close (struct net_device *dev) 1400static int de_close (struct net_device *dev)
1402{ 1401{
1403 struct de_private *de = dev->priv; 1402 struct de_private *de = netdev_priv(dev);
1404 unsigned long flags; 1403 unsigned long flags;
1405 1404
1406 if (netif_msg_ifdown(de)) 1405 if (netif_msg_ifdown(de))
@@ -1423,7 +1422,7 @@ static int de_close (struct net_device *dev)
1423 1422
1424static void de_tx_timeout (struct net_device *dev) 1423static void de_tx_timeout (struct net_device *dev)
1425{ 1424{
1426 struct de_private *de = dev->priv; 1425 struct de_private *de = netdev_priv(dev);
1427 1426
1428 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n", 1427 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1429 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus), 1428 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
@@ -1574,7 +1573,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1574 1573
1575static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info) 1574static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1576{ 1575{
1577 struct de_private *de = dev->priv; 1576 struct de_private *de = netdev_priv(dev);
1578 1577
1579 strcpy (info->driver, DRV_NAME); 1578 strcpy (info->driver, DRV_NAME);
1580 strcpy (info->version, DRV_VERSION); 1579 strcpy (info->version, DRV_VERSION);
@@ -1589,7 +1588,7 @@ static int de_get_regs_len(struct net_device *dev)
1589 1588
1590static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 1589static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1591{ 1590{
1592 struct de_private *de = dev->priv; 1591 struct de_private *de = netdev_priv(dev);
1593 int rc; 1592 int rc;
1594 1593
1595 spin_lock_irq(&de->lock); 1594 spin_lock_irq(&de->lock);
@@ -1601,7 +1600,7 @@ static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1601 1600
1602static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 1601static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1603{ 1602{
1604 struct de_private *de = dev->priv; 1603 struct de_private *de = netdev_priv(dev);
1605 int rc; 1604 int rc;
1606 1605
1607 spin_lock_irq(&de->lock); 1606 spin_lock_irq(&de->lock);
@@ -1613,14 +1612,14 @@ static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1613 1612
1614static u32 de_get_msglevel(struct net_device *dev) 1613static u32 de_get_msglevel(struct net_device *dev)
1615{ 1614{
1616 struct de_private *de = dev->priv; 1615 struct de_private *de = netdev_priv(dev);
1617 1616
1618 return de->msg_enable; 1617 return de->msg_enable;
1619} 1618}
1620 1619
1621static void de_set_msglevel(struct net_device *dev, u32 msglvl) 1620static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1622{ 1621{
1623 struct de_private *de = dev->priv; 1622 struct de_private *de = netdev_priv(dev);
1624 1623
1625 de->msg_enable = msglvl; 1624 de->msg_enable = msglvl;
1626} 1625}
@@ -1628,7 +1627,7 @@ static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1628static int de_get_eeprom(struct net_device *dev, 1627static int de_get_eeprom(struct net_device *dev,
1629 struct ethtool_eeprom *eeprom, u8 *data) 1628 struct ethtool_eeprom *eeprom, u8 *data)
1630{ 1629{
1631 struct de_private *de = dev->priv; 1630 struct de_private *de = netdev_priv(dev);
1632 1631
1633 if (!de->ee_data) 1632 if (!de->ee_data)
1634 return -EOPNOTSUPP; 1633 return -EOPNOTSUPP;
@@ -1642,7 +1641,7 @@ static int de_get_eeprom(struct net_device *dev,
1642 1641
1643static int de_nway_reset(struct net_device *dev) 1642static int de_nway_reset(struct net_device *dev)
1644{ 1643{
1645 struct de_private *de = dev->priv; 1644 struct de_private *de = netdev_priv(dev);
1646 u32 status; 1645 u32 status;
1647 1646
1648 if (de->media_type != DE_MEDIA_TP_AUTO) 1647 if (de->media_type != DE_MEDIA_TP_AUTO)
@@ -1661,7 +1660,7 @@ static int de_nway_reset(struct net_device *dev)
1661static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs, 1660static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1662 void *data) 1661 void *data)
1663{ 1662{
1664 struct de_private *de = dev->priv; 1663 struct de_private *de = netdev_priv(dev);
1665 1664
1666 regs->version = (DE_REGS_VER << 2) | de->de21040; 1665 regs->version = (DE_REGS_VER << 2) | de->de21040;
1667 1666
@@ -1692,9 +1691,9 @@ static void __devinit de21040_get_mac_address (struct de_private *de)
1692 1691
1693 for (i = 0; i < 6; i++) { 1692 for (i = 0; i < 6; i++) {
1694 int value, boguscnt = 100000; 1693 int value, boguscnt = 100000;
1695 do 1694 do {
1696 value = dr32(ROMCmd); 1695 value = dr32(ROMCmd);
1697 while (value < 0 && --boguscnt > 0); 1696 } while (value < 0 && --boguscnt > 0);
1698 de->dev->dev_addr[i] = value; 1697 de->dev->dev_addr[i] = value;
1699 udelay(1); 1698 udelay(1);
1700 if (boguscnt <= 0) 1699 if (boguscnt <= 0)
@@ -1932,7 +1931,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
1932 void __iomem *regs; 1931 void __iomem *regs;
1933 unsigned long pciaddr; 1932 unsigned long pciaddr;
1934 static int board_idx = -1; 1933 static int board_idx = -1;
1935 DECLARE_MAC_BUF(mac);
1936 1934
1937 board_idx++; 1935 board_idx++;
1938 1936
@@ -1956,7 +1954,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
1956 dev->tx_timeout = de_tx_timeout; 1954 dev->tx_timeout = de_tx_timeout;
1957 dev->watchdog_timeo = TX_TIMEOUT; 1955 dev->watchdog_timeo = TX_TIMEOUT;
1958 1956
1959 de = dev->priv; 1957 de = netdev_priv(dev);
1960 de->de21040 = ent->driver_data == 0 ? 1 : 0; 1958 de->de21040 = ent->driver_data == 0 ? 1 : 0;
1961 de->pdev = pdev; 1959 de->pdev = pdev;
1962 de->dev = dev; 1960 de->dev = dev;
@@ -2046,11 +2044,11 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2046 goto err_out_iomap; 2044 goto err_out_iomap;
2047 2045
2048 /* print info about board and interface just registered */ 2046 /* print info about board and interface just registered */
2049 printk (KERN_INFO "%s: %s at 0x%lx, %s, IRQ %d\n", 2047 printk (KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
2050 dev->name, 2048 dev->name,
2051 de->de21040 ? "21040" : "21041", 2049 de->de21040 ? "21040" : "21041",
2052 dev->base_addr, 2050 dev->base_addr,
2053 print_mac(mac, dev->dev_addr), 2051 dev->dev_addr,
2054 dev->irq); 2052 dev->irq);
2055 2053
2056 pci_set_drvdata(pdev, dev); 2054 pci_set_drvdata(pdev, dev);
@@ -2078,7 +2076,7 @@ err_out_free:
2078static void __devexit de_remove_one (struct pci_dev *pdev) 2076static void __devexit de_remove_one (struct pci_dev *pdev)
2079{ 2077{
2080 struct net_device *dev = pci_get_drvdata(pdev); 2078 struct net_device *dev = pci_get_drvdata(pdev);
2081 struct de_private *de = dev->priv; 2079 struct de_private *de = netdev_priv(dev);
2082 2080
2083 BUG_ON(!dev); 2081 BUG_ON(!dev);
2084 unregister_netdev(dev); 2082 unregister_netdev(dev);
@@ -2095,7 +2093,7 @@ static void __devexit de_remove_one (struct pci_dev *pdev)
2095static int de_suspend (struct pci_dev *pdev, pm_message_t state) 2093static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2096{ 2094{
2097 struct net_device *dev = pci_get_drvdata (pdev); 2095 struct net_device *dev = pci_get_drvdata (pdev);
2098 struct de_private *de = dev->priv; 2096 struct de_private *de = netdev_priv(dev);
2099 2097
2100 rtnl_lock(); 2098 rtnl_lock();
2101 if (netif_running (dev)) { 2099 if (netif_running (dev)) {
@@ -2130,7 +2128,7 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2130static int de_resume (struct pci_dev *pdev) 2128static int de_resume (struct pci_dev *pdev)
2131{ 2129{
2132 struct net_device *dev = pci_get_drvdata (pdev); 2130 struct net_device *dev = pci_get_drvdata (pdev);
2133 struct de_private *de = dev->priv; 2131 struct de_private *de = netdev_priv(dev);
2134 int retval = 0; 2132 int retval = 0;
2135 2133
2136 rtnl_lock(); 2134 rtnl_lock();
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 6444cbec0bdc..67bfd6f43366 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1085,7 +1085,6 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1085 struct de4x5_private *lp = netdev_priv(dev); 1085 struct de4x5_private *lp = netdev_priv(dev);
1086 struct pci_dev *pdev = NULL; 1086 struct pci_dev *pdev = NULL;
1087 int i, status=0; 1087 int i, status=0;
1088 DECLARE_MAC_BUF(mac);
1089 1088
1090 gendev->driver_data = dev; 1089 gendev->driver_data = dev;
1091 1090
@@ -1119,10 +1118,10 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1119 } 1118 }
1120 1119
1121 dev->base_addr = iobase; 1120 dev->base_addr = iobase;
1122 printk ("%s: %s at 0x%04lx", gendev->bus_id, name, iobase); 1121 printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
1123 1122
1124 status = get_hw_addr(dev); 1123 status = get_hw_addr(dev);
1125 printk(", h/w address %s\n", print_mac(mac, dev->dev_addr)); 1124 printk(", h/w address %pM\n", dev->dev_addr);
1126 1125
1127 if (status != 0) { 1126 if (status != 0) {
1128 printk(" which has an Ethernet PROM CRC error.\n"); 1127 printk(" which has an Ethernet PROM CRC error.\n");
@@ -1154,7 +1153,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1154 } 1153 }
1155 } 1154 }
1156 lp->fdx = lp->params.fdx; 1155 lp->fdx = lp->params.fdx;
1157 sprintf(lp->adapter_name,"%s (%s)", name, gendev->bus_id); 1156 sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
1158 1157
1159 lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc); 1158 lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
1160#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY) 1159#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
@@ -1647,7 +1646,6 @@ de4x5_rx(struct net_device *dev)
1647 netif_rx(skb); 1646 netif_rx(skb);
1648 1647
1649 /* Update stats */ 1648 /* Update stats */
1650 dev->last_rx = jiffies;
1651 lp->stats.rx_packets++; 1649 lp->stats.rx_packets++;
1652 lp->stats.rx_bytes += pkt_len; 1650 lp->stats.rx_bytes += pkt_len;
1653 } 1651 }
@@ -5401,7 +5399,6 @@ static void
5401de4x5_dbg_srom(struct de4x5_srom *p) 5399de4x5_dbg_srom(struct de4x5_srom *p)
5402{ 5400{
5403 int i; 5401 int i;
5404 DECLARE_MAC_BUF(mac);
5405 5402
5406 if (de4x5_debug & DEBUG_SROM) { 5403 if (de4x5_debug & DEBUG_SROM) {
5407 printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id)); 5404 printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
@@ -5410,7 +5407,7 @@ de4x5_dbg_srom(struct de4x5_srom *p)
5410 printk("SROM version: %02x\n", (u_char)(p->version)); 5407 printk("SROM version: %02x\n", (u_char)(p->version));
5411 printk("# controllers: %02x\n", (u_char)(p->num_controllers)); 5408 printk("# controllers: %02x\n", (u_char)(p->num_controllers));
5412 5409
5413 printk("Hardware Address: %s\n", print_mac(mac, p->ieee_addr)); 5410 printk("Hardware Address: %pM\n", p->ieee_addr);
5414 printk("CRC checksum: %04x\n", (u_short)(p->chksum)); 5411 printk("CRC checksum: %04x\n", (u_short)(p->chksum));
5415 for (i=0; i<64; i++) { 5412 for (i=0; i<64; i++) {
5416 printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i)); 5413 printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
@@ -5424,12 +5421,10 @@ static void
5424de4x5_dbg_rx(struct sk_buff *skb, int len) 5421de4x5_dbg_rx(struct sk_buff *skb, int len)
5425{ 5422{
5426 int i, j; 5423 int i, j;
5427 DECLARE_MAC_BUF(mac);
5428 DECLARE_MAC_BUF(mac2);
5429 5424
5430 if (de4x5_debug & DEBUG_RX) { 5425 if (de4x5_debug & DEBUG_RX) {
5431 printk("R: %s <- %s len/SAP:%02x%02x [%d]\n", 5426 printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
5432 print_mac(mac, skb->data), print_mac(mac2, &skb->data[6]), 5427 skb->data, &skb->data[6],
5433 (u_char)skb->data[12], 5428 (u_char)skb->data[12],
5434 (u_char)skb->data[13], 5429 (u_char)skb->data[13],
5435 len); 5430 len);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index c91852f49a48..28a5c51b43a0 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -362,7 +362,6 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
362 struct net_device *dev; 362 struct net_device *dev;
363 u32 pci_pmr; 363 u32 pci_pmr;
364 int i, err; 364 int i, err;
365 DECLARE_MAC_BUF(mac);
366 365
367 DMFE_DBUG(0, "dmfe_init_one()", 0); 366 DMFE_DBUG(0, "dmfe_init_one()", 0);
368 367
@@ -475,12 +474,11 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
475 if (err) 474 if (err)
476 goto err_out_free_buf; 475 goto err_out_free_buf;
477 476
478 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, " 477 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, %pM, irq %d.\n",
479 "%s, irq %d.\n",
480 dev->name, 478 dev->name,
481 ent->driver_data >> 16, 479 ent->driver_data >> 16,
482 pci_name(pdev), 480 pci_name(pdev),
483 print_mac(mac, dev->dev_addr), 481 dev->dev_addr,
484 dev->irq); 482 dev->irq);
485 483
486 pci_set_master(pdev); 484 pci_set_master(pdev);
@@ -1010,7 +1008,6 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
1010 1008
1011 skb->protocol = eth_type_trans(skb, dev); 1009 skb->protocol = eth_type_trans(skb, dev);
1012 netif_rx(skb); 1010 netif_rx(skb);
1013 dev->last_rx = jiffies;
1014 db->stats.rx_packets++; 1011 db->stats.rx_packets++;
1015 db->stats.rx_bytes += rxlen; 1012 db->stats.rx_bytes += rxlen;
1016 } 1013 }
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 0dcced1263b9..391acd32a6a5 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -337,7 +337,7 @@ int __devinit tulip_read_eeprom(struct net_device *dev, int location, int addr_l
337{ 337{
338 int i; 338 int i;
339 unsigned retval = 0; 339 unsigned retval = 0;
340 struct tulip_private *tp = dev->priv; 340 struct tulip_private *tp = netdev_priv(dev);
341 void __iomem *ee_addr = tp->base_addr + CSR9; 341 void __iomem *ee_addr = tp->base_addr + CSR9;
342 int read_cmd = location | (EE_READ_CMD << addr_len); 342 int read_cmd = location | (EE_READ_CMD << addr_len);
343 343
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index c6bad987d63e..6c3428a37c0b 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -103,7 +103,7 @@ void oom_timer(unsigned long data)
103{ 103{
104 struct net_device *dev = (struct net_device *)data; 104 struct net_device *dev = (struct net_device *)data;
105 struct tulip_private *tp = netdev_priv(dev); 105 struct tulip_private *tp = netdev_priv(dev);
106 netif_rx_schedule(dev, &tp->napi); 106 netif_rx_schedule(&tp->napi);
107} 107}
108 108
109int tulip_poll(struct napi_struct *napi, int budget) 109int tulip_poll(struct napi_struct *napi, int budget)
@@ -231,7 +231,6 @@ int tulip_poll(struct napi_struct *napi, int budget)
231 231
232 netif_receive_skb(skb); 232 netif_receive_skb(skb);
233 233
234 dev->last_rx = jiffies;
235 tp->stats.rx_packets++; 234 tp->stats.rx_packets++;
236 tp->stats.rx_bytes += pkt_len; 235 tp->stats.rx_bytes += pkt_len;
237 } 236 }
@@ -301,7 +300,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
301 300
302 /* Remove us from polling list and enable RX intr. */ 301 /* Remove us from polling list and enable RX intr. */
303 302
304 netif_rx_complete(dev, napi); 303 netif_rx_complete(napi);
305 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); 304 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
306 305
307 /* The last op happens after poll completion. Which means the following: 306 /* The last op happens after poll completion. Which means the following:
@@ -337,7 +336,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
337 * before we did netif_rx_complete(). See? We would lose it. */ 336 * before we did netif_rx_complete(). See? We would lose it. */
338 337
339 /* remove ourselves from the polling list */ 338 /* remove ourselves from the polling list */
340 netif_rx_complete(dev, napi); 339 netif_rx_complete(napi);
341 340
342 return work_done; 341 return work_done;
343} 342}
@@ -444,7 +443,6 @@ static int tulip_rx(struct net_device *dev)
444 443
445 netif_rx(skb); 444 netif_rx(skb);
446 445
447 dev->last_rx = jiffies;
448 tp->stats.rx_packets++; 446 tp->stats.rx_packets++;
449 tp->stats.rx_bytes += pkt_len; 447 tp->stats.rx_bytes += pkt_len;
450 } 448 }
@@ -521,7 +519,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
521 rxd++; 519 rxd++;
522 /* Mask RX intrs and add the device to poll list. */ 520 /* Mask RX intrs and add the device to poll list. */
523 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); 521 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
524 netif_rx_schedule(dev, &tp->napi); 522 netif_rx_schedule(&tp->napi);
525 523
526 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) 524 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
527 break; 525 break;
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index cafa89e60167..ff84babb3ff3 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1050,13 +1050,11 @@ static void set_rx_mode(struct net_device *dev)
1050 filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 1050 filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1051 filterbit &= 0x3f; 1051 filterbit &= 0x3f;
1052 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); 1052 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1053 if (tulip_debug > 2) { 1053 if (tulip_debug > 2)
1054 DECLARE_MAC_BUF(mac); 1054 printk(KERN_INFO "%s: Added filter for %pM"
1055 printk(KERN_INFO "%s: Added filter for %s"
1056 " %8.8x bit %d.\n", 1055 " %8.8x bit %d.\n",
1057 dev->name, print_mac(mac, mclist->dmi_addr), 1056 dev->name, mclist->dmi_addr,
1058 ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit); 1057 ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
1059 }
1060 } 1058 }
1061 if (mc_filter[0] == tp->mc_filter[0] && 1059 if (mc_filter[0] == tp->mc_filter[0] &&
1062 mc_filter[1] == tp->mc_filter[1]) 1060 mc_filter[1] == tp->mc_filter[1])
@@ -1250,7 +1248,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1250 const char *chip_name = tulip_tbl[chip_idx].chip_name; 1248 const char *chip_name = tulip_tbl[chip_idx].chip_name;
1251 unsigned int eeprom_missing = 0; 1249 unsigned int eeprom_missing = 0;
1252 unsigned int force_csr0 = 0; 1250 unsigned int force_csr0 = 0;
1253 DECLARE_MAC_BUF(mac);
1254 1251
1255#ifndef MODULE 1252#ifndef MODULE
1256 static int did_version; /* Already printed version info. */ 1253 static int did_version; /* Already printed version info. */
@@ -1432,9 +1429,9 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1432 for (i = 0; i < 3; i++) { 1429 for (i = 0; i < 3; i++) {
1433 int value, boguscnt = 100000; 1430 int value, boguscnt = 100000;
1434 iowrite32(0x600 | i, ioaddr + 0x98); 1431 iowrite32(0x600 | i, ioaddr + 0x98);
1435 do 1432 do {
1436 value = ioread32(ioaddr + CSR9); 1433 value = ioread32(ioaddr + CSR9);
1437 while (value < 0 && --boguscnt > 0); 1434 } while (value < 0 && --boguscnt > 0);
1438 put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i); 1435 put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1439 sum += value & 0xffff; 1436 sum += value & 0xffff;
1440 } 1437 }
@@ -1635,7 +1632,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1635 1632
1636 if (eeprom_missing) 1633 if (eeprom_missing)
1637 printk(" EEPROM not present,"); 1634 printk(" EEPROM not present,");
1638 printk(" %s", print_mac(mac, dev->dev_addr)); 1635 printk(" %pM", dev->dev_addr);
1639 printk(", IRQ %d.\n", irq); 1636 printk(", IRQ %d.\n", irq);
1640 1637
1641 if (tp->chip_id == PNIC2) 1638 if (tp->chip_id == PNIC2)
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index e9e628621639..00cbc5251dcc 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -261,7 +261,6 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
261 struct uli526x_board_info *db; /* board information structure */ 261 struct uli526x_board_info *db; /* board information structure */
262 struct net_device *dev; 262 struct net_device *dev;
263 int i, err; 263 int i, err;
264 DECLARE_MAC_BUF(mac);
265 264
266 ULI526X_DBUG(0, "uli526x_init_one()", 0); 265 ULI526X_DBUG(0, "uli526x_init_one()", 0);
267 266
@@ -379,9 +378,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
379 if (err) 378 if (err)
380 goto err_out_res; 379 goto err_out_res;
381 380
382 printk(KERN_INFO "%s: ULi M%04lx at pci%s, %s, irq %d.\n", 381 printk(KERN_INFO "%s: ULi M%04lx at pci%s, %pM, irq %d.\n",
383 dev->name,ent->driver_data >> 16,pci_name(pdev), 382 dev->name,ent->driver_data >> 16,pci_name(pdev),
384 print_mac(mac, dev->dev_addr), dev->irq); 383 dev->dev_addr, dev->irq);
385 384
386 pci_set_master(pdev); 385 pci_set_master(pdev);
387 386
@@ -855,7 +854,6 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
855 854
856 skb->protocol = eth_type_trans(skb, dev); 855 skb->protocol = eth_type_trans(skb, dev);
857 netif_rx(skb); 856 netif_rx(skb);
858 dev->last_rx = jiffies;
859 db->stats.rx_packets++; 857 db->stats.rx_packets++;
860 db->stats.rx_bytes += rxlen; 858 db->stats.rx_bytes += rxlen;
861 859
@@ -892,7 +890,7 @@ static struct net_device_stats * uli526x_get_stats(struct net_device *dev)
892 890
893static void uli526x_set_filter_mode(struct net_device * dev) 891static void uli526x_set_filter_mode(struct net_device * dev)
894{ 892{
895 struct uli526x_board_info *db = dev->priv; 893 struct uli526x_board_info *db = netdev_priv(dev);
896 unsigned long flags; 894 unsigned long flags;
897 895
898 ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0); 896 ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0);
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 50068194c163..022d99af8646 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -355,7 +355,6 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
355 int irq; 355 int irq;
356 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; 356 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
357 void __iomem *ioaddr; 357 void __iomem *ioaddr;
358 DECLARE_MAC_BUF(mac);
359 358
360 i = pci_enable_device(pdev); 359 i = pci_enable_device(pdev);
361 if (i) return i; 360 if (i) return i;
@@ -435,9 +434,9 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
435 if (i) 434 if (i)
436 goto err_out_cleardev; 435 goto err_out_cleardev;
437 436
438 printk(KERN_INFO "%s: %s at %p, %s, IRQ %d.\n", 437 printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
439 dev->name, pci_id_tbl[chip_idx].name, ioaddr, 438 dev->name, pci_id_tbl[chip_idx].name, ioaddr,
440 print_mac(mac, dev->dev_addr), irq); 439 dev->dev_addr, irq);
441 440
442 if (np->drv_flags & CanHaveMII) { 441 if (np->drv_flags & CanHaveMII) {
443 int phy, phy_idx = 0; 442 int phy, phy_idx = 0;
@@ -1245,20 +1244,15 @@ static int netdev_rx(struct net_device *dev)
1245 } 1244 }
1246#ifndef final_version /* Remove after testing. */ 1245#ifndef final_version /* Remove after testing. */
1247 /* You will want this info for the initial debug. */ 1246 /* You will want this info for the initial debug. */
1248 if (debug > 5) { 1247 if (debug > 5)
1249 DECLARE_MAC_BUF(mac); 1248 printk(KERN_DEBUG " Rx data %pM %pM"
1250 DECLARE_MAC_BUF(mac2);
1251
1252 printk(KERN_DEBUG " Rx data %s %s"
1253 " %2.2x%2.2x %d.%d.%d.%d.\n", 1249 " %2.2x%2.2x %d.%d.%d.%d.\n",
1254 print_mac(mac, &skb->data[0]), print_mac(mac2, &skb->data[6]), 1250 &skb->data[0], &skb->data[6],
1255 skb->data[12], skb->data[13], 1251 skb->data[12], skb->data[13],
1256 skb->data[14], skb->data[15], skb->data[16], skb->data[17]); 1252 skb->data[14], skb->data[15], skb->data[16], skb->data[17]);
1257 }
1258#endif 1253#endif
1259 skb->protocol = eth_type_trans(skb, dev); 1254 skb->protocol = eth_type_trans(skb, dev);
1260 netif_rx(skb); 1255 netif_rx(skb);
1261 dev->last_rx = jiffies;
1262 np->stats.rx_packets++; 1256 np->stats.rx_packets++;
1263 np->stats.rx_bytes += pkt_len; 1257 np->stats.rx_bytes += pkt_len;
1264 } 1258 }
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 6b93d0169116..13c8703ecb9f 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1072,7 +1072,6 @@ static void read_mac_address(struct xircom_private *card)
1072 unsigned char j, tuple, link, data_id, data_count; 1072 unsigned char j, tuple, link, data_id, data_count;
1073 unsigned long flags; 1073 unsigned long flags;
1074 int i; 1074 int i;
1075 DECLARE_MAC_BUF(mac);
1076 1075
1077 enter("read_mac_address"); 1076 enter("read_mac_address");
1078 1077
@@ -1102,7 +1101,7 @@ static void read_mac_address(struct xircom_private *card)
1102 } 1101 }
1103 } 1102 }
1104 spin_unlock_irqrestore(&card->lock, flags); 1103 spin_unlock_irqrestore(&card->lock, flags);
1105 pr_debug(" %s\n", print_mac(mac, card->dev->dev_addr)); 1104 pr_debug(" %pM\n", card->dev->dev_addr);
1106 leave("read_mac_address"); 1105 leave("read_mac_address");
1107} 1106}
1108 1107
@@ -1202,7 +1201,6 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
1202 skb_put(skb, pkt_len); 1201 skb_put(skb, pkt_len);
1203 skb->protocol = eth_type_trans(skb, dev); 1202 skb->protocol = eth_type_trans(skb, dev);
1204 netif_rx(skb); 1203 netif_rx(skb);
1205 dev->last_rx = jiffies;
1206 card->stats.rx_packets++; 1204 card->stats.rx_packets++;
1207 card->stats.rx_bytes += pkt_len; 1205 card->stats.rx_bytes += pkt_len;
1208 1206
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 55dc70c6b4db..666c1d98cdaf 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -305,6 +305,23 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
305 return 0; 305 return 0;
306} 306}
307 307
308static const struct net_device_ops tun_netdev_ops = {
309 .ndo_open = tun_net_open,
310 .ndo_stop = tun_net_close,
311 .ndo_start_xmit = tun_net_xmit,
312 .ndo_change_mtu = tun_net_change_mtu,
313};
314
315static const struct net_device_ops tap_netdev_ops = {
316 .ndo_open = tun_net_open,
317 .ndo_stop = tun_net_close,
318 .ndo_start_xmit = tun_net_xmit,
319 .ndo_change_mtu = tun_net_change_mtu,
320 .ndo_set_multicast_list = tun_net_mclist,
321 .ndo_set_mac_address = eth_mac_addr,
322 .ndo_validate_addr = eth_validate_addr,
323};
324
308/* Initialize net device. */ 325/* Initialize net device. */
309static void tun_net_init(struct net_device *dev) 326static void tun_net_init(struct net_device *dev)
310{ 327{
@@ -312,11 +329,12 @@ static void tun_net_init(struct net_device *dev)
312 329
313 switch (tun->flags & TUN_TYPE_MASK) { 330 switch (tun->flags & TUN_TYPE_MASK) {
314 case TUN_TUN_DEV: 331 case TUN_TUN_DEV:
332 dev->netdev_ops = &tun_netdev_ops;
333
315 /* Point-to-Point TUN Device */ 334 /* Point-to-Point TUN Device */
316 dev->hard_header_len = 0; 335 dev->hard_header_len = 0;
317 dev->addr_len = 0; 336 dev->addr_len = 0;
318 dev->mtu = 1500; 337 dev->mtu = 1500;
319 dev->change_mtu = tun_net_change_mtu;
320 338
321 /* Zero header length */ 339 /* Zero header length */
322 dev->type = ARPHRD_NONE; 340 dev->type = ARPHRD_NONE;
@@ -325,10 +343,9 @@ static void tun_net_init(struct net_device *dev)
325 break; 343 break;
326 344
327 case TUN_TAP_DEV: 345 case TUN_TAP_DEV:
346 dev->netdev_ops = &tun_netdev_ops;
328 /* Ethernet TAP Device */ 347 /* Ethernet TAP Device */
329 ether_setup(dev); 348 ether_setup(dev);
330 dev->change_mtu = tun_net_change_mtu;
331 dev->set_multicast_list = tun_net_mclist;
332 349
333 random_ether_addr(dev->dev_addr); 350 random_ether_addr(dev->dev_addr);
334 351
@@ -529,7 +546,6 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,
529 } 546 }
530 547
531 netif_rx_ni(skb); 548 netif_rx_ni(skb);
532 tun->dev->last_rx = jiffies;
533 549
534 tun->dev->stats.rx_packets++; 550 tun->dev->stats.rx_packets++;
535 tun->dev->stats.rx_bytes += len; 551 tun->dev->stats.rx_bytes += len;
@@ -676,9 +692,6 @@ static void tun_setup(struct net_device *dev)
676 tun->owner = -1; 692 tun->owner = -1;
677 tun->group = -1; 693 tun->group = -1;
678 694
679 dev->open = tun_net_open;
680 dev->hard_start_xmit = tun_net_xmit;
681 dev->stop = tun_net_close;
682 dev->ethtool_ops = &tun_ethtool_ops; 695 dev->ethtool_ops = &tun_ethtool_ops;
683 dev->destructor = free_netdev; 696 dev->destructor = free_netdev;
684 dev->features |= NETIF_F_NETNS_LOCAL; 697 dev->features |= NETIF_F_NETNS_LOCAL;
@@ -752,6 +765,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
752 return -ENOMEM; 765 return -ENOMEM;
753 766
754 dev_net_set(dev, net); 767 dev_net_set(dev, net);
768
755 tun = netdev_priv(dev); 769 tun = netdev_priv(dev);
756 tun->dev = dev; 770 tun->dev = dev;
757 tun->flags = flags; 771 tun->flags = flags;
@@ -885,7 +899,6 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
885 void __user* argp = (void __user*)arg; 899 void __user* argp = (void __user*)arg;
886 struct ifreq ifr; 900 struct ifreq ifr;
887 int ret; 901 int ret;
888 DECLARE_MAC_BUF(mac);
889 902
890 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) 903 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
891 if (copy_from_user(&ifr, argp, sizeof ifr)) 904 if (copy_from_user(&ifr, argp, sizeof ifr))
@@ -1013,8 +1026,8 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
1013 1026
1014 case SIOCSIFHWADDR: 1027 case SIOCSIFHWADDR:
1015 /* Set hw address */ 1028 /* Set hw address */
1016 DBG(KERN_DEBUG "%s: set hw address: %s\n", 1029 DBG(KERN_DEBUG "%s: set hw address: %pM\n",
1017 tun->dev->name, print_mac(mac, ifr.ifr_hwaddr.sa_data)); 1030 tun->dev->name, ifr.ifr_hwaddr.sa_data);
1018 1031
1019 rtnl_lock(); 1032 rtnl_lock();
1020 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); 1033 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 734ce0977f02..0009f4e34433 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1729,7 +1729,6 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
1729 netif_receive_skb(new_skb); 1729 netif_receive_skb(new_skb);
1730 spin_unlock(&tp->state_lock); 1730 spin_unlock(&tp->state_lock);
1731 1731
1732 tp->dev->last_rx = jiffies;
1733 received++; 1732 received++;
1734 budget--; 1733 budget--;
1735 } 1734 }
@@ -1756,7 +1755,6 @@ static int
1756typhoon_poll(struct napi_struct *napi, int budget) 1755typhoon_poll(struct napi_struct *napi, int budget)
1757{ 1756{
1758 struct typhoon *tp = container_of(napi, struct typhoon, napi); 1757 struct typhoon *tp = container_of(napi, struct typhoon, napi);
1759 struct net_device *dev = tp->dev;
1760 struct typhoon_indexes *indexes = tp->indexes; 1758 struct typhoon_indexes *indexes = tp->indexes;
1761 int work_done; 1759 int work_done;
1762 1760
@@ -1785,7 +1783,7 @@ typhoon_poll(struct napi_struct *napi, int budget)
1785 } 1783 }
1786 1784
1787 if (work_done < budget) { 1785 if (work_done < budget) {
1788 netif_rx_complete(dev, napi); 1786 netif_rx_complete(napi);
1789 iowrite32(TYPHOON_INTR_NONE, 1787 iowrite32(TYPHOON_INTR_NONE,
1790 tp->ioaddr + TYPHOON_REG_INTR_MASK); 1788 tp->ioaddr + TYPHOON_REG_INTR_MASK);
1791 typhoon_post_pci_writes(tp->ioaddr); 1789 typhoon_post_pci_writes(tp->ioaddr);
@@ -1798,7 +1796,7 @@ static irqreturn_t
1798typhoon_interrupt(int irq, void *dev_instance) 1796typhoon_interrupt(int irq, void *dev_instance)
1799{ 1797{
1800 struct net_device *dev = dev_instance; 1798 struct net_device *dev = dev_instance;
1801 struct typhoon *tp = dev->priv; 1799 struct typhoon *tp = netdev_priv(dev);
1802 void __iomem *ioaddr = tp->ioaddr; 1800 void __iomem *ioaddr = tp->ioaddr;
1803 u32 intr_status; 1801 u32 intr_status;
1804 1802
@@ -1808,10 +1806,10 @@ typhoon_interrupt(int irq, void *dev_instance)
1808 1806
1809 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS); 1807 iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1810 1808
1811 if (netif_rx_schedule_prep(dev, &tp->napi)) { 1809 if (netif_rx_schedule_prep(&tp->napi)) {
1812 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); 1810 iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1813 typhoon_post_pci_writes(ioaddr); 1811 typhoon_post_pci_writes(ioaddr);
1814 __netif_rx_schedule(dev, &tp->napi); 1812 __netif_rx_schedule(&tp->napi);
1815 } else { 1813 } else {
1816 printk(KERN_ERR "%s: Error, poll already scheduled\n", 1814 printk(KERN_ERR "%s: Error, poll already scheduled\n",
1817 dev->name); 1815 dev->name);
@@ -2311,7 +2309,6 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2311 struct cmd_desc xp_cmd; 2309 struct cmd_desc xp_cmd;
2312 struct resp_desc xp_resp[3]; 2310 struct resp_desc xp_resp[3];
2313 int err = 0; 2311 int err = 0;
2314 DECLARE_MAC_BUF(mac);
2315 2312
2316 if(!did_version++) 2313 if(!did_version++)
2317 printk(KERN_INFO "%s", version); 2314 printk(KERN_INFO "%s", version);
@@ -2526,11 +2523,11 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2526 2523
2527 pci_set_drvdata(pdev, dev); 2524 pci_set_drvdata(pdev, dev);
2528 2525
2529 printk(KERN_INFO "%s: %s at %s 0x%llx, %s\n", 2526 printk(KERN_INFO "%s: %s at %s 0x%llx, %pM\n",
2530 dev->name, typhoon_card_info[card_id].name, 2527 dev->name, typhoon_card_info[card_id].name,
2531 use_mmio ? "MMIO" : "IO", 2528 use_mmio ? "MMIO" : "IO",
2532 (unsigned long long)pci_resource_start(pdev, use_mmio), 2529 (unsigned long long)pci_resource_start(pdev, use_mmio),
2533 print_mac(mac, dev->dev_addr)); 2530 dev->dev_addr);
2534 2531
2535 /* xp_resp still contains the response to the READ_VERSIONS command. 2532 /* xp_resp still contains the response to the READ_VERSIONS command.
2536 * For debugging, let the user know what version he has. 2533 * For debugging, let the user know what version he has.
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index c87747bb24c5..7d5a1303e30d 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -188,17 +188,6 @@ static void mem_disp(u8 *addr, int size)
188} 188}
189#endif /* DEBUG */ 189#endif /* DEBUG */
190 190
191#ifdef CONFIG_UGETH_FILTERING
192static void enqueue(struct list_head *node, struct list_head *lh)
193{
194 unsigned long flags;
195
196 spin_lock_irqsave(&ugeth_lock, flags);
197 list_add_tail(node, lh);
198 spin_unlock_irqrestore(&ugeth_lock, flags);
199}
200#endif /* CONFIG_UGETH_FILTERING */
201
202static struct list_head *dequeue(struct list_head *lh) 191static struct list_head *dequeue(struct list_head *lh)
203{ 192{
204 unsigned long flags; 193 unsigned long flags;
@@ -391,23 +380,6 @@ static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
391} 380}
392#endif 381#endif
393 382
394#ifdef CONFIG_UGETH_FILTERING
395static struct enet_addr_container *get_enet_addr_container(void)
396{
397 struct enet_addr_container *enet_addr_cont;
398
399 /* allocate memory */
400 enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL);
401 if (!enet_addr_cont) {
402 ugeth_err("%s: No memory for enet_addr_container object.",
403 __func__);
404 return NULL;
405 }
406
407 return enet_addr_cont;
408}
409#endif /* CONFIG_UGETH_FILTERING */
410
411static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont) 383static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
412{ 384{
413 kfree(enet_addr_cont); 385 kfree(enet_addr_cont);
@@ -420,28 +392,6 @@ static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
420 out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]); 392 out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
421} 393}
422 394
423#ifdef CONFIG_UGETH_FILTERING
424static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
425 u8 *p_enet_addr, u8 paddr_num)
426{
427 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
428
429 if (!(paddr_num < NUM_OF_PADDRS)) {
430 ugeth_warn("%s: Illegal paddr_num.", __func__);
431 return -EINVAL;
432 }
433
434 p_82xx_addr_filt =
435 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
436 addressfiltering;
437
438 /* Ethernet frames are defined in Little Endian mode, */
439 /* therefore to insert the address we reverse the bytes. */
440 set_mac_addr(&p_82xx_addr_filt->paddr[paddr_num].h, p_enet_addr);
441 return 0;
442}
443#endif /* CONFIG_UGETH_FILTERING */
444
445static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) 395static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
446{ 396{
447 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 397 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
@@ -1615,8 +1565,8 @@ static int init_phy(struct net_device *dev)
1615 priv->oldspeed = 0; 1565 priv->oldspeed = 0;
1616 priv->oldduplex = -1; 1566 priv->oldduplex = -1;
1617 1567
1618 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->ug_info->mdio_bus, 1568 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->ug_info->mdio_bus,
1619 priv->ug_info->phy_address); 1569 priv->ug_info->phy_address);
1620 1570
1621 phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface); 1571 phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);
1622 1572
@@ -1647,6 +1597,7 @@ static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
1647 struct ucc_fast_private *uccf; 1597 struct ucc_fast_private *uccf;
1648 u32 cecr_subblock; 1598 u32 cecr_subblock;
1649 u32 temp; 1599 u32 temp;
1600 int i = 10;
1650 1601
1651 uccf = ugeth->uccf; 1602 uccf = ugeth->uccf;
1652 1603
@@ -1664,8 +1615,9 @@ static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
1664 1615
1665 /* Wait for command to complete */ 1616 /* Wait for command to complete */
1666 do { 1617 do {
1618 msleep(10);
1667 temp = in_be32(uccf->p_ucce); 1619 temp = in_be32(uccf->p_ucce);
1668 } while (!(temp & UCCE_GRA)); 1620 } while (!(temp & UCCE_GRA) && --i);
1669 1621
1670 uccf->stopped_tx = 1; 1622 uccf->stopped_tx = 1;
1671 1623
@@ -1677,6 +1629,7 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
1677 struct ucc_fast_private *uccf; 1629 struct ucc_fast_private *uccf;
1678 u32 cecr_subblock; 1630 u32 cecr_subblock;
1679 u8 temp; 1631 u8 temp;
1632 int i = 10;
1680 1633
1681 uccf = ugeth->uccf; 1634 uccf = ugeth->uccf;
1682 1635
@@ -1694,9 +1647,9 @@ static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
1694 ucc_num); 1647 ucc_num);
1695 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, 1648 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1696 QE_CR_PROTOCOL_ETHERNET, 0); 1649 QE_CR_PROTOCOL_ETHERNET, 0);
1697 1650 msleep(10);
1698 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); 1651 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
1699 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX)); 1652 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i);
1700 1653
1701 uccf->stopped_rx = 1; 1654 uccf->stopped_rx = 1;
1702 1655
@@ -1799,196 +1752,6 @@ static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
1799#endif 1752#endif
1800} 1753}
1801 1754
1802#ifdef CONFIG_UGETH_FILTERING
1803static int ugeth_ext_filtering_serialize_tad(struct ucc_geth_tad_params *
1804 p_UccGethTadParams,
1805 struct qe_fltr_tad *qe_fltr_tad)
1806{
1807 u16 temp;
1808
1809 /* Zero serialized TAD */
1810 memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);
1811
1812 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */
1813 if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
1814 (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
1815 || (p_UccGethTadParams->vnontag_op !=
1816 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
1817 )
1818 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
1819 if (p_UccGethTadParams->reject_frame)
1820 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
1821 temp =
1822 (u16) (((u16) p_UccGethTadParams->
1823 vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
1824 qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */
1825
1826 qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */
1827 if (p_UccGethTadParams->vnontag_op ==
1828 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
1829 qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
1830 qe_fltr_tad->serialized[1] |=
1831 p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;
1832
1833 qe_fltr_tad->serialized[2] |=
1834 p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
1835 /* upper bits */
1836 qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
1837 /* lower bits */
1838 qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);
1839
1840 return 0;
1841}
1842
1843static struct enet_addr_container_t
1844 *ugeth_82xx_filtering_get_match_addr_in_hash(struct ucc_geth_private *ugeth,
1845 struct enet_addr *p_enet_addr)
1846{
1847 struct enet_addr_container *enet_addr_cont;
1848 struct list_head *p_lh;
1849 u16 i, num;
1850 int32_t j;
1851 u8 *p_counter;
1852
1853 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
1854 p_lh = &ugeth->group_hash_q;
1855 p_counter = &(ugeth->numGroupAddrInHash);
1856 } else {
1857 p_lh = &ugeth->ind_hash_q;
1858 p_counter = &(ugeth->numIndAddrInHash);
1859 }
1860
1861 if (!p_lh)
1862 return NULL;
1863
1864 num = *p_counter;
1865
1866 for (i = 0; i < num; i++) {
1867 enet_addr_cont =
1868 (struct enet_addr_container *)
1869 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
1870 for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
1871 if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
1872 break;
1873 if (j == 0)
1874 return enet_addr_cont; /* Found */
1875 }
1876 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
1877 }
1878 return NULL;
1879}
1880
1881static int ugeth_82xx_filtering_add_addr_in_hash(struct ucc_geth_private *ugeth,
1882 struct enet_addr *p_enet_addr)
1883{
1884 enum ucc_geth_enet_address_recognition_location location;
1885 struct enet_addr_container *enet_addr_cont;
1886 struct list_head *p_lh;
1887 u8 i;
1888 u32 limit;
1889 u8 *p_counter;
1890
1891 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
1892 p_lh = &ugeth->group_hash_q;
1893 limit = ugeth->ug_info->maxGroupAddrInHash;
1894 location =
1895 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
1896 p_counter = &(ugeth->numGroupAddrInHash);
1897 } else {
1898 p_lh = &ugeth->ind_hash_q;
1899 limit = ugeth->ug_info->maxIndAddrInHash;
1900 location =
1901 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
1902 p_counter = &(ugeth->numIndAddrInHash);
1903 }
1904
1905 if ((enet_addr_cont =
1906 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
1907 list_add(p_lh, &enet_addr_cont->node); /* Put it back */
1908 return 0;
1909 }
1910 if ((!p_lh) || (!(*p_counter < limit)))
1911 return -EBUSY;
1912 if (!(enet_addr_cont = get_enet_addr_container()))
1913 return -ENOMEM;
1914 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
1915 (enet_addr_cont->address)[i] = (*p_enet_addr)[i];
1916 enet_addr_cont->location = location;
1917 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
1918 ++(*p_counter);
1919
1920 hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
1921 return 0;
1922}
1923
1924static int ugeth_82xx_filtering_clear_addr_in_hash(struct ucc_geth_private *ugeth,
1925 struct enet_addr *p_enet_addr)
1926{
1927 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
1928 struct enet_addr_container *enet_addr_cont;
1929 struct ucc_fast_private *uccf;
1930 enum comm_dir comm_dir;
1931 u16 i, num;
1932 struct list_head *p_lh;
1933 u32 *addr_h, *addr_l;
1934 u8 *p_counter;
1935
1936 uccf = ugeth->uccf;
1937
1938 p_82xx_addr_filt =
1939 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
1940 addressfiltering;
1941
1942 if (!
1943 (enet_addr_cont =
1944 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr)))
1945 return -ENOENT;
1946
1947 /* It's been found and removed from the CQ. */
1948 /* Now destroy its container */
1949 put_enet_addr_container(enet_addr_cont);
1950
1951 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
1952 addr_h = &(p_82xx_addr_filt->gaddr_h);
1953 addr_l = &(p_82xx_addr_filt->gaddr_l);
1954 p_lh = &ugeth->group_hash_q;
1955 p_counter = &(ugeth->numGroupAddrInHash);
1956 } else {
1957 addr_h = &(p_82xx_addr_filt->iaddr_h);
1958 addr_l = &(p_82xx_addr_filt->iaddr_l);
1959 p_lh = &ugeth->ind_hash_q;
1960 p_counter = &(ugeth->numIndAddrInHash);
1961 }
1962
1963 comm_dir = 0;
1964 if (uccf->enabled_tx)
1965 comm_dir |= COMM_DIR_TX;
1966 if (uccf->enabled_rx)
1967 comm_dir |= COMM_DIR_RX;
1968 if (comm_dir)
1969 ugeth_disable(ugeth, comm_dir);
1970
1971 /* Clear the hash table. */
1972 out_be32(addr_h, 0x00000000);
1973 out_be32(addr_l, 0x00000000);
1974
1975 /* Add all remaining CQ elements back into hash */
1976 num = --(*p_counter);
1977 for (i = 0; i < num; i++) {
1978 enet_addr_cont =
1979 (struct enet_addr_container *)
1980 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
1981 hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
1982 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
1983 }
1984
1985 if (comm_dir)
1986 ugeth_enable(ugeth, comm_dir);
1987
1988 return 0;
1989}
1990#endif /* CONFIG_UGETH_FILTERING */
1991
1992static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * 1755static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
1993 ugeth, 1756 ugeth,
1994 enum enet_addr_type 1757 enum enet_addr_type
@@ -2051,28 +1814,6 @@ static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
2051 return 0; 1814 return 0;
2052} 1815}
2053 1816
2054#ifdef CONFIG_UGETH_FILTERING
2055static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth,
2056 struct enet_addr *p_enet_addr,
2057 u8 paddr_num)
2058{
2059 int i;
2060
2061 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
2062 ugeth_warn
2063 ("%s: multicast address added to paddr will have no "
2064 "effect - is this what you wanted?",
2065 __func__);
2066
2067 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
2068 /* store address in our database */
2069 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2070 ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
2071 /* put in hardware */
2072 return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
2073}
2074#endif /* CONFIG_UGETH_FILTERING */
2075
2076static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, 1817static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
2077 u8 paddr_num) 1818 u8 paddr_num)
2078{ 1819{
@@ -2215,7 +1956,10 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2215 while (!list_empty(&ugeth->ind_hash_q)) 1956 while (!list_empty(&ugeth->ind_hash_q))
2216 put_enet_addr_container(ENET_ADDR_CONT_ENTRY 1957 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2217 (dequeue(&ugeth->ind_hash_q))); 1958 (dequeue(&ugeth->ind_hash_q)));
2218 1959 if (ugeth->ug_regs) {
1960 iounmap(ugeth->ug_regs);
1961 ugeth->ug_regs = NULL;
1962 }
2219} 1963}
2220 1964
2221static void ucc_geth_set_multi(struct net_device *dev) 1965static void ucc_geth_set_multi(struct net_device *dev)
@@ -2297,8 +2041,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2297 tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); 2041 tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2298 out_be32(&ug_regs->maccfg1, tempval); 2042 out_be32(&ug_regs->maccfg1, tempval);
2299 2043
2300 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
2301
2302 ucc_geth_memclean(ugeth); 2044 ucc_geth_memclean(ugeth);
2303} 2045}
2304 2046
@@ -2419,11 +2161,15 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
2419 if (ucc_fast_init(uf_info, &ugeth->uccf)) { 2161 if (ucc_fast_init(uf_info, &ugeth->uccf)) {
2420 if (netif_msg_probe(ugeth)) 2162 if (netif_msg_probe(ugeth))
2421 ugeth_err("%s: Failed to init uccf.", __func__); 2163 ugeth_err("%s: Failed to init uccf.", __func__);
2422 ucc_geth_memclean(ugeth);
2423 return -ENOMEM; 2164 return -ENOMEM;
2424 } 2165 }
2425 2166
2426 ugeth->ug_regs = (struct ucc_geth __iomem *) ioremap(uf_info->regs, sizeof(struct ucc_geth)); 2167 ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
2168 if (!ugeth->ug_regs) {
2169 if (netif_msg_probe(ugeth))
2170 ugeth_err("%s: Failed to ioremap regs.", __func__);
2171 return -ENOMEM;
2172 }
2427 2173
2428 return 0; 2174 return 0;
2429} 2175}
@@ -2475,7 +2221,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2475 if (netif_msg_ifup(ugeth)) 2221 if (netif_msg_ifup(ugeth))
2476 ugeth_err("%s: Bad number of Rx threads value.", 2222 ugeth_err("%s: Bad number of Rx threads value.",
2477 __func__); 2223 __func__);
2478 ucc_geth_memclean(ugeth);
2479 return -EINVAL; 2224 return -EINVAL;
2480 break; 2225 break;
2481 } 2226 }
@@ -2500,7 +2245,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2500 if (netif_msg_ifup(ugeth)) 2245 if (netif_msg_ifup(ugeth))
2501 ugeth_err("%s: Bad number of Tx threads value.", 2246 ugeth_err("%s: Bad number of Tx threads value.",
2502 __func__); 2247 __func__);
2503 ucc_geth_memclean(ugeth);
2504 return -EINVAL; 2248 return -EINVAL;
2505 break; 2249 break;
2506 } 2250 }
@@ -2554,7 +2298,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2554 if (netif_msg_ifup(ugeth)) 2298 if (netif_msg_ifup(ugeth))
2555 ugeth_err("%s: IPGIFG initialization parameter too large.", 2299 ugeth_err("%s: IPGIFG initialization parameter too large.",
2556 __func__); 2300 __func__);
2557 ucc_geth_memclean(ugeth);
2558 return ret_val; 2301 return ret_val;
2559 } 2302 }
2560 2303
@@ -2572,7 +2315,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2572 if (netif_msg_ifup(ugeth)) 2315 if (netif_msg_ifup(ugeth))
2573 ugeth_err("%s: Half Duplex initialization parameter too large.", 2316 ugeth_err("%s: Half Duplex initialization parameter too large.",
2574 __func__); 2317 __func__);
2575 ucc_geth_memclean(ugeth);
2576 return ret_val; 2318 return ret_val;
2577 } 2319 }
2578 2320
@@ -2627,7 +2369,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2627 ugeth_err 2369 ugeth_err
2628 ("%s: Can not allocate memory for Tx bd rings.", 2370 ("%s: Can not allocate memory for Tx bd rings.",
2629 __func__); 2371 __func__);
2630 ucc_geth_memclean(ugeth);
2631 return -ENOMEM; 2372 return -ENOMEM;
2632 } 2373 }
2633 /* Zero unused end of bd ring, according to spec */ 2374 /* Zero unused end of bd ring, according to spec */
@@ -2663,7 +2404,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2663 ugeth_err 2404 ugeth_err
2664 ("%s: Can not allocate memory for Rx bd rings.", 2405 ("%s: Can not allocate memory for Rx bd rings.",
2665 __func__); 2406 __func__);
2666 ucc_geth_memclean(ugeth);
2667 return -ENOMEM; 2407 return -ENOMEM;
2668 } 2408 }
2669 } 2409 }
@@ -2679,7 +2419,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2679 if (netif_msg_ifup(ugeth)) 2419 if (netif_msg_ifup(ugeth))
2680 ugeth_err("%s: Could not allocate tx_skbuff", 2420 ugeth_err("%s: Could not allocate tx_skbuff",
2681 __func__); 2421 __func__);
2682 ucc_geth_memclean(ugeth);
2683 return -ENOMEM; 2422 return -ENOMEM;
2684 } 2423 }
2685 2424
@@ -2711,7 +2450,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2711 if (netif_msg_ifup(ugeth)) 2450 if (netif_msg_ifup(ugeth))
2712 ugeth_err("%s: Could not allocate rx_skbuff", 2451 ugeth_err("%s: Could not allocate rx_skbuff",
2713 __func__); 2452 __func__);
2714 ucc_geth_memclean(ugeth);
2715 return -ENOMEM; 2453 return -ENOMEM;
2716 } 2454 }
2717 2455
@@ -2745,7 +2483,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2745 ugeth_err 2483 ugeth_err
2746 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", 2484 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
2747 __func__); 2485 __func__);
2748 ucc_geth_memclean(ugeth);
2749 return -ENOMEM; 2486 return -ENOMEM;
2750 } 2487 }
2751 ugeth->p_tx_glbl_pram = 2488 ugeth->p_tx_glbl_pram =
@@ -2768,7 +2505,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2768 ugeth_err 2505 ugeth_err
2769 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", 2506 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
2770 __func__); 2507 __func__);
2771 ucc_geth_memclean(ugeth);
2772 return -ENOMEM; 2508 return -ENOMEM;
2773 } 2509 }
2774 2510
@@ -2798,7 +2534,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2798 ugeth_err 2534 ugeth_err
2799 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", 2535 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
2800 __func__); 2536 __func__);
2801 ucc_geth_memclean(ugeth);
2802 return -ENOMEM; 2537 return -ENOMEM;
2803 } 2538 }
2804 2539
@@ -2842,7 +2577,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2842 ugeth_err 2577 ugeth_err
2843 ("%s: Can not allocate DPRAM memory for p_scheduler.", 2578 ("%s: Can not allocate DPRAM memory for p_scheduler.",
2844 __func__); 2579 __func__);
2845 ucc_geth_memclean(ugeth);
2846 return -ENOMEM; 2580 return -ENOMEM;
2847 } 2581 }
2848 2582
@@ -2893,7 +2627,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2893 ("%s: Can not allocate DPRAM memory for" 2627 ("%s: Can not allocate DPRAM memory for"
2894 " p_tx_fw_statistics_pram.", 2628 " p_tx_fw_statistics_pram.",
2895 __func__); 2629 __func__);
2896 ucc_geth_memclean(ugeth);
2897 return -ENOMEM; 2630 return -ENOMEM;
2898 } 2631 }
2899 ugeth->p_tx_fw_statistics_pram = 2632 ugeth->p_tx_fw_statistics_pram =
@@ -2933,7 +2666,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2933 ugeth_err 2666 ugeth_err
2934 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", 2667 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
2935 __func__); 2668 __func__);
2936 ucc_geth_memclean(ugeth);
2937 return -ENOMEM; 2669 return -ENOMEM;
2938 } 2670 }
2939 ugeth->p_rx_glbl_pram = 2671 ugeth->p_rx_glbl_pram =
@@ -2955,7 +2687,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2955 ugeth_err 2687 ugeth_err
2956 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", 2688 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
2957 __func__); 2689 __func__);
2958 ucc_geth_memclean(ugeth);
2959 return -ENOMEM; 2690 return -ENOMEM;
2960 } 2691 }
2961 2692
@@ -2979,7 +2710,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2979 ugeth_err 2710 ugeth_err
2980 ("%s: Can not allocate DPRAM memory for" 2711 ("%s: Can not allocate DPRAM memory for"
2981 " p_rx_fw_statistics_pram.", __func__); 2712 " p_rx_fw_statistics_pram.", __func__);
2982 ucc_geth_memclean(ugeth);
2983 return -ENOMEM; 2713 return -ENOMEM;
2984 } 2714 }
2985 ugeth->p_rx_fw_statistics_pram = 2715 ugeth->p_rx_fw_statistics_pram =
@@ -3002,7 +2732,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3002 ugeth_err 2732 ugeth_err
3003 ("%s: Can not allocate DPRAM memory for" 2733 ("%s: Can not allocate DPRAM memory for"
3004 " p_rx_irq_coalescing_tbl.", __func__); 2734 " p_rx_irq_coalescing_tbl.", __func__);
3005 ucc_geth_memclean(ugeth);
3006 return -ENOMEM; 2735 return -ENOMEM;
3007 } 2736 }
3008 2737
@@ -3071,7 +2800,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3071 ugeth_err 2800 ugeth_err
3072 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", 2801 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3073 __func__); 2802 __func__);
3074 ucc_geth_memclean(ugeth);
3075 return -ENOMEM; 2803 return -ENOMEM;
3076 } 2804 }
3077 2805
@@ -3148,7 +2876,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3148 if (netif_msg_ifup(ugeth)) 2876 if (netif_msg_ifup(ugeth))
3149 ugeth_err("%s: Null Extended Filtering Chain Pointer.", 2877 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3150 __func__); 2878 __func__);
3151 ucc_geth_memclean(ugeth);
3152 return -EINVAL; 2879 return -EINVAL;
3153 } 2880 }
3154 2881
@@ -3162,7 +2889,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3162 ugeth_err 2889 ugeth_err
3163 ("%s: Can not allocate DPRAM memory for" 2890 ("%s: Can not allocate DPRAM memory for"
3164 " p_exf_glbl_param.", __func__); 2891 " p_exf_glbl_param.", __func__);
3165 ucc_geth_memclean(ugeth);
3166 return -ENOMEM; 2892 return -ENOMEM;
3167 } 2893 }
3168 2894
@@ -3210,7 +2936,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3210 ugeth_err 2936 ugeth_err
3211 ("%s: Can not allocate memory for" 2937 ("%s: Can not allocate memory for"
3212 " p_UccInitEnetParamShadows.", __func__); 2938 " p_UccInitEnetParamShadows.", __func__);
3213 ucc_geth_memclean(ugeth);
3214 return -ENOMEM; 2939 return -ENOMEM;
3215 } 2940 }
3216 /* Zero out *p_init_enet_param_shadow */ 2941 /* Zero out *p_init_enet_param_shadow */
@@ -3245,7 +2970,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3245 if (netif_msg_ifup(ugeth)) 2970 if (netif_msg_ifup(ugeth))
3246 ugeth_err("%s: Invalid largest External Lookup Key Size.", 2971 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3247 __func__); 2972 __func__);
3248 ucc_geth_memclean(ugeth);
3249 return -EINVAL; 2973 return -EINVAL;
3250 } 2974 }
3251 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = 2975 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
@@ -3272,7 +2996,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3272 if (netif_msg_ifup(ugeth)) 2996 if (netif_msg_ifup(ugeth))
3273 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 2997 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3274 __func__); 2998 __func__);
3275 ucc_geth_memclean(ugeth);
3276 return ret_val; 2999 return ret_val;
3277 } 3000 }
3278 3001
@@ -3288,7 +3011,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3288 if (netif_msg_ifup(ugeth)) 3011 if (netif_msg_ifup(ugeth))
3289 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3012 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3290 __func__); 3013 __func__);
3291 ucc_geth_memclean(ugeth);
3292 return ret_val; 3014 return ret_val;
3293 } 3015 }
3294 3016
@@ -3298,7 +3020,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3298 if (netif_msg_ifup(ugeth)) 3020 if (netif_msg_ifup(ugeth))
3299 ugeth_err("%s: Can not fill Rx bds with buffers.", 3021 ugeth_err("%s: Can not fill Rx bds with buffers.",
3300 __func__); 3022 __func__);
3301 ucc_geth_memclean(ugeth);
3302 return ret_val; 3023 return ret_val;
3303 } 3024 }
3304 } 3025 }
@@ -3310,7 +3031,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3310 ugeth_err 3031 ugeth_err
3311 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 3032 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3312 __func__); 3033 __func__);
3313 ucc_geth_memclean(ugeth);
3314 return -ENOMEM; 3034 return -ENOMEM;
3315 } 3035 }
3316 p_init_enet_pram = 3036 p_init_enet_pram =
@@ -3352,28 +3072,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
3352 return 0; 3072 return 0;
3353} 3073}
3354 3074
3355/* ucc_geth_timeout gets called when a packet has not been
3356 * transmitted after a set amount of time.
3357 * For now, assume that clearing out all the structures, and
3358 * starting over will fix the problem. */
3359static void ucc_geth_timeout(struct net_device *dev)
3360{
3361 struct ucc_geth_private *ugeth = netdev_priv(dev);
3362
3363 ugeth_vdbg("%s: IN", __func__);
3364
3365 dev->stats.tx_errors++;
3366
3367 ugeth_dump_regs(ugeth);
3368
3369 if (dev->flags & IFF_UP) {
3370 ucc_geth_stop(ugeth);
3371 ucc_geth_startup(ugeth);
3372 }
3373
3374 netif_tx_schedule_all(dev);
3375}
3376
3377/* This is called by the kernel when a frame is ready for transmission. */ 3075/* This is called by the kernel when a frame is ready for transmission. */
3378/* It is pointed to by the dev->hard_start_xmit function pointer */ 3076/* It is pointed to by the dev->hard_start_xmit function pointer */
3379static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) 3077static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -3502,8 +3200,6 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
3502 netif_receive_skb(skb); 3200 netif_receive_skb(skb);
3503 } 3201 }
3504 3202
3505 ugeth->dev->last_rx = jiffies;
3506
3507 skb = get_new_skb(ugeth, bd); 3203 skb = get_new_skb(ugeth, bd);
3508 if (!skb) { 3204 if (!skb) {
3509 if (netif_msg_rx_err(ugeth)) 3205 if (netif_msg_rx_err(ugeth))
@@ -3592,7 +3288,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
3592 struct ucc_fast_private *uccf; 3288 struct ucc_fast_private *uccf;
3593 u32 uccm; 3289 u32 uccm;
3594 3290
3595 netif_rx_complete(dev, napi); 3291 netif_rx_complete(napi);
3596 uccf = ugeth->uccf; 3292 uccf = ugeth->uccf;
3597 uccm = in_be32(uccf->p_uccm); 3293 uccm = in_be32(uccf->p_uccm);
3598 uccm |= UCCE_RX_EVENTS; 3294 uccm |= UCCE_RX_EVENTS;
@@ -3626,10 +3322,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3626 3322
3627 /* check for receive events that require processing */ 3323 /* check for receive events that require processing */
3628 if (ucce & UCCE_RX_EVENTS) { 3324 if (ucce & UCCE_RX_EVENTS) {
3629 if (netif_rx_schedule_prep(dev, &ugeth->napi)) { 3325 if (netif_rx_schedule_prep(&ugeth->napi)) {
3630 uccm &= ~UCCE_RX_EVENTS; 3326 uccm &= ~UCCE_RX_EVENTS;
3631 out_be32(uccf->p_uccm, uccm); 3327 out_be32(uccf->p_uccm, uccm);
3632 __netif_rx_schedule(dev, &ugeth->napi); 3328 __netif_rx_schedule(&ugeth->napi);
3633 } 3329 }
3634 } 3330 }
3635 3331
@@ -3697,7 +3393,7 @@ static int ucc_geth_open(struct net_device *dev)
3697 if (err) { 3393 if (err) {
3698 if (netif_msg_ifup(ugeth)) 3394 if (netif_msg_ifup(ugeth))
3699 ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name); 3395 ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name);
3700 return err; 3396 goto out_err_stop;
3701 } 3397 }
3702 3398
3703 napi_enable(&ugeth->napi); 3399 napi_enable(&ugeth->napi);
@@ -3738,22 +3434,19 @@ static int ucc_geth_open(struct net_device *dev)
3738 3434
3739 phy_start(ugeth->phydev); 3435 phy_start(ugeth->phydev);
3740 3436
3741 err = 3437 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
3742 request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
3743 "UCC Geth", dev);
3744 if (err) { 3438 if (err) {
3745 if (netif_msg_ifup(ugeth)) 3439 if (netif_msg_ifup(ugeth))
3746 ugeth_err("%s: Cannot get IRQ for net device, aborting.", 3440 ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
3747 dev->name);
3748 ucc_geth_stop(ugeth);
3749 goto out_err; 3441 goto out_err;
3750 } 3442 }
3751 3443
3752 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 3444 err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
3445 0, "UCC Geth", dev);
3753 if (err) { 3446 if (err) {
3754 if (netif_msg_ifup(ugeth)) 3447 if (netif_msg_ifup(ugeth))
3755 ugeth_err("%s: Cannot enable net device, aborting.", dev->name); 3448 ugeth_err("%s: Cannot get IRQ for net device, aborting.",
3756 ucc_geth_stop(ugeth); 3449 dev->name);
3757 goto out_err; 3450 goto out_err;
3758 } 3451 }
3759 3452
@@ -3763,7 +3456,8 @@ static int ucc_geth_open(struct net_device *dev)
3763 3456
3764out_err: 3457out_err:
3765 napi_disable(&ugeth->napi); 3458 napi_disable(&ugeth->napi);
3766 3459out_err_stop:
3460 ucc_geth_stop(ugeth);
3767 return err; 3461 return err;
3768} 3462}
3769 3463
@@ -3778,6 +3472,8 @@ static int ucc_geth_close(struct net_device *dev)
3778 3472
3779 ucc_geth_stop(ugeth); 3473 ucc_geth_stop(ugeth);
3780 3474
3475 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
3476
3781 phy_disconnect(ugeth->phydev); 3477 phy_disconnect(ugeth->phydev);
3782 ugeth->phydev = NULL; 3478 ugeth->phydev = NULL;
3783 3479
@@ -3786,6 +3482,45 @@ static int ucc_geth_close(struct net_device *dev)
3786 return 0; 3482 return 0;
3787} 3483}
3788 3484
3485/* Reopen device. This will reset the MAC and PHY. */
3486static void ucc_geth_timeout_work(struct work_struct *work)
3487{
3488 struct ucc_geth_private *ugeth;
3489 struct net_device *dev;
3490
3491 ugeth = container_of(work, struct ucc_geth_private, timeout_work);
3492 dev = ugeth->dev;
3493
3494 ugeth_vdbg("%s: IN", __func__);
3495
3496 dev->stats.tx_errors++;
3497
3498 ugeth_dump_regs(ugeth);
3499
3500 if (dev->flags & IFF_UP) {
3501 /*
3502 * Must reset MAC *and* PHY. This is done by reopening
3503 * the device.
3504 */
3505 ucc_geth_close(dev);
3506 ucc_geth_open(dev);
3507 }
3508
3509 netif_tx_schedule_all(dev);
3510}
3511
3512/*
3513 * ucc_geth_timeout gets called when a packet has not been
3514 * transmitted after a set amount of time.
3515 */
3516static void ucc_geth_timeout(struct net_device *dev)
3517{
3518 struct ucc_geth_private *ugeth = netdev_priv(dev);
3519
3520 netif_carrier_off(dev);
3521 schedule_work(&ugeth->timeout_work);
3522}
3523
3789static phy_interface_t to_phy_interface(const char *phy_connection_type) 3524static phy_interface_t to_phy_interface(const char *phy_connection_type)
3790{ 3525{
3791 if (strcasecmp(phy_connection_type, "mii") == 0) 3526 if (strcasecmp(phy_connection_type, "mii") == 0)
@@ -4026,6 +3761,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
4026 dev->hard_start_xmit = ucc_geth_start_xmit; 3761 dev->hard_start_xmit = ucc_geth_start_xmit;
4027 dev->tx_timeout = ucc_geth_timeout; 3762 dev->tx_timeout = ucc_geth_timeout;
4028 dev->watchdog_timeo = TX_TIMEOUT; 3763 dev->watchdog_timeo = TX_TIMEOUT;
3764 INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
4029 netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); 3765 netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
4030#ifdef CONFIG_NET_POLL_CONTROLLER 3766#ifdef CONFIG_NET_POLL_CONTROLLER
4031 dev->poll_controller = ucc_netpoll; 3767 dev->poll_controller = ucc_netpoll;
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index abc0e2242634..d74d2f7cb739 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -1186,6 +1186,7 @@ struct ucc_geth_private {
1186 struct ucc_fast_private *uccf; 1186 struct ucc_fast_private *uccf;
1187 struct net_device *dev; 1187 struct net_device *dev;
1188 struct napi_struct napi; 1188 struct napi_struct napi;
1189 struct work_struct timeout_work;
1189 struct ucc_geth __iomem *ug_regs; 1190 struct ucc_geth __iomem *ug_regs;
1190 struct ucc_geth_init_pram *p_init_enet_param_shadow; 1191 struct ucc_geth_init_pram *p_init_enet_param_shadow;
1191 struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param; 1192 struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param;
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index de57490103fc..e009481c606c 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -246,10 +246,11 @@ out:
246static void asix_async_cmd_callback(struct urb *urb) 246static void asix_async_cmd_callback(struct urb *urb)
247{ 247{
248 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context; 248 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
249 int status = urb->status;
249 250
250 if (urb->status < 0) 251 if (status < 0)
251 printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d", 252 printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
252 urb->status); 253 status);
253 254
254 kfree(req); 255 kfree(req);
255 usb_free_urb(urb); 256 usb_free_urb(urb);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 466a89e24444..cb7acbbb2798 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -229,14 +229,15 @@ static void catc_rx_done(struct urb *urb)
229 u8 *pkt_start = urb->transfer_buffer; 229 u8 *pkt_start = urb->transfer_buffer;
230 struct sk_buff *skb; 230 struct sk_buff *skb;
231 int pkt_len, pkt_offset = 0; 231 int pkt_len, pkt_offset = 0;
232 int status = urb->status;
232 233
233 if (!catc->is_f5u011) { 234 if (!catc->is_f5u011) {
234 clear_bit(RX_RUNNING, &catc->flags); 235 clear_bit(RX_RUNNING, &catc->flags);
235 pkt_offset = 2; 236 pkt_offset = 2;
236 } 237 }
237 238
238 if (urb->status) { 239 if (status) {
239 dbg("rx_done, status %d, length %d", urb->status, urb->actual_length); 240 dbg("rx_done, status %d, length %d", status, urb->actual_length);
240 return; 241 return;
241 } 242 }
242 243
@@ -271,16 +272,14 @@ static void catc_rx_done(struct urb *urb)
271 272
272 } while (pkt_start - (u8 *) urb->transfer_buffer < urb->actual_length); 273 } while (pkt_start - (u8 *) urb->transfer_buffer < urb->actual_length);
273 274
274 catc->netdev->last_rx = jiffies;
275
276 if (catc->is_f5u011) { 275 if (catc->is_f5u011) {
277 if (atomic_read(&catc->recq_sz)) { 276 if (atomic_read(&catc->recq_sz)) {
278 int status; 277 int state;
279 atomic_dec(&catc->recq_sz); 278 atomic_dec(&catc->recq_sz);
280 dbg("getting extra packet"); 279 dbg("getting extra packet");
281 urb->dev = catc->usbdev; 280 urb->dev = catc->usbdev;
282 if ((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { 281 if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
283 dbg("submit(rx_urb) status %d", status); 282 dbg("submit(rx_urb) status %d", state);
284 } 283 }
285 } else { 284 } else {
286 clear_bit(RX_RUNNING, &catc->flags); 285 clear_bit(RX_RUNNING, &catc->flags);
@@ -292,8 +291,9 @@ static void catc_irq_done(struct urb *urb)
292{ 291{
293 struct catc *catc = urb->context; 292 struct catc *catc = urb->context;
294 u8 *data = urb->transfer_buffer; 293 u8 *data = urb->transfer_buffer;
295 int status; 294 int status = urb->status;
296 unsigned int hasdata = 0, linksts = LinkNoChange; 295 unsigned int hasdata = 0, linksts = LinkNoChange;
296 int res;
297 297
298 if (!catc->is_f5u011) { 298 if (!catc->is_f5u011) {
299 hasdata = data[1] & 0x80; 299 hasdata = data[1] & 0x80;
@@ -309,7 +309,7 @@ static void catc_irq_done(struct urb *urb)
309 linksts = LinkBad; 309 linksts = LinkBad;
310 } 310 }
311 311
312 switch (urb->status) { 312 switch (status) {
313 case 0: /* success */ 313 case 0: /* success */
314 break; 314 break;
315 case -ECONNRESET: /* unlink */ 315 case -ECONNRESET: /* unlink */
@@ -318,7 +318,7 @@ static void catc_irq_done(struct urb *urb)
318 return; 318 return;
319 /* -EPIPE: should clear the halt */ 319 /* -EPIPE: should clear the halt */
320 default: /* error */ 320 default: /* error */
321 dbg("irq_done, status %d, data %02x %02x.", urb->status, data[0], data[1]); 321 dbg("irq_done, status %d, data %02x %02x.", status, data[0], data[1]);
322 goto resubmit; 322 goto resubmit;
323 } 323 }
324 324
@@ -338,17 +338,17 @@ static void catc_irq_done(struct urb *urb)
338 atomic_inc(&catc->recq_sz); 338 atomic_inc(&catc->recq_sz);
339 } else { 339 } else {
340 catc->rx_urb->dev = catc->usbdev; 340 catc->rx_urb->dev = catc->usbdev;
341 if ((status = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) { 341 if ((res = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) {
342 err("submit(rx_urb) status %d", status); 342 err("submit(rx_urb) status %d", res);
343 } 343 }
344 } 344 }
345 } 345 }
346resubmit: 346resubmit:
347 status = usb_submit_urb (urb, GFP_ATOMIC); 347 res = usb_submit_urb (urb, GFP_ATOMIC);
348 if (status) 348 if (res)
349 err ("can't resubmit intr, %s-%s, status %d", 349 err ("can't resubmit intr, %s-%s, status %d",
350 catc->usbdev->bus->bus_name, 350 catc->usbdev->bus->bus_name,
351 catc->usbdev->devpath, status); 351 catc->usbdev->devpath, res);
352} 352}
353 353
354/* 354/*
@@ -380,9 +380,9 @@ static void catc_tx_done(struct urb *urb)
380{ 380{
381 struct catc *catc = urb->context; 381 struct catc *catc = urb->context;
382 unsigned long flags; 382 unsigned long flags;
383 int r; 383 int r, status = urb->status;
384 384
385 if (urb->status == -ECONNRESET) { 385 if (status == -ECONNRESET) {
386 dbg("Tx Reset."); 386 dbg("Tx Reset.");
387 urb->status = 0; 387 urb->status = 0;
388 catc->netdev->trans_start = jiffies; 388 catc->netdev->trans_start = jiffies;
@@ -392,8 +392,8 @@ static void catc_tx_done(struct urb *urb)
392 return; 392 return;
393 } 393 }
394 394
395 if (urb->status) { 395 if (status) {
396 dbg("tx_done, status %d, length %d", urb->status, urb->actual_length); 396 dbg("tx_done, status %d, length %d", status, urb->actual_length);
397 return; 397 return;
398 } 398 }
399 399
@@ -504,9 +504,10 @@ static void catc_ctrl_done(struct urb *urb)
504 struct catc *catc = urb->context; 504 struct catc *catc = urb->context;
505 struct ctrl_queue *q; 505 struct ctrl_queue *q;
506 unsigned long flags; 506 unsigned long flags;
507 int status = urb->status;
507 508
508 if (urb->status) 509 if (status)
509 dbg("ctrl_done, status %d, len %d.", urb->status, urb->actual_length); 510 dbg("ctrl_done, status %d, len %d.", status, urb->actual_length);
510 511
511 spin_lock_irqsave(&catc->ctrl_lock, flags); 512 spin_lock_irqsave(&catc->ctrl_lock, flags);
512 513
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index db3377dae9d5..edd244f3acb5 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -123,10 +123,11 @@ static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
123static void dm_write_async_callback(struct urb *urb) 123static void dm_write_async_callback(struct urb *urb)
124{ 124{
125 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context; 125 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
126 int status = urb->status;
126 127
127 if (urb->status < 0) 128 if (status < 0)
128 printk(KERN_DEBUG "dm_write_async_callback() failed with %d\n", 129 printk(KERN_DEBUG "dm_write_async_callback() failed with %d\n",
129 urb->status); 130 status);
130 131
131 kfree(req); 132 kfree(req);
132 usb_free_urb(urb); 133 usb_free_urb(urb);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 8e90891f0e42..198ce3cf378a 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -417,6 +417,11 @@ static const struct usb_device_id hso_ids[] = {
417 {USB_DEVICE(0x0af0, 0x7401)}, /* GI 0401 */ 417 {USB_DEVICE(0x0af0, 0x7401)}, /* GI 0401 */
418 {USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */ 418 {USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */
419 {USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */ 419 {USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */
420 {USB_DEVICE(0x0af0, 0x7701)},
421 {USB_DEVICE(0x0af0, 0x7801)},
422 {USB_DEVICE(0x0af0, 0x7901)},
423 {USB_DEVICE(0x0af0, 0x7361)},
424 {icon321_port_device(0x0af0, 0xd051)},
420 {} 425 {}
421}; 426};
422MODULE_DEVICE_TABLE(usb, hso_ids); 427MODULE_DEVICE_TABLE(usb, hso_ids);
@@ -658,10 +663,9 @@ static int hso_net_open(struct net_device *net)
658 odev->rx_buf_missing = sizeof(struct iphdr); 663 odev->rx_buf_missing = sizeof(struct iphdr);
659 spin_unlock_irqrestore(&odev->net_lock, flags); 664 spin_unlock_irqrestore(&odev->net_lock, flags);
660 665
661 hso_start_net_device(odev->parent);
662
663 /* We are up and running. */ 666 /* We are up and running. */
664 set_bit(HSO_NET_RUNNING, &odev->flags); 667 set_bit(HSO_NET_RUNNING, &odev->flags);
668 hso_start_net_device(odev->parent);
665 669
666 /* Tell the kernel we are ready to start receiving from it */ 670 /* Tell the kernel we are ready to start receiving from it */
667 netif_start_queue(net); 671 netif_start_queue(net);
@@ -2750,18 +2754,21 @@ static int hso_resume(struct usb_interface *iface)
2750 if (network_table[i] && 2754 if (network_table[i] &&
2751 (network_table[i]->interface == iface)) { 2755 (network_table[i]->interface == iface)) {
2752 hso_net = dev2net(network_table[i]); 2756 hso_net = dev2net(network_table[i]);
2753 /* First transmit any lingering data, then restart the 2757 if (hso_net->flags & IFF_UP) {
2754 * device. */ 2758 /* First transmit any lingering data,
2755 if (hso_net->skb_tx_buf) { 2759 then restart the device. */
2756 dev_dbg(&iface->dev, 2760 if (hso_net->skb_tx_buf) {
2757 "Transmitting lingering data\n"); 2761 dev_dbg(&iface->dev,
2758 hso_net_start_xmit(hso_net->skb_tx_buf, 2762 "Transmitting"
2759 hso_net->net); 2763 " lingering data\n");
2760 hso_net->skb_tx_buf = NULL; 2764 hso_net_start_xmit(hso_net->skb_tx_buf,
2765 hso_net->net);
2766 hso_net->skb_tx_buf = NULL;
2767 }
2768 result = hso_start_net_device(network_table[i]);
2769 if (result)
2770 goto out;
2761 } 2771 }
2762 result = hso_start_net_device(network_table[i]);
2763 if (result)
2764 goto out;
2765 } 2772 }
2766 } 2773 }
2767 2774
@@ -2894,6 +2901,7 @@ static struct usb_driver hso_driver = {
2894 .id_table = hso_ids, 2901 .id_table = hso_ids,
2895 .suspend = hso_suspend, 2902 .suspend = hso_suspend,
2896 .resume = hso_resume, 2903 .resume = hso_resume,
2904 .reset_resume = hso_resume,
2897 .supports_autosuspend = 1, 2905 .supports_autosuspend = 1,
2898}; 2906};
2899 2907
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index fdbf3be24fda..2ee034f70d1c 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -516,8 +516,9 @@ static void int_callback(struct urb *u)
516{ 516{
517 struct kaweth_device *kaweth = u->context; 517 struct kaweth_device *kaweth = u->context;
518 int act_state; 518 int act_state;
519 int status = u->status;
519 520
520 switch (u->status) { 521 switch (status) {
521 case 0: /* success */ 522 case 0: /* success */
522 break; 523 break;
523 case -ECONNRESET: /* unlink */ 524 case -ECONNRESET: /* unlink */
@@ -598,6 +599,7 @@ static void kaweth_usb_receive(struct urb *urb)
598{ 599{
599 struct kaweth_device *kaweth = urb->context; 600 struct kaweth_device *kaweth = urb->context;
600 struct net_device *net = kaweth->net; 601 struct net_device *net = kaweth->net;
602 int status = urb->status;
601 603
602 int count = urb->actual_length; 604 int count = urb->actual_length;
603 int count2 = urb->transfer_buffer_length; 605 int count2 = urb->transfer_buffer_length;
@@ -606,7 +608,7 @@ static void kaweth_usb_receive(struct urb *urb)
606 608
607 struct sk_buff *skb; 609 struct sk_buff *skb;
608 610
609 if(unlikely(urb->status == -ECONNRESET || urb->status == -ESHUTDOWN)) 611 if(unlikely(status == -ECONNRESET || status == -ESHUTDOWN))
610 /* we are killed - set a flag and wake the disconnect handler */ 612 /* we are killed - set a flag and wake the disconnect handler */
611 { 613 {
612 kaweth->end = 1; 614 kaweth->end = 1;
@@ -621,10 +623,10 @@ static void kaweth_usb_receive(struct urb *urb)
621 } 623 }
622 spin_unlock(&kaweth->device_lock); 624 spin_unlock(&kaweth->device_lock);
623 625
624 if(urb->status && urb->status != -EREMOTEIO && count != 1) { 626 if(status && status != -EREMOTEIO && count != 1) {
625 err("%s RX status: %d count: %d packet_len: %d", 627 err("%s RX status: %d count: %d packet_len: %d",
626 net->name, 628 net->name,
627 urb->status, 629 status,
628 count, 630 count,
629 (int)pkt_len); 631 (int)pkt_len);
630 kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC); 632 kaweth_resubmit_rx_urb(kaweth, GFP_ATOMIC);
@@ -775,10 +777,11 @@ static void kaweth_usb_transmit_complete(struct urb *urb)
775{ 777{
776 struct kaweth_device *kaweth = urb->context; 778 struct kaweth_device *kaweth = urb->context;
777 struct sk_buff *skb = kaweth->tx_skb; 779 struct sk_buff *skb = kaweth->tx_skb;
780 int status = urb->status;
778 781
779 if (unlikely(urb->status != 0)) 782 if (unlikely(status != 0))
780 if (urb->status != -ENOENT) 783 if (status != -ENOENT)
781 dbg("%s: TX status %d.", kaweth->net->name, urb->status); 784 dbg("%s: TX status %d.", kaweth->net->name, status);
782 785
783 netif_wake_queue(kaweth->net); 786 netif_wake_queue(kaweth->net);
784 dev_kfree_skb_irq(skb); 787 dev_kfree_skb_irq(skb);
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index b5143509e8be..5385d66b306e 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -115,10 +115,11 @@ static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data)
115static void mcs7830_async_cmd_callback(struct urb *urb) 115static void mcs7830_async_cmd_callback(struct urb *urb)
116{ 116{
117 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context; 117 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
118 int status = urb->status;
118 119
119 if (urb->status < 0) 120 if (status < 0)
120 printk(KERN_DEBUG "%s() failed with %d\n", 121 printk(KERN_DEBUG "%s() failed with %d\n",
121 __func__, urb->status); 122 __func__, status);
122 123
123 kfree(req); 124 kfree(req);
124 usb_free_urb(urb); 125 usb_free_urb(urb);
@@ -344,14 +345,14 @@ out:
344static int mcs7830_mdio_read(struct net_device *netdev, int phy_id, 345static int mcs7830_mdio_read(struct net_device *netdev, int phy_id,
345 int location) 346 int location)
346{ 347{
347 struct usbnet *dev = netdev->priv; 348 struct usbnet *dev = netdev_priv(netdev);
348 return mcs7830_read_phy(dev, location); 349 return mcs7830_read_phy(dev, location);
349} 350}
350 351
351static void mcs7830_mdio_write(struct net_device *netdev, int phy_id, 352static void mcs7830_mdio_write(struct net_device *netdev, int phy_id,
352 int location, int val) 353 int location, int val)
353{ 354{
354 struct usbnet *dev = netdev->priv; 355 struct usbnet *dev = netdev_priv(netdev);
355 mcs7830_write_phy(dev, location, val); 356 mcs7830_write_phy(dev, location, val);
356} 357}
357 358
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 7914867110ed..166880c113d6 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -99,11 +99,12 @@ static int update_eth_regs_async(pegasus_t *);
99static void ctrl_callback(struct urb *urb) 99static void ctrl_callback(struct urb *urb)
100{ 100{
101 pegasus_t *pegasus = urb->context; 101 pegasus_t *pegasus = urb->context;
102 int status = urb->status;
102 103
103 if (!pegasus) 104 if (!pegasus)
104 return; 105 return;
105 106
106 switch (urb->status) { 107 switch (status) {
107 case 0: 108 case 0:
108 if (pegasus->flags & ETH_REGS_CHANGE) { 109 if (pegasus->flags & ETH_REGS_CHANGE) {
109 pegasus->flags &= ~ETH_REGS_CHANGE; 110 pegasus->flags &= ~ETH_REGS_CHANGE;
@@ -119,7 +120,7 @@ static void ctrl_callback(struct urb *urb)
119 default: 120 default:
120 if (netif_msg_drv(pegasus) && printk_ratelimit()) 121 if (netif_msg_drv(pegasus) && printk_ratelimit())
121 dev_dbg(&pegasus->intf->dev, "%s, status %d\n", 122 dev_dbg(&pegasus->intf->dev, "%s, status %d\n",
122 __func__, urb->status); 123 __func__, status);
123 } 124 }
124 pegasus->flags &= ~ETH_REGS_CHANGED; 125 pegasus->flags &= ~ETH_REGS_CHANGED;
125 wake_up(&pegasus->ctrl_wait); 126 wake_up(&pegasus->ctrl_wait);
@@ -611,6 +612,7 @@ static void read_bulk_callback(struct urb *urb)
611 pegasus_t *pegasus = urb->context; 612 pegasus_t *pegasus = urb->context;
612 struct net_device *net; 613 struct net_device *net;
613 int rx_status, count = urb->actual_length; 614 int rx_status, count = urb->actual_length;
615 int status = urb->status;
614 u8 *buf = urb->transfer_buffer; 616 u8 *buf = urb->transfer_buffer;
615 __u16 pkt_len; 617 __u16 pkt_len;
616 618
@@ -621,7 +623,7 @@ static void read_bulk_callback(struct urb *urb)
621 if (!netif_device_present(net) || !netif_running(net)) 623 if (!netif_device_present(net) || !netif_running(net))
622 return; 624 return;
623 625
624 switch (urb->status) { 626 switch (status) {
625 case 0: 627 case 0:
626 break; 628 break;
627 case -ETIME: 629 case -ETIME:
@@ -639,11 +641,11 @@ static void read_bulk_callback(struct urb *urb)
639 case -ECONNRESET: 641 case -ECONNRESET:
640 case -ESHUTDOWN: 642 case -ESHUTDOWN:
641 if (netif_msg_ifdown(pegasus)) 643 if (netif_msg_ifdown(pegasus))
642 pr_debug("%s: rx unlink, %d\n", net->name, urb->status); 644 pr_debug("%s: rx unlink, %d\n", net->name, status);
643 return; 645 return;
644 default: 646 default:
645 if (netif_msg_rx_err(pegasus)) 647 if (netif_msg_rx_err(pegasus))
646 pr_debug("%s: RX status %d\n", net->name, urb->status); 648 pr_debug("%s: RX status %d\n", net->name, status);
647 goto goon; 649 goto goon;
648 } 650 }
649 651
@@ -769,6 +771,7 @@ static void write_bulk_callback(struct urb *urb)
769{ 771{
770 pegasus_t *pegasus = urb->context; 772 pegasus_t *pegasus = urb->context;
771 struct net_device *net; 773 struct net_device *net;
774 int status = urb->status;
772 775
773 if (!pegasus) 776 if (!pegasus)
774 return; 777 return;
@@ -778,7 +781,7 @@ static void write_bulk_callback(struct urb *urb)
778 if (!netif_device_present(net) || !netif_running(net)) 781 if (!netif_device_present(net) || !netif_running(net))
779 return; 782 return;
780 783
781 switch (urb->status) { 784 switch (status) {
782 case -EPIPE: 785 case -EPIPE:
783 /* FIXME schedule_work() to clear the tx halt */ 786 /* FIXME schedule_work() to clear the tx halt */
784 netif_stop_queue(net); 787 netif_stop_queue(net);
@@ -790,11 +793,11 @@ static void write_bulk_callback(struct urb *urb)
790 case -ECONNRESET: 793 case -ECONNRESET:
791 case -ESHUTDOWN: 794 case -ESHUTDOWN:
792 if (netif_msg_ifdown(pegasus)) 795 if (netif_msg_ifdown(pegasus))
793 pr_debug("%s: tx unlink, %d\n", net->name, urb->status); 796 pr_debug("%s: tx unlink, %d\n", net->name, status);
794 return; 797 return;
795 default: 798 default:
796 if (netif_msg_tx_err(pegasus)) 799 if (netif_msg_tx_err(pegasus))
797 pr_info("%s: TX status %d\n", net->name, urb->status); 800 pr_info("%s: TX status %d\n", net->name, status);
798 /* FALL THROUGH */ 801 /* FALL THROUGH */
799 case 0: 802 case 0:
800 break; 803 break;
@@ -808,13 +811,13 @@ static void intr_callback(struct urb *urb)
808{ 811{
809 pegasus_t *pegasus = urb->context; 812 pegasus_t *pegasus = urb->context;
810 struct net_device *net; 813 struct net_device *net;
811 int status; 814 int res, status = urb->status;
812 815
813 if (!pegasus) 816 if (!pegasus)
814 return; 817 return;
815 net = pegasus->net; 818 net = pegasus->net;
816 819
817 switch (urb->status) { 820 switch (status) {
818 case 0: 821 case 0:
819 break; 822 break;
820 case -ECONNRESET: /* unlink */ 823 case -ECONNRESET: /* unlink */
@@ -827,7 +830,7 @@ static void intr_callback(struct urb *urb)
827 */ 830 */
828 if (netif_msg_timer(pegasus)) 831 if (netif_msg_timer(pegasus))
829 pr_debug("%s: intr status %d\n", net->name, 832 pr_debug("%s: intr status %d\n", net->name,
830 urb->status); 833 status);
831 } 834 }
832 835
833 if (urb->actual_length >= 6) { 836 if (urb->actual_length >= 6) {
@@ -854,12 +857,12 @@ static void intr_callback(struct urb *urb)
854 pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4]; 857 pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4];
855 } 858 }
856 859
857 status = usb_submit_urb(urb, GFP_ATOMIC); 860 res = usb_submit_urb(urb, GFP_ATOMIC);
858 if (status == -ENODEV) 861 if (res == -ENODEV)
859 netif_device_detach(pegasus->net); 862 netif_device_detach(pegasus->net);
860 if (status && netif_msg_timer(pegasus)) 863 if (res && netif_msg_timer(pegasus))
861 printk(KERN_ERR "%s: can't resubmit interrupt urb, %d\n", 864 printk(KERN_ERR "%s: can't resubmit interrupt urb, %d\n",
862 net->name, status); 865 net->name, res);
863} 866}
864 867
865static void pegasus_tx_timeout(struct net_device *net) 868static void pegasus_tx_timeout(struct net_device *net)
@@ -1213,7 +1216,7 @@ static void pegasus_set_multicast(struct net_device *net)
1213 pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST; 1216 pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
1214 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; 1217 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
1215 if (netif_msg_link(pegasus)) 1218 if (netif_msg_link(pegasus))
1216 pr_info("%s: set allmulti\n", net->name); 1219 pr_debug("%s: set allmulti\n", net->name);
1217 } else { 1220 } else {
1218 pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST; 1221 pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST;
1219 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; 1222 pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
@@ -1273,6 +1276,7 @@ static inline void setup_pegasus_II(pegasus_t * pegasus)
1273} 1276}
1274 1277
1275 1278
1279static int pegasus_count;
1276static struct workqueue_struct *pegasus_workqueue = NULL; 1280static struct workqueue_struct *pegasus_workqueue = NULL;
1277#define CARRIER_CHECK_DELAY (2 * HZ) 1281#define CARRIER_CHECK_DELAY (2 * HZ)
1278 1282
@@ -1301,6 +1305,18 @@ static int pegasus_blacklisted(struct usb_device *udev)
1301 return 0; 1305 return 0;
1302} 1306}
1303 1307
1308/* we rely on probe() and remove() being serialized so we
1309 * don't need extra locking on pegasus_count.
1310 */
1311static void pegasus_dec_workqueue(void)
1312{
1313 pegasus_count--;
1314 if (pegasus_count == 0) {
1315 destroy_workqueue(pegasus_workqueue);
1316 pegasus_workqueue = NULL;
1317 }
1318}
1319
1304static int pegasus_probe(struct usb_interface *intf, 1320static int pegasus_probe(struct usb_interface *intf,
1305 const struct usb_device_id *id) 1321 const struct usb_device_id *id)
1306{ 1322{
@@ -1309,14 +1325,18 @@ static int pegasus_probe(struct usb_interface *intf,
1309 pegasus_t *pegasus; 1325 pegasus_t *pegasus;
1310 int dev_index = id - pegasus_ids; 1326 int dev_index = id - pegasus_ids;
1311 int res = -ENOMEM; 1327 int res = -ENOMEM;
1312 DECLARE_MAC_BUF(mac);
1313 1328
1314 usb_get_dev(dev); 1329 if (pegasus_blacklisted(dev))
1330 return -ENODEV;
1315 1331
1316 if (pegasus_blacklisted(dev)) { 1332 if (pegasus_count == 0) {
1317 res = -ENODEV; 1333 pegasus_workqueue = create_singlethread_workqueue("pegasus");
1318 goto out; 1334 if (!pegasus_workqueue)
1335 return -ENOMEM;
1319 } 1336 }
1337 pegasus_count++;
1338
1339 usb_get_dev(dev);
1320 1340
1321 net = alloc_etherdev(sizeof(struct pegasus)); 1341 net = alloc_etherdev(sizeof(struct pegasus));
1322 if (!net) { 1342 if (!net) {
@@ -1386,10 +1406,10 @@ static int pegasus_probe(struct usb_interface *intf,
1386 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, 1406 queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
1387 CARRIER_CHECK_DELAY); 1407 CARRIER_CHECK_DELAY);
1388 1408
1389 dev_info(&intf->dev, "%s, %s, %s\n", 1409 dev_info(&intf->dev, "%s, %s, %pM\n",
1390 net->name, 1410 net->name,
1391 usb_dev_id[dev_index].name, 1411 usb_dev_id[dev_index].name,
1392 print_mac(mac, net->dev_addr)); 1412 net->dev_addr);
1393 return 0; 1413 return 0;
1394 1414
1395out3: 1415out3:
@@ -1401,6 +1421,7 @@ out1:
1401 free_netdev(net); 1421 free_netdev(net);
1402out: 1422out:
1403 usb_put_dev(dev); 1423 usb_put_dev(dev);
1424 pegasus_dec_workqueue();
1404 return res; 1425 return res;
1405} 1426}
1406 1427
@@ -1426,6 +1447,7 @@ static void pegasus_disconnect(struct usb_interface *intf)
1426 pegasus->rx_skb = NULL; 1447 pegasus->rx_skb = NULL;
1427 } 1448 }
1428 free_netdev(pegasus->net); 1449 free_netdev(pegasus->net);
1450 pegasus_dec_workqueue();
1429} 1451}
1430 1452
1431static int pegasus_suspend (struct usb_interface *intf, pm_message_t message) 1453static int pegasus_suspend (struct usb_interface *intf, pm_message_t message)
@@ -1469,7 +1491,7 @@ static struct usb_driver pegasus_driver = {
1469 .resume = pegasus_resume, 1491 .resume = pegasus_resume,
1470}; 1492};
1471 1493
1472static void parse_id(char *id) 1494static void __init parse_id(char *id)
1473{ 1495{
1474 unsigned int vendor_id=0, device_id=0, flags=0, i=0; 1496 unsigned int vendor_id=0, device_id=0, flags=0, i=0;
1475 char *token, *name=NULL; 1497 char *token, *name=NULL;
@@ -1505,15 +1527,11 @@ static int __init pegasus_init(void)
1505 pr_info("%s: %s, " DRIVER_DESC "\n", driver_name, DRIVER_VERSION); 1527 pr_info("%s: %s, " DRIVER_DESC "\n", driver_name, DRIVER_VERSION);
1506 if (devid) 1528 if (devid)
1507 parse_id(devid); 1529 parse_id(devid);
1508 pegasus_workqueue = create_singlethread_workqueue("pegasus");
1509 if (!pegasus_workqueue)
1510 return -ENOMEM;
1511 return usb_register(&pegasus_driver); 1530 return usb_register(&pegasus_driver);
1512} 1531}
1513 1532
1514static void __exit pegasus_exit(void) 1533static void __exit pegasus_exit(void)
1515{ 1534{
1516 destroy_workqueue(pegasus_workqueue);
1517 usb_deregister(&pegasus_driver); 1535 usb_deregister(&pegasus_driver);
1518} 1536}
1519 1537
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 6133401ebc67..d8664bf18c00 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -212,8 +212,9 @@ static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
212static void ctrl_callback(struct urb *urb) 212static void ctrl_callback(struct urb *urb)
213{ 213{
214 rtl8150_t *dev; 214 rtl8150_t *dev;
215 int status = urb->status;
215 216
216 switch (urb->status) { 217 switch (status) {
217 case 0: 218 case 0:
218 break; 219 break;
219 case -EINPROGRESS: 220 case -EINPROGRESS:
@@ -221,7 +222,7 @@ static void ctrl_callback(struct urb *urb)
221 case -ENOENT: 222 case -ENOENT:
222 break; 223 break;
223 default: 224 default:
224 dev_warn(&urb->dev->dev, "ctrl urb status %d\n", urb->status); 225 dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status);
225 } 226 }
226 dev = urb->context; 227 dev = urb->context;
227 clear_bit(RX_REG_SET, &dev->flags); 228 clear_bit(RX_REG_SET, &dev->flags);
@@ -424,7 +425,8 @@ static void read_bulk_callback(struct urb *urb)
424 struct sk_buff *skb; 425 struct sk_buff *skb;
425 struct net_device *netdev; 426 struct net_device *netdev;
426 u16 rx_stat; 427 u16 rx_stat;
427 int status; 428 int status = urb->status;
429 int result;
428 430
429 dev = urb->context; 431 dev = urb->context;
430 if (!dev) 432 if (!dev)
@@ -435,7 +437,7 @@ static void read_bulk_callback(struct urb *urb)
435 if (!netif_device_present(netdev)) 437 if (!netif_device_present(netdev))
436 return; 438 return;
437 439
438 switch (urb->status) { 440 switch (status) {
439 case 0: 441 case 0:
440 break; 442 break;
441 case -ENOENT: 443 case -ENOENT:
@@ -444,7 +446,7 @@ static void read_bulk_callback(struct urb *urb)
444 dev_warn(&urb->dev->dev, "may be reset is needed?..\n"); 446 dev_warn(&urb->dev->dev, "may be reset is needed?..\n");
445 goto goon; 447 goto goon;
446 default: 448 default:
447 dev_warn(&urb->dev->dev, "Rx status %d\n", urb->status); 449 dev_warn(&urb->dev->dev, "Rx status %d\n", status);
448 goto goon; 450 goto goon;
449 } 451 }
450 452
@@ -474,10 +476,10 @@ static void read_bulk_callback(struct urb *urb)
474goon: 476goon:
475 usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), 477 usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
476 dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); 478 dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev);
477 status = usb_submit_urb(dev->rx_urb, GFP_ATOMIC); 479 result = usb_submit_urb(dev->rx_urb, GFP_ATOMIC);
478 if (status == -ENODEV) 480 if (result == -ENODEV)
479 netif_device_detach(dev->netdev); 481 netif_device_detach(dev->netdev);
480 else if (status) { 482 else if (result) {
481 set_bit(RX_URB_FAIL, &dev->flags); 483 set_bit(RX_URB_FAIL, &dev->flags);
482 goto resched; 484 goto resched;
483 } else { 485 } else {
@@ -530,6 +532,7 @@ tlsched:
530static void write_bulk_callback(struct urb *urb) 532static void write_bulk_callback(struct urb *urb)
531{ 533{
532 rtl8150_t *dev; 534 rtl8150_t *dev;
535 int status = urb->status;
533 536
534 dev = urb->context; 537 dev = urb->context;
535 if (!dev) 538 if (!dev)
@@ -537,9 +540,9 @@ static void write_bulk_callback(struct urb *urb)
537 dev_kfree_skb_irq(dev->tx_skb); 540 dev_kfree_skb_irq(dev->tx_skb);
538 if (!netif_device_present(dev->netdev)) 541 if (!netif_device_present(dev->netdev))
539 return; 542 return;
540 if (urb->status) 543 if (status)
541 dev_info(&urb->dev->dev, "%s: Tx status %d\n", 544 dev_info(&urb->dev->dev, "%s: Tx status %d\n",
542 dev->netdev->name, urb->status); 545 dev->netdev->name, status);
543 dev->netdev->trans_start = jiffies; 546 dev->netdev->trans_start = jiffies;
544 netif_wake_queue(dev->netdev); 547 netif_wake_queue(dev->netdev);
545} 548}
@@ -548,12 +551,13 @@ static void intr_callback(struct urb *urb)
548{ 551{
549 rtl8150_t *dev; 552 rtl8150_t *dev;
550 __u8 *d; 553 __u8 *d;
551 int status; 554 int status = urb->status;
555 int res;
552 556
553 dev = urb->context; 557 dev = urb->context;
554 if (!dev) 558 if (!dev)
555 return; 559 return;
556 switch (urb->status) { 560 switch (status) {
557 case 0: /* success */ 561 case 0: /* success */
558 break; 562 break;
559 case -ECONNRESET: /* unlink */ 563 case -ECONNRESET: /* unlink */
@@ -563,7 +567,7 @@ static void intr_callback(struct urb *urb)
563 /* -EPIPE: should clear the halt */ 567 /* -EPIPE: should clear the halt */
564 default: 568 default:
565 dev_info(&urb->dev->dev, "%s: intr status %d\n", 569 dev_info(&urb->dev->dev, "%s: intr status %d\n",
566 dev->netdev->name, urb->status); 570 dev->netdev->name, status);
567 goto resubmit; 571 goto resubmit;
568 } 572 }
569 573
@@ -591,13 +595,13 @@ static void intr_callback(struct urb *urb)
591 } 595 }
592 596
593resubmit: 597resubmit:
594 status = usb_submit_urb (urb, GFP_ATOMIC); 598 res = usb_submit_urb (urb, GFP_ATOMIC);
595 if (status == -ENODEV) 599 if (res == -ENODEV)
596 netif_device_detach(dev->netdev); 600 netif_device_detach(dev->netdev);
597 else if (status) 601 else if (res)
598 err ("can't resubmit intr, %s-%s/input0, status %d", 602 err ("can't resubmit intr, %s-%s/input0, status %d",
599 dev->udev->bus->bus_name, 603 dev->udev->bus->bus_name,
600 dev->udev->devpath, status); 604 dev->udev->devpath, res);
601} 605}
602 606
603static int rtl8150_suspend(struct usb_interface *intf, pm_message_t message) 607static int rtl8150_suspend(struct usb_interface *intf, pm_message_t message)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 51e2f5d7d14e..5574abe29c73 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -31,7 +31,7 @@
31#include "smsc95xx.h" 31#include "smsc95xx.h"
32 32
33#define SMSC_CHIPNAME "smsc95xx" 33#define SMSC_CHIPNAME "smsc95xx"
34#define SMSC_DRIVER_VERSION "1.0.3" 34#define SMSC_DRIVER_VERSION "1.0.4"
35#define HS_USB_PKT_SIZE (512) 35#define HS_USB_PKT_SIZE (512)
36#define FS_USB_PKT_SIZE (64) 36#define FS_USB_PKT_SIZE (64)
37#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE) 37#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE)
@@ -40,15 +40,16 @@
40#define MAX_SINGLE_PACKET_SIZE (2048) 40#define MAX_SINGLE_PACKET_SIZE (2048)
41#define LAN95XX_EEPROM_MAGIC (0x9500) 41#define LAN95XX_EEPROM_MAGIC (0x9500)
42#define EEPROM_MAC_OFFSET (0x01) 42#define EEPROM_MAC_OFFSET (0x01)
43#define DEFAULT_TX_CSUM_ENABLE (true)
43#define DEFAULT_RX_CSUM_ENABLE (true) 44#define DEFAULT_RX_CSUM_ENABLE (true)
44#define SMSC95XX_INTERNAL_PHY_ID (1) 45#define SMSC95XX_INTERNAL_PHY_ID (1)
45#define SMSC95XX_TX_OVERHEAD (8) 46#define SMSC95XX_TX_OVERHEAD (8)
46#define FLOW_CTRL_TX (1) 47#define SMSC95XX_TX_OVERHEAD_CSUM (12)
47#define FLOW_CTRL_RX (2)
48 48
49struct smsc95xx_priv { 49struct smsc95xx_priv {
50 u32 mac_cr; 50 u32 mac_cr;
51 spinlock_t mac_cr_lock; 51 spinlock_t mac_cr_lock;
52 bool use_tx_csum;
52 bool use_rx_csum; 53 bool use_rx_csum;
53}; 54};
54 55
@@ -310,9 +311,10 @@ static void smsc95xx_async_cmd_callback(struct urb *urb, struct pt_regs *regs)
310{ 311{
311 struct usb_context *usb_context = urb->context; 312 struct usb_context *usb_context = urb->context;
312 struct usbnet *dev = usb_context->dev; 313 struct usbnet *dev = usb_context->dev;
314 int status = urb->status;
313 315
314 if (urb->status < 0) 316 if (status < 0)
315 devwarn(dev, "async callback failed with %d", urb->status); 317 devwarn(dev, "async callback failed with %d", status);
316 318
317 complete(&usb_context->notify); 319 complete(&usb_context->notify);
318 320
@@ -434,28 +436,6 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
434 smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); 436 smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr);
435} 437}
436 438
437static u8 smsc95xx_resolve_flowctrl_fulldplx(u16 lcladv, u16 rmtadv)
438{
439 u8 cap = 0;
440
441 if (lcladv & ADVERTISE_PAUSE_CAP) {
442 if (lcladv & ADVERTISE_PAUSE_ASYM) {
443 if (rmtadv & LPA_PAUSE_CAP)
444 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
445 else if (rmtadv & LPA_PAUSE_ASYM)
446 cap = FLOW_CTRL_RX;
447 } else {
448 if (rmtadv & LPA_PAUSE_CAP)
449 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
450 }
451 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
452 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
453 cap = FLOW_CTRL_TX;
454 }
455
456 return cap;
457}
458
459static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, 439static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
460 u16 lcladv, u16 rmtadv) 440 u16 lcladv, u16 rmtadv)
461{ 441{
@@ -468,7 +448,7 @@ static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
468 } 448 }
469 449
470 if (duplex == DUPLEX_FULL) { 450 if (duplex == DUPLEX_FULL) {
471 u8 cap = smsc95xx_resolve_flowctrl_fulldplx(lcladv, rmtadv); 451 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
472 452
473 if (cap & FLOW_CTRL_RX) 453 if (cap & FLOW_CTRL_RX)
474 flow = 0xFFFF0002; 454 flow = 0xFFFF0002;
@@ -556,9 +536,10 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
556 devwarn(dev, "unexpected interrupt, intdata=0x%08X", intdata); 536 devwarn(dev, "unexpected interrupt, intdata=0x%08X", intdata);
557} 537}
558 538
559/* Enable or disable Rx checksum offload engine */ 539/* Enable or disable Tx & Rx checksum offload engines */
560static int smsc95xx_set_rx_csum(struct usbnet *dev, bool enable) 540static int smsc95xx_set_csums(struct usbnet *dev)
561{ 541{
542 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
562 u32 read_buf; 543 u32 read_buf;
563 int ret = smsc95xx_read_reg(dev, COE_CR, &read_buf); 544 int ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
564 if (ret < 0) { 545 if (ret < 0) {
@@ -566,7 +547,12 @@ static int smsc95xx_set_rx_csum(struct usbnet *dev, bool enable)
566 return ret; 547 return ret;
567 } 548 }
568 549
569 if (enable) 550 if (pdata->use_tx_csum)
551 read_buf |= Tx_COE_EN_;
552 else
553 read_buf &= ~Tx_COE_EN_;
554
555 if (pdata->use_rx_csum)
570 read_buf |= Rx_COE_EN_; 556 read_buf |= Rx_COE_EN_;
571 else 557 else
572 read_buf &= ~Rx_COE_EN_; 558 read_buf &= ~Rx_COE_EN_;
@@ -626,7 +612,26 @@ static int smsc95xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val)
626 612
627 pdata->use_rx_csum = !!val; 613 pdata->use_rx_csum = !!val;
628 614
629 return smsc95xx_set_rx_csum(dev, pdata->use_rx_csum); 615 return smsc95xx_set_csums(dev);
616}
617
618static u32 smsc95xx_ethtool_get_tx_csum(struct net_device *netdev)
619{
620 struct usbnet *dev = netdev_priv(netdev);
621 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
622
623 return pdata->use_tx_csum;
624}
625
626static int smsc95xx_ethtool_set_tx_csum(struct net_device *netdev, u32 val)
627{
628 struct usbnet *dev = netdev_priv(netdev);
629 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
630
631 pdata->use_tx_csum = !!val;
632
633 ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum);
634 return smsc95xx_set_csums(dev);
630} 635}
631 636
632static struct ethtool_ops smsc95xx_ethtool_ops = { 637static struct ethtool_ops smsc95xx_ethtool_ops = {
@@ -640,6 +645,8 @@ static struct ethtool_ops smsc95xx_ethtool_ops = {
640 .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len, 645 .get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
641 .get_eeprom = smsc95xx_ethtool_get_eeprom, 646 .get_eeprom = smsc95xx_ethtool_get_eeprom,
642 .set_eeprom = smsc95xx_ethtool_set_eeprom, 647 .set_eeprom = smsc95xx_ethtool_set_eeprom,
648 .get_tx_csum = smsc95xx_ethtool_get_tx_csum,
649 .set_tx_csum = smsc95xx_ethtool_set_tx_csum,
643 .get_rx_csum = smsc95xx_ethtool_get_rx_csum, 650 .get_rx_csum = smsc95xx_ethtool_get_rx_csum,
644 .set_rx_csum = smsc95xx_ethtool_set_rx_csum, 651 .set_rx_csum = smsc95xx_ethtool_set_rx_csum,
645}; 652};
@@ -757,9 +764,9 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
757static int smsc95xx_reset(struct usbnet *dev) 764static int smsc95xx_reset(struct usbnet *dev)
758{ 765{
759 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 766 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
767 struct net_device *netdev = dev->net;
760 u32 read_buf, write_buf, burst_cap; 768 u32 read_buf, write_buf, burst_cap;
761 int ret = 0, timeout; 769 int ret = 0, timeout;
762 DECLARE_MAC_BUF(mac);
763 770
764 if (netif_msg_ifup(dev)) 771 if (netif_msg_ifup(dev))
765 devdbg(dev, "entering smsc95xx_reset"); 772 devdbg(dev, "entering smsc95xx_reset");
@@ -818,8 +825,7 @@ static int smsc95xx_reset(struct usbnet *dev)
818 return ret; 825 return ret;
819 826
820 if (netif_msg_ifup(dev)) 827 if (netif_msg_ifup(dev))
821 devdbg(dev, "MAC Address: %s", 828 devdbg(dev, "MAC Address: %pM", dev->net->dev_addr);
822 print_mac(mac, dev->net->dev_addr));
823 829
824 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 830 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
825 if (ret < 0) { 831 if (ret < 0) {
@@ -970,10 +976,11 @@ static int smsc95xx_reset(struct usbnet *dev)
970 return ret; 976 return ret;
971 } 977 }
972 978
973 /* Enable or disable Rx checksum offload engine */ 979 /* Enable or disable checksum offload engines */
974 ret = smsc95xx_set_rx_csum(dev, pdata->use_rx_csum); 980 ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum);
981 ret = smsc95xx_set_csums(dev);
975 if (ret < 0) { 982 if (ret < 0) {
976 devwarn(dev, "Failed to set Rx csum offload: %d", ret); 983 devwarn(dev, "Failed to set csum offload: %d", ret);
977 return ret; 984 return ret;
978 } 985 }
979 986
@@ -1029,6 +1036,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1029 1036
1030 spin_lock_init(&pdata->mac_cr_lock); 1037 spin_lock_init(&pdata->mac_cr_lock);
1031 1038
1039 pdata->use_tx_csum = DEFAULT_TX_CSUM_ENABLE;
1032 pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE; 1040 pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE;
1033 1041
1034 /* Init all registers */ 1042 /* Init all registers */
@@ -1148,22 +1156,44 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1148 return 1; 1156 return 1;
1149} 1157}
1150 1158
1159static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb)
1160{
1161 int len = skb->data - skb->head;
1162 u16 high_16 = (u16)(skb->csum_offset + skb->csum_start - len);
1163 u16 low_16 = (u16)(skb->csum_start - len);
1164 return (high_16 << 16) | low_16;
1165}
1166
1151static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, 1167static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
1152 struct sk_buff *skb, gfp_t flags) 1168 struct sk_buff *skb, gfp_t flags)
1153{ 1169{
1170 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1171 bool csum = pdata->use_tx_csum && (skb->ip_summed == CHECKSUM_PARTIAL);
1172 int overhead = csum ? SMSC95XX_TX_OVERHEAD_CSUM : SMSC95XX_TX_OVERHEAD;
1154 u32 tx_cmd_a, tx_cmd_b; 1173 u32 tx_cmd_a, tx_cmd_b;
1155 1174
1156 if (skb_headroom(skb) < SMSC95XX_TX_OVERHEAD) { 1175 /* We do not advertise SG, so skbs should be already linearized */
1176 BUG_ON(skb_shinfo(skb)->nr_frags);
1177
1178 if (skb_headroom(skb) < overhead) {
1157 struct sk_buff *skb2 = skb_copy_expand(skb, 1179 struct sk_buff *skb2 = skb_copy_expand(skb,
1158 SMSC95XX_TX_OVERHEAD, 0, flags); 1180 overhead, 0, flags);
1159 dev_kfree_skb_any(skb); 1181 dev_kfree_skb_any(skb);
1160 skb = skb2; 1182 skb = skb2;
1161 if (!skb) 1183 if (!skb)
1162 return NULL; 1184 return NULL;
1163 } 1185 }
1164 1186
1187 if (csum) {
1188 u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
1189 skb_push(skb, 4);
1190 memcpy(skb->data, &csum_preamble, 4);
1191 }
1192
1165 skb_push(skb, 4); 1193 skb_push(skb, 4);
1166 tx_cmd_b = (u32)(skb->len - 4); 1194 tx_cmd_b = (u32)(skb->len - 4);
1195 if (csum)
1196 tx_cmd_b |= TX_CMD_B_CSUM_ENABLE;
1167 cpu_to_le32s(&tx_cmd_b); 1197 cpu_to_le32s(&tx_cmd_b);
1168 memcpy(skb->data, &tx_cmd_b, 4); 1198 memcpy(skb->data, &tx_cmd_b, 4);
1169 1199
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 02d25c743994..aa3149078888 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1125,7 +1125,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1125 struct usb_device *xdev; 1125 struct usb_device *xdev;
1126 int status; 1126 int status;
1127 const char *name; 1127 const char *name;
1128 DECLARE_MAC_BUF(mac);
1129 1128
1130 name = udev->dev.driver->name; 1129 name = udev->dev.driver->name;
1131 info = (struct driver_info *) prod->driver_info; 1130 info = (struct driver_info *) prod->driver_info;
@@ -1236,11 +1235,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1236 if (status) 1235 if (status)
1237 goto out3; 1236 goto out3;
1238 if (netif_msg_probe (dev)) 1237 if (netif_msg_probe (dev))
1239 devinfo (dev, "register '%s' at usb-%s-%s, %s, %s", 1238 devinfo (dev, "register '%s' at usb-%s-%s, %s, %pM",
1240 udev->dev.driver->name, 1239 udev->dev.driver->name,
1241 xdev->bus->bus_name, xdev->devpath, 1240 xdev->bus->bus_name, xdev->devpath,
1242 dev->driver_info->description, 1241 dev->driver_info->description,
1243 print_mac(mac, net->dev_addr)); 1242 net->dev_addr);
1244 1243
1245 // ok, it's ready to go. 1244 // ok, it's ready to go.
1246 usb_set_intfdata (udev, dev); 1245 usb_set_intfdata (udev, dev);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 31cd817f33f9..852d0e7c4e62 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -8,7 +8,6 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/list.h>
12#include <linux/netdevice.h> 11#include <linux/netdevice.h>
13#include <linux/ethtool.h> 12#include <linux/ethtool.h>
14#include <linux/etherdevice.h> 13#include <linux/etherdevice.h>
@@ -30,14 +29,10 @@ struct veth_net_stats {
30 29
31struct veth_priv { 30struct veth_priv {
32 struct net_device *peer; 31 struct net_device *peer;
33 struct net_device *dev;
34 struct list_head list;
35 struct veth_net_stats *stats; 32 struct veth_net_stats *stats;
36 unsigned ip_summed; 33 unsigned ip_summed;
37}; 34};
38 35
39static LIST_HEAD(veth_list);
40
41/* 36/*
42 * ethtool interface 37 * ethtool interface
43 */ 38 */
@@ -267,16 +262,20 @@ static void veth_dev_free(struct net_device *dev)
267 free_netdev(dev); 262 free_netdev(dev);
268} 263}
269 264
265static const struct net_device_ops veth_netdev_ops = {
266 .ndo_init = veth_dev_init,
267 .ndo_open = veth_open,
268 .ndo_start_xmit = veth_xmit,
269 .ndo_get_stats = veth_get_stats,
270};
271
270static void veth_setup(struct net_device *dev) 272static void veth_setup(struct net_device *dev)
271{ 273{
272 ether_setup(dev); 274 ether_setup(dev);
273 275
274 dev->hard_start_xmit = veth_xmit; 276 dev->netdev_ops = &veth_netdev_ops;
275 dev->get_stats = veth_get_stats;
276 dev->open = veth_open;
277 dev->ethtool_ops = &veth_ethtool_ops; 277 dev->ethtool_ops = &veth_ethtool_ops;
278 dev->features |= NETIF_F_LLTX; 278 dev->features |= NETIF_F_LLTX;
279 dev->init = veth_dev_init;
280 dev->destructor = veth_dev_free; 279 dev->destructor = veth_dev_free;
281} 280}
282 281
@@ -302,7 +301,7 @@ static int veth_device_event(struct notifier_block *unused,
302{ 301{
303 struct net_device *dev = ptr; 302 struct net_device *dev = ptr;
304 303
305 if (dev->open != veth_open) 304 if (dev->netdev_ops->ndo_open != veth_open)
306 goto out; 305 goto out;
307 306
308 switch (event) { 307 switch (event) {
@@ -420,14 +419,10 @@ static int veth_newlink(struct net_device *dev,
420 */ 419 */
421 420
422 priv = netdev_priv(dev); 421 priv = netdev_priv(dev);
423 priv->dev = dev;
424 priv->peer = peer; 422 priv->peer = peer;
425 list_add(&priv->list, &veth_list);
426 423
427 priv = netdev_priv(peer); 424 priv = netdev_priv(peer);
428 priv->dev = peer;
429 priv->peer = dev; 425 priv->peer = dev;
430 INIT_LIST_HEAD(&priv->list);
431 return 0; 426 return 0;
432 427
433err_register_dev: 428err_register_dev:
@@ -449,13 +444,6 @@ static void veth_dellink(struct net_device *dev)
449 priv = netdev_priv(dev); 444 priv = netdev_priv(dev);
450 peer = priv->peer; 445 peer = priv->peer;
451 446
452 if (!list_empty(&priv->list))
453 list_del(&priv->list);
454
455 priv = netdev_priv(peer);
456 if (!list_empty(&priv->list))
457 list_del(&priv->list);
458
459 unregister_netdevice(dev); 447 unregister_netdevice(dev);
460 unregister_netdevice(peer); 448 unregister_netdevice(peer);
461} 449}
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 5b7870080c56..ac07cc6e3cb2 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -191,12 +191,13 @@ IIId. Synchronization
191 191
192The driver runs as two independent, single-threaded flows of control. One 192The driver runs as two independent, single-threaded flows of control. One
193is the send-packet routine, which enforces single-threaded use by the 193is the send-packet routine, which enforces single-threaded use by the
194dev->priv->lock spinlock. The other thread is the interrupt handler, which 194netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
195is single threaded by the hardware and interrupt handling software. 195which is single threaded by the hardware and interrupt handling software.
196 196
197The send packet thread has partial control over the Tx ring. It locks the 197The send packet thread has partial control over the Tx ring. It locks the
198dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring 198netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
199is not available it stops the transmit queue by calling netif_stop_queue. 199the ring is not available it stops the transmit queue by
200calling netif_stop_queue.
200 201
201The interrupt handler has exclusive control over the Rx ring and records stats 202The interrupt handler has exclusive control over the Rx ring and records stats
202from the Tx ring. After reaping the stats, it marks the Tx queue entry as 203from the Tx ring. After reaping the stats, it marks the Tx queue entry as
@@ -588,7 +589,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
588 work_done = rhine_rx(dev, budget); 589 work_done = rhine_rx(dev, budget);
589 590
590 if (work_done < budget) { 591 if (work_done < budget) {
591 netif_rx_complete(dev, napi); 592 netif_rx_complete(napi);
592 593
593 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | 594 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
594 IntrRxDropped | IntrRxNoBuf | IntrTxAborted | 595 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
@@ -614,6 +615,20 @@ static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
614 rhine_reload_eeprom(pioaddr, dev); 615 rhine_reload_eeprom(pioaddr, dev);
615} 616}
616 617
618static const struct net_device_ops rhine_netdev_ops = {
619 .ndo_open = rhine_open,
620 .ndo_stop = rhine_close,
621 .ndo_start_xmit = rhine_start_tx,
622 .ndo_get_stats = rhine_get_stats,
623 .ndo_set_multicast_list = rhine_set_rx_mode,
624 .ndo_validate_addr = eth_validate_addr,
625 .ndo_do_ioctl = netdev_ioctl,
626 .ndo_tx_timeout = rhine_tx_timeout,
627#ifdef CONFIG_NET_POLL_CONTROLLER
628 .ndo_poll_controller = rhine_poll,
629#endif
630};
631
617static int __devinit rhine_init_one(struct pci_dev *pdev, 632static int __devinit rhine_init_one(struct pci_dev *pdev,
618 const struct pci_device_id *ent) 633 const struct pci_device_id *ent)
619{ 634{
@@ -631,7 +646,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
631#else 646#else
632 int bar = 0; 647 int bar = 0;
633#endif 648#endif
634 DECLARE_MAC_BUF(mac);
635 649
636/* when built into the kernel, we only print version if device is found */ 650/* when built into the kernel, we only print version if device is found */
637#ifndef MODULE 651#ifndef MODULE
@@ -765,18 +779,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
765 rp->mii_if.reg_num_mask = 0x1f; 779 rp->mii_if.reg_num_mask = 0x1f;
766 780
767 /* The chip-specific entries in the device structure. */ 781 /* The chip-specific entries in the device structure. */
768 dev->open = rhine_open; 782 dev->netdev_ops = &rhine_netdev_ops;
769 dev->hard_start_xmit = rhine_start_tx; 783 dev->ethtool_ops = &netdev_ethtool_ops,
770 dev->stop = rhine_close;
771 dev->get_stats = rhine_get_stats;
772 dev->set_multicast_list = rhine_set_rx_mode;
773 dev->do_ioctl = netdev_ioctl;
774 dev->ethtool_ops = &netdev_ethtool_ops;
775 dev->tx_timeout = rhine_tx_timeout;
776 dev->watchdog_timeo = TX_TIMEOUT; 784 dev->watchdog_timeo = TX_TIMEOUT;
777#ifdef CONFIG_NET_POLL_CONTROLLER 785
778 dev->poll_controller = rhine_poll;
779#endif
780 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); 786 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
781 787
782 if (rp->quirks & rqRhineI) 788 if (rp->quirks & rqRhineI)
@@ -787,14 +793,14 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
787 if (rc) 793 if (rc)
788 goto err_out_unmap; 794 goto err_out_unmap;
789 795
790 printk(KERN_INFO "%s: VIA %s at 0x%lx, %s, IRQ %d.\n", 796 printk(KERN_INFO "%s: VIA %s at 0x%lx, %pM, IRQ %d.\n",
791 dev->name, name, 797 dev->name, name,
792#ifdef USE_MMIO 798#ifdef USE_MMIO
793 memaddr, 799 memaddr,
794#else 800#else
795 (long)ioaddr, 801 (long)ioaddr,
796#endif 802#endif
797 print_mac(mac, dev->dev_addr), pdev->irq); 803 dev->dev_addr, pdev->irq);
798 804
799 pci_set_drvdata(pdev, dev); 805 pci_set_drvdata(pdev, dev);
800 806
@@ -1312,7 +1318,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1312 IntrPCIErr | IntrStatsMax | IntrLinkChange, 1318 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1313 ioaddr + IntrEnable); 1319 ioaddr + IntrEnable);
1314 1320
1315 netif_rx_schedule(dev, &rp->napi); 1321 netif_rx_schedule(&rp->napi);
1316 } 1322 }
1317 1323
1318 if (intr_status & (IntrTxErrSummary | IntrTxDone)) { 1324 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
@@ -1505,7 +1511,6 @@ static int rhine_rx(struct net_device *dev, int limit)
1505 } 1511 }
1506 skb->protocol = eth_type_trans(skb, dev); 1512 skb->protocol = eth_type_trans(skb, dev);
1507 netif_receive_skb(skb); 1513 netif_receive_skb(skb);
1508 dev->last_rx = jiffies;
1509 rp->stats.rx_bytes += pkt_len; 1514 rp->stats.rx_bytes += pkt_len;
1510 rp->stats.rx_packets++; 1515 rp->stats.rx_packets++;
1511 } 1516 }
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 11cb3e504e1c..58e25d090ae0 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -849,6 +849,20 @@ static int velocity_soft_reset(struct velocity_info *vptr)
849 return 0; 849 return 0;
850} 850}
851 851
852static const struct net_device_ops velocity_netdev_ops = {
853 .ndo_open = velocity_open,
854 .ndo_stop = velocity_close,
855 .ndo_start_xmit = velocity_xmit,
856 .ndo_get_stats = velocity_get_stats,
857 .ndo_validate_addr = eth_validate_addr,
858 .ndo_set_multicast_list = velocity_set_multi,
859 .ndo_change_mtu = velocity_change_mtu,
860 .ndo_do_ioctl = velocity_ioctl,
861 .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
862 .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
863 .ndo_vlan_rx_register = velocity_vlan_rx_register,
864};
865
852/** 866/**
853 * velocity_found1 - set up discovered velocity card 867 * velocity_found1 - set up discovered velocity card
854 * @pdev: PCI device 868 * @pdev: PCI device
@@ -958,18 +972,8 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
958 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); 972 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
959 973
960 dev->irq = pdev->irq; 974 dev->irq = pdev->irq;
961 dev->open = velocity_open; 975 dev->netdev_ops = &velocity_netdev_ops;
962 dev->hard_start_xmit = velocity_xmit;
963 dev->stop = velocity_close;
964 dev->get_stats = velocity_get_stats;
965 dev->set_multicast_list = velocity_set_multi;
966 dev->do_ioctl = velocity_ioctl;
967 dev->ethtool_ops = &velocity_ethtool_ops; 976 dev->ethtool_ops = &velocity_ethtool_ops;
968 dev->change_mtu = velocity_change_mtu;
969
970 dev->vlan_rx_add_vid = velocity_vlan_rx_add_vid;
971 dev->vlan_rx_kill_vid = velocity_vlan_rx_kill_vid;
972 dev->vlan_rx_register = velocity_vlan_rx_register;
973 977
974#ifdef VELOCITY_ZERO_COPY_SUPPORT 978#ifdef VELOCITY_ZERO_COPY_SUPPORT
975 dev->features |= NETIF_F_SG; 979 dev->features |= NETIF_F_SG;
@@ -1412,8 +1416,6 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
1412 1416
1413 rd->size |= RX_INTEN; 1417 rd->size |= RX_INTEN;
1414 1418
1415 vptr->dev->last_rx = jiffies;
1416
1417 rd_curr++; 1419 rd_curr++;
1418 if (rd_curr >= vptr->options.numrx) 1420 if (rd_curr >= vptr->options.numrx)
1419 rd_curr = 0; 1421 rd_curr = 0;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0196a0df9021..b7004ff36451 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -34,6 +34,7 @@ module_param(gso, bool, 0444);
34 34
35/* FIXME: MTU in config. */ 35/* FIXME: MTU in config. */
36#define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN) 36#define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN)
37#define GOOD_COPY_LEN 128
37 38
38struct virtnet_info 39struct virtnet_info
39{ 40{
@@ -58,6 +59,9 @@ struct virtnet_info
58 /* I like... big packets and I cannot lie! */ 59 /* I like... big packets and I cannot lie! */
59 bool big_packets; 60 bool big_packets;
60 61
62 /* Host will merge rx buffers for big packets (shake it! shake it!) */
63 bool mergeable_rx_bufs;
64
61 /* Receive & send queues. */ 65 /* Receive & send queues. */
62 struct sk_buff_head recv; 66 struct sk_buff_head recv;
63 struct sk_buff_head send; 67 struct sk_buff_head send;
@@ -66,22 +70,27 @@ struct virtnet_info
66 struct page *pages; 70 struct page *pages;
67}; 71};
68 72
69static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb) 73static inline void *skb_vnet_hdr(struct sk_buff *skb)
70{ 74{
71 return (struct virtio_net_hdr *)skb->cb; 75 return (struct virtio_net_hdr *)skb->cb;
72} 76}
73 77
74static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
75{
76 sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
77}
78
79static void give_a_page(struct virtnet_info *vi, struct page *page) 78static void give_a_page(struct virtnet_info *vi, struct page *page)
80{ 79{
81 page->private = (unsigned long)vi->pages; 80 page->private = (unsigned long)vi->pages;
82 vi->pages = page; 81 vi->pages = page;
83} 82}
84 83
84static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
85{
86 unsigned int i;
87
88 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
89 give_a_page(vi, skb_shinfo(skb)->frags[i].page);
90 skb_shinfo(skb)->nr_frags = 0;
91 skb->data_len = 0;
92}
93
85static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask) 94static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
86{ 95{
87 struct page *p = vi->pages; 96 struct page *p = vi->pages;
@@ -111,31 +120,97 @@ static void skb_xmit_done(struct virtqueue *svq)
111static void receive_skb(struct net_device *dev, struct sk_buff *skb, 120static void receive_skb(struct net_device *dev, struct sk_buff *skb,
112 unsigned len) 121 unsigned len)
113{ 122{
123 struct virtnet_info *vi = netdev_priv(dev);
114 struct virtio_net_hdr *hdr = skb_vnet_hdr(skb); 124 struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
115 int err; 125 int err;
126 int i;
116 127
117 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { 128 if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
118 pr_debug("%s: short packet %i\n", dev->name, len); 129 pr_debug("%s: short packet %i\n", dev->name, len);
119 dev->stats.rx_length_errors++; 130 dev->stats.rx_length_errors++;
120 goto drop; 131 goto drop;
121 } 132 }
122 len -= sizeof(struct virtio_net_hdr);
123 133
124 if (len <= MAX_PACKET_LEN) { 134 if (vi->mergeable_rx_bufs) {
125 unsigned int i; 135 struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
136 unsigned int copy;
137 char *p = page_address(skb_shinfo(skb)->frags[0].page);
126 138
127 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 139 if (len > PAGE_SIZE)
128 give_a_page(dev->priv, skb_shinfo(skb)->frags[i].page); 140 len = PAGE_SIZE;
129 skb->data_len = 0; 141 len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
130 skb_shinfo(skb)->nr_frags = 0;
131 }
132 142
133 err = pskb_trim(skb, len); 143 memcpy(hdr, p, sizeof(*mhdr));
134 if (err) { 144 p += sizeof(*mhdr);
135 pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err); 145
136 dev->stats.rx_dropped++; 146 copy = len;
137 goto drop; 147 if (copy > skb_tailroom(skb))
148 copy = skb_tailroom(skb);
149
150 memcpy(skb_put(skb, copy), p, copy);
151
152 len -= copy;
153
154 if (!len) {
155 give_a_page(vi, skb_shinfo(skb)->frags[0].page);
156 skb_shinfo(skb)->nr_frags--;
157 } else {
158 skb_shinfo(skb)->frags[0].page_offset +=
159 sizeof(*mhdr) + copy;
160 skb_shinfo(skb)->frags[0].size = len;
161 skb->data_len += len;
162 skb->len += len;
163 }
164
165 while (--mhdr->num_buffers) {
166 struct sk_buff *nskb;
167
168 i = skb_shinfo(skb)->nr_frags;
169 if (i >= MAX_SKB_FRAGS) {
170 pr_debug("%s: packet too long %d\n", dev->name,
171 len);
172 dev->stats.rx_length_errors++;
173 goto drop;
174 }
175
176 nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
177 if (!nskb) {
178 pr_debug("%s: rx error: %d buffers missing\n",
179 dev->name, mhdr->num_buffers);
180 dev->stats.rx_length_errors++;
181 goto drop;
182 }
183
184 __skb_unlink(nskb, &vi->recv);
185 vi->num--;
186
187 skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
188 skb_shinfo(nskb)->nr_frags = 0;
189 kfree_skb(nskb);
190
191 if (len > PAGE_SIZE)
192 len = PAGE_SIZE;
193
194 skb_shinfo(skb)->frags[i].size = len;
195 skb_shinfo(skb)->nr_frags++;
196 skb->data_len += len;
197 skb->len += len;
198 }
199 } else {
200 len -= sizeof(struct virtio_net_hdr);
201
202 if (len <= MAX_PACKET_LEN)
203 trim_pages(vi, skb);
204
205 err = pskb_trim(skb, len);
206 if (err) {
207 pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
208 len, err);
209 dev->stats.rx_dropped++;
210 goto drop;
211 }
138 } 212 }
213
139 skb->truesize += skb->data_len; 214 skb->truesize += skb->data_len;
140 dev->stats.rx_bytes += skb->len; 215 dev->stats.rx_bytes += skb->len;
141 dev->stats.rx_packets++; 216 dev->stats.rx_packets++;
@@ -194,7 +269,7 @@ drop:
194 dev_kfree_skb(skb); 269 dev_kfree_skb(skb);
195} 270}
196 271
197static void try_fill_recv(struct virtnet_info *vi) 272static void try_fill_recv_maxbufs(struct virtnet_info *vi)
198{ 273{
199 struct sk_buff *skb; 274 struct sk_buff *skb;
200 struct scatterlist sg[2+MAX_SKB_FRAGS]; 275 struct scatterlist sg[2+MAX_SKB_FRAGS];
@@ -202,12 +277,16 @@ static void try_fill_recv(struct virtnet_info *vi)
202 277
203 sg_init_table(sg, 2+MAX_SKB_FRAGS); 278 sg_init_table(sg, 2+MAX_SKB_FRAGS);
204 for (;;) { 279 for (;;) {
280 struct virtio_net_hdr *hdr;
281
205 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN); 282 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
206 if (unlikely(!skb)) 283 if (unlikely(!skb))
207 break; 284 break;
208 285
209 skb_put(skb, MAX_PACKET_LEN); 286 skb_put(skb, MAX_PACKET_LEN);
210 vnet_hdr_to_sg(sg, skb); 287
288 hdr = skb_vnet_hdr(skb);
289 sg_init_one(sg, hdr, sizeof(*hdr));
211 290
212 if (vi->big_packets) { 291 if (vi->big_packets) {
213 for (i = 0; i < MAX_SKB_FRAGS; i++) { 292 for (i = 0; i < MAX_SKB_FRAGS; i++) {
@@ -232,6 +311,55 @@ static void try_fill_recv(struct virtnet_info *vi)
232 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb); 311 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
233 if (err) { 312 if (err) {
234 skb_unlink(skb, &vi->recv); 313 skb_unlink(skb, &vi->recv);
314 trim_pages(vi, skb);
315 kfree_skb(skb);
316 break;
317 }
318 vi->num++;
319 }
320 if (unlikely(vi->num > vi->max))
321 vi->max = vi->num;
322 vi->rvq->vq_ops->kick(vi->rvq);
323}
324
325static void try_fill_recv(struct virtnet_info *vi)
326{
327 struct sk_buff *skb;
328 struct scatterlist sg[1];
329 int err;
330
331 if (!vi->mergeable_rx_bufs) {
332 try_fill_recv_maxbufs(vi);
333 return;
334 }
335
336 for (;;) {
337 skb_frag_t *f;
338
339 skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
340 if (unlikely(!skb))
341 break;
342
343 skb_reserve(skb, NET_IP_ALIGN);
344
345 f = &skb_shinfo(skb)->frags[0];
346 f->page = get_a_page(vi, GFP_ATOMIC);
347 if (!f->page) {
348 kfree_skb(skb);
349 break;
350 }
351
352 f->page_offset = 0;
353 f->size = PAGE_SIZE;
354
355 skb_shinfo(skb)->nr_frags++;
356
357 sg_init_one(sg, page_address(f->page), PAGE_SIZE);
358 skb_queue_head(&vi->recv, skb);
359
360 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
361 if (err) {
362 skb_unlink(skb, &vi->recv);
235 kfree_skb(skb); 363 kfree_skb(skb);
236 break; 364 break;
237 } 365 }
@@ -246,9 +374,9 @@ static void skb_recv_done(struct virtqueue *rvq)
246{ 374{
247 struct virtnet_info *vi = rvq->vdev->priv; 375 struct virtnet_info *vi = rvq->vdev->priv;
248 /* Schedule NAPI, Suppress further interrupts if successful. */ 376 /* Schedule NAPI, Suppress further interrupts if successful. */
249 if (netif_rx_schedule_prep(vi->dev, &vi->napi)) { 377 if (netif_rx_schedule_prep(&vi->napi)) {
250 rvq->vq_ops->disable_cb(rvq); 378 rvq->vq_ops->disable_cb(rvq);
251 __netif_rx_schedule(vi->dev, &vi->napi); 379 __netif_rx_schedule(&vi->napi);
252 } 380 }
253} 381}
254 382
@@ -274,11 +402,11 @@ again:
274 402
275 /* Out of packets? */ 403 /* Out of packets? */
276 if (received < budget) { 404 if (received < budget) {
277 netif_rx_complete(vi->dev, napi); 405 netif_rx_complete(napi);
278 if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) 406 if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
279 && napi_schedule_prep(napi)) { 407 && napi_schedule_prep(napi)) {
280 vi->rvq->vq_ops->disable_cb(vi->rvq); 408 vi->rvq->vq_ops->disable_cb(vi->rvq);
281 __netif_rx_schedule(vi->dev, napi); 409 __netif_rx_schedule(napi);
282 goto again; 410 goto again;
283 } 411 }
284 } 412 }
@@ -320,17 +448,14 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
320{ 448{
321 int num, err; 449 int num, err;
322 struct scatterlist sg[2+MAX_SKB_FRAGS]; 450 struct scatterlist sg[2+MAX_SKB_FRAGS];
323 struct virtio_net_hdr *hdr; 451 struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
452 struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
324 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 453 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
325 454
326 sg_init_table(sg, 2+MAX_SKB_FRAGS); 455 sg_init_table(sg, 2+MAX_SKB_FRAGS);
327 456
328 pr_debug("%s: xmit %p " MAC_FMT "\n", vi->dev->name, skb, 457 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
329 dest[0], dest[1], dest[2],
330 dest[3], dest[4], dest[5]);
331 458
332 /* Encode metadata header at front. */
333 hdr = skb_vnet_hdr(skb);
334 if (skb->ip_summed == CHECKSUM_PARTIAL) { 459 if (skb->ip_summed == CHECKSUM_PARTIAL) {
335 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 460 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
336 hdr->csum_start = skb->csum_start - skb_headroom(skb); 461 hdr->csum_start = skb->csum_start - skb_headroom(skb);
@@ -358,7 +483,14 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
358 hdr->gso_size = hdr->hdr_len = 0; 483 hdr->gso_size = hdr->hdr_len = 0;
359 } 484 }
360 485
361 vnet_hdr_to_sg(sg, skb); 486 mhdr->num_buffers = 0;
487
488 /* Encode metadata header at front. */
489 if (vi->mergeable_rx_bufs)
490 sg_init_one(sg, mhdr, sizeof(*mhdr));
491 else
492 sg_init_one(sg, hdr, sizeof(*hdr));
493
362 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; 494 num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
363 495
364 err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); 496 err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
@@ -448,9 +580,9 @@ static int virtnet_open(struct net_device *dev)
448 * won't get another interrupt, so process any outstanding packets 580 * won't get another interrupt, so process any outstanding packets
449 * now. virtnet_poll wants re-enable the queue, so we disable here. 581 * now. virtnet_poll wants re-enable the queue, so we disable here.
450 * We synchronize against interrupts via NAPI_STATE_SCHED */ 582 * We synchronize against interrupts via NAPI_STATE_SCHED */
451 if (netif_rx_schedule_prep(dev, &vi->napi)) { 583 if (netif_rx_schedule_prep(&vi->napi)) {
452 vi->rvq->vq_ops->disable_cb(vi->rvq); 584 vi->rvq->vq_ops->disable_cb(vi->rvq);
453 __netif_rx_schedule(dev, &vi->napi); 585 __netif_rx_schedule(&vi->napi);
454 } 586 }
455 return 0; 587 return 0;
456} 588}
@@ -478,8 +610,20 @@ static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
478static struct ethtool_ops virtnet_ethtool_ops = { 610static struct ethtool_ops virtnet_ethtool_ops = {
479 .set_tx_csum = virtnet_set_tx_csum, 611 .set_tx_csum = virtnet_set_tx_csum,
480 .set_sg = ethtool_op_set_sg, 612 .set_sg = ethtool_op_set_sg,
613 .set_tso = ethtool_op_set_tso,
481}; 614};
482 615
616#define MIN_MTU 68
617#define MAX_MTU 65535
618
619static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
620{
621 if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
622 return -EINVAL;
623 dev->mtu = new_mtu;
624 return 0;
625}
626
483static int virtnet_probe(struct virtio_device *vdev) 627static int virtnet_probe(struct virtio_device *vdev)
484{ 628{
485 int err; 629 int err;
@@ -495,6 +639,7 @@ static int virtnet_probe(struct virtio_device *vdev)
495 dev->open = virtnet_open; 639 dev->open = virtnet_open;
496 dev->stop = virtnet_close; 640 dev->stop = virtnet_close;
497 dev->hard_start_xmit = start_xmit; 641 dev->hard_start_xmit = start_xmit;
642 dev->change_mtu = virtnet_change_mtu;
498 dev->features = NETIF_F_HIGHDMA; 643 dev->features = NETIF_F_HIGHDMA;
499#ifdef CONFIG_NET_POLL_CONTROLLER 644#ifdef CONFIG_NET_POLL_CONTROLLER
500 dev->poll_controller = virtnet_netpoll; 645 dev->poll_controller = virtnet_netpoll;
@@ -547,6 +692,9 @@ static int virtnet_probe(struct virtio_device *vdev)
547 || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) 692 || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
548 vi->big_packets = true; 693 vi->big_packets = true;
549 694
695 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
696 vi->mergeable_rx_bufs = true;
697
550 /* We expect two virtqueues, receive then send. */ 698 /* We expect two virtqueues, receive then send. */
551 vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); 699 vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
552 if (IS_ERR(vi->rvq)) { 700 if (IS_ERR(vi->rvq)) {
@@ -639,6 +787,7 @@ static unsigned int features[] = {
639 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, 787 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
640 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 788 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
641 VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */ 789 VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
790 VIRTIO_NET_F_MRG_RXBUF,
642 VIRTIO_F_NOTIFY_ON_EMPTY, 791 VIRTIO_F_NOTIFY_ON_EMPTY,
643}; 792};
644 793
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 21efd99b9294..d08ce6a264cb 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -207,6 +207,8 @@ config PC300
207 tristate "Cyclades-PC300 support (RS-232/V.35, X.21, T1/E1 boards)" 207 tristate "Cyclades-PC300 support (RS-232/V.35, X.21, T1/E1 boards)"
208 depends on HDLC && PCI && BROKEN 208 depends on HDLC && PCI && BROKEN
209 ---help--- 209 ---help---
210 This driver is broken because of struct tty_driver change.
211
210 Driver for the Cyclades-PC300 synchronous communication boards. 212 Driver for the Cyclades-PC300 synchronous communication boards.
211 213
212 These boards provide synchronous serial interfaces to your 214 These boards provide synchronous serial interfaces to your
@@ -333,6 +335,13 @@ config DSCC4_PCI_RST
333 335
334 Say Y if your card supports this feature. 336 Say Y if your card supports this feature.
335 337
338config IXP4XX_HSS
339 tristate "Intel IXP4xx HSS (synchronous serial port) support"
340 depends on HDLC && ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
341 help
342 Say Y here if you want to use built-in HSS ports
343 on IXP4xx processor.
344
336config DLCI 345config DLCI
337 tristate "Frame Relay DLCI support" 346 tristate "Frame Relay DLCI support"
338 ---help--- 347 ---help---
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index 102549605d09..19d14bc28356 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_HDLC_RAW) += hdlc_raw.o
14obj-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o 14obj-$(CONFIG_HDLC_RAW_ETH) += hdlc_raw_eth.o
15obj-$(CONFIG_HDLC_CISCO) += hdlc_cisco.o 15obj-$(CONFIG_HDLC_CISCO) += hdlc_cisco.o
16obj-$(CONFIG_HDLC_FR) += hdlc_fr.o 16obj-$(CONFIG_HDLC_FR) += hdlc_fr.o
17obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o syncppp.o 17obj-$(CONFIG_HDLC_PPP) += hdlc_ppp.o
18obj-$(CONFIG_HDLC_X25) += hdlc_x25.o 18obj-$(CONFIG_HDLC_X25) += hdlc_x25.o
19 19
20pc300-y := pc300_drv.o 20pc300-y := pc300_drv.o
@@ -41,6 +41,7 @@ obj-$(CONFIG_C101) += c101.o
41obj-$(CONFIG_WANXL) += wanxl.o 41obj-$(CONFIG_WANXL) += wanxl.o
42obj-$(CONFIG_PCI200SYN) += pci200syn.o 42obj-$(CONFIG_PCI200SYN) += pci200syn.o
43obj-$(CONFIG_PC300TOO) += pc300too.o 43obj-$(CONFIG_PC300TOO) += pc300too.o
44obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o
44 45
45clean-files := wanxlfw.inc 46clean-files := wanxlfw.inc
46$(obj)/wanxl.o: $(obj)/wanxlfw.inc 47$(obj)/wanxl.o: $(obj)/wanxlfw.inc
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index c8e563106a4a..b46897996f7e 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -88,7 +88,7 @@ static card_t **new_card = &first_card;
88/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */ 88/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */
89#define sca_outw(value, reg, card) do { \ 89#define sca_outw(value, reg, card) do { \
90 writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \ 90 writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \
91 writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg+1));\ 91 writeb((value >> 8 ) & 0xFF, (card)->win0base + C101_SCA + (reg + 1));\
92} while(0) 92} while(0)
93 93
94#define port_to_card(port) (port) 94#define port_to_card(port) (port)
@@ -113,7 +113,7 @@ static inline void openwin(card_t *card, u8 page)
113} 113}
114 114
115 115
116#include "hd6457x.c" 116#include "hd64570.c"
117 117
118 118
119static inline void set_carrier(port_t *port) 119static inline void set_carrier(port_t *port)
@@ -381,7 +381,7 @@ static int __init c101_run(unsigned long irq, unsigned long winbase)
381 return result; 381 return result;
382 } 382 }
383 383
384 sca_init_sync_port(card); /* Set up C101 memory */ 384 sca_init_port(card); /* Set up C101 memory */
385 set_carrier(card); 385 set_carrier(card);
386 386
387 printk(KERN_INFO "%s: Moxa C101 on IRQ%u," 387 printk(KERN_INFO "%s: Moxa C101 on IRQ%u,"
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7f97f8d08c39..d80b72e22dea 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -754,7 +754,6 @@ static int cosa_net_rx_done(struct channel_data *chan)
754 chan->netdev->stats.rx_bytes += chan->cosa->rxsize; 754 chan->netdev->stats.rx_bytes += chan->cosa->rxsize;
755 netif_rx(chan->rx_skb); 755 netif_rx(chan->rx_skb);
756 chan->rx_skb = NULL; 756 chan->rx_skb = NULL;
757 chan->netdev->last_rx = jiffies;
758 return 0; 757 return 0;
759} 758}
760 759
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index 5a7303dc0965..5fa52923efa8 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -199,6 +199,8 @@ static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
199static struct net_device * 199static struct net_device *
200 cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte); 200 cycx_x25_get_dev_by_dte_addr(struct wan_device *wandev, char *dte);
201 201
202static void cycx_x25_chan_setup(struct net_device *dev);
203
202#ifdef CYCLOMX_X25_DEBUG 204#ifdef CYCLOMX_X25_DEBUG
203static void hex_dump(char *msg, unsigned char *p, int len); 205static void hex_dump(char *msg, unsigned char *p, int len);
204static void cycx_x25_dump_config(struct cycx_x25_config *conf); 206static void cycx_x25_dump_config(struct cycx_x25_config *conf);
@@ -353,6 +355,12 @@ static int cycx_wan_update(struct wan_device *wandev)
353 return 0; 355 return 0;
354} 356}
355 357
358/* callback to initialize device */
359static void cycx_x25_chan_setup(struct net_device *dev)
360{
361 dev->init = cycx_netdevice_init;
362}
363
356/* Create new logical channel. 364/* Create new logical channel.
357 * This routine is called by the router when ROUTER_IFNEW IOCTL is being 365 * This routine is called by the router when ROUTER_IFNEW IOCTL is being
358 * handled. 366 * handled.
@@ -376,11 +384,12 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
376 return -EINVAL; 384 return -EINVAL;
377 } 385 }
378 386
379 /* allocate and initialize private data */ 387 dev = alloc_netdev(sizeof(struct cycx_x25_channel), conf->name,
380 chan = kzalloc(sizeof(struct cycx_x25_channel), GFP_KERNEL); 388 cycx_x25_chan_setup);
381 if (!chan) 389 if (!dev)
382 return -ENOMEM; 390 return -ENOMEM;
383 391
392 chan = netdev_priv(dev);
384 strcpy(chan->name, conf->name); 393 strcpy(chan->name, conf->name);
385 chan->card = card; 394 chan->card = card;
386 chan->link = conf->port; 395 chan->link = conf->port;
@@ -396,14 +405,14 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
396 if (len > WAN_ADDRESS_SZ) { 405 if (len > WAN_ADDRESS_SZ) {
397 printk(KERN_ERR "%s: %s local addr too long!\n", 406 printk(KERN_ERR "%s: %s local addr too long!\n",
398 wandev->name, chan->name); 407 wandev->name, chan->name);
399 kfree(chan); 408 err = -EINVAL;
400 return -EINVAL; 409 goto error;
401 } else { 410 } else {
402 chan->local_addr = kmalloc(len + 1, GFP_KERNEL); 411 chan->local_addr = kmalloc(len + 1, GFP_KERNEL);
403 412
404 if (!chan->local_addr) { 413 if (!chan->local_addr) {
405 kfree(chan); 414 err = -ENOMEM;
406 return -ENOMEM; 415 goto error;
407 } 416 }
408 } 417 }
409 418
@@ -429,41 +438,31 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
429 "%s: PVC %u is out of range on interface %s!\n", 438 "%s: PVC %u is out of range on interface %s!\n",
430 wandev->name, lcn, chan->name); 439 wandev->name, lcn, chan->name);
431 err = -EINVAL; 440 err = -EINVAL;
441 goto error;
432 } 442 }
433 } else { 443 } else {
434 printk(KERN_ERR "%s: invalid media address on interface %s!\n", 444 printk(KERN_ERR "%s: invalid media address on interface %s!\n",
435 wandev->name, chan->name); 445 wandev->name, chan->name);
436 err = -EINVAL; 446 err = -EINVAL;
447 goto error;
437 } 448 }
438 449
439 if (err) {
440 kfree(chan->local_addr);
441 kfree(chan);
442 return err;
443 }
444
445 /* prepare network device data space for registration */
446 strcpy(dev->name, chan->name);
447 dev->init = cycx_netdevice_init;
448 dev->priv = chan;
449
450 return 0; 450 return 0;
451
452error:
453 free_netdev(dev);
454 return err;
451} 455}
452 456
453/* Delete logical channel. */ 457/* Delete logical channel. */
454static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev) 458static int cycx_wan_del_if(struct wan_device *wandev, struct net_device *dev)
455{ 459{
456 if (dev->priv) { 460 struct cycx_x25_channel *chan = netdev_priv(dev);
457 struct cycx_x25_channel *chan = dev->priv;
458 461
459 if (chan->svc) { 462 if (chan->svc) {
460 kfree(chan->local_addr); 463 kfree(chan->local_addr);
461 if (chan->state == WAN_CONNECTED) 464 if (chan->state == WAN_CONNECTED)
462 del_timer(&chan->timer); 465 del_timer(&chan->timer);
463 }
464
465 kfree(chan);
466 dev->priv = NULL;
467 } 466 }
468 467
469 return 0; 468 return 0;
@@ -484,7 +483,7 @@ static const struct header_ops cycx_header_ops = {
484 * registration. */ 483 * registration. */
485static int cycx_netdevice_init(struct net_device *dev) 484static int cycx_netdevice_init(struct net_device *dev)
486{ 485{
487 struct cycx_x25_channel *chan = dev->priv; 486 struct cycx_x25_channel *chan = netdev_priv(dev);
488 struct cycx_device *card = chan->card; 487 struct cycx_device *card = chan->card;
489 struct wan_device *wandev = &card->wandev; 488 struct wan_device *wandev = &card->wandev;
490 489
@@ -542,7 +541,7 @@ static int cycx_netdevice_open(struct net_device *dev)
542 * o if there's no more open channels then disconnect physical link. */ 541 * o if there's no more open channels then disconnect physical link. */
543static int cycx_netdevice_stop(struct net_device *dev) 542static int cycx_netdevice_stop(struct net_device *dev)
544{ 543{
545 struct cycx_x25_channel *chan = dev->priv; 544 struct cycx_x25_channel *chan = netdev_priv(dev);
546 545
547 netif_stop_queue(dev); 546 netif_stop_queue(dev);
548 547
@@ -596,7 +595,7 @@ static int cycx_netdevice_rebuild_header(struct sk_buff *skb)
596static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb, 595static int cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
597 struct net_device *dev) 596 struct net_device *dev)
598{ 597{
599 struct cycx_x25_channel *chan = dev->priv; 598 struct cycx_x25_channel *chan = netdev_priv(dev);
600 struct cycx_device *card = chan->card; 599 struct cycx_device *card = chan->card;
601 600
602 if (!chan->svc) 601 if (!chan->svc)
@@ -670,7 +669,7 @@ free_packet:
670 * Return a pointer to struct net_device_stats */ 669 * Return a pointer to struct net_device_stats */
671static struct net_device_stats *cycx_netdevice_get_stats(struct net_device *dev) 670static struct net_device_stats *cycx_netdevice_get_stats(struct net_device *dev)
672{ 671{
673 struct cycx_x25_channel *chan = dev->priv; 672 struct cycx_x25_channel *chan = netdev_priv(dev);
674 673
675 return chan ? &chan->ifstats : NULL; 674 return chan ? &chan->ifstats : NULL;
676} 675}
@@ -783,7 +782,7 @@ static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
783 return; 782 return;
784 } 783 }
785 784
786 chan = dev->priv; 785 chan = netdev_priv(dev);
787 reset_timer(dev); 786 reset_timer(dev);
788 787
789 if (chan->drop_sequence) { 788 if (chan->drop_sequence) {
@@ -843,7 +842,6 @@ static void cycx_x25_irq_rx(struct cycx_device *card, struct cycx_x25_cmd *cmd)
843 842
844 skb_reset_mac_header(skb); 843 skb_reset_mac_header(skb);
845 netif_rx(skb); 844 netif_rx(skb);
846 dev->last_rx = jiffies; /* timestamp */
847} 845}
848 846
849/* Connect interrupt handler. */ 847/* Connect interrupt handler. */
@@ -884,7 +882,7 @@ static void cycx_x25_irq_connect(struct cycx_device *card,
884 return; 882 return;
885 } 883 }
886 884
887 chan = dev->priv; 885 chan = netdev_priv(dev);
888 chan->lcn = lcn; 886 chan->lcn = lcn;
889 cycx_x25_connect_response(card, chan); 887 cycx_x25_connect_response(card, chan);
890 cycx_x25_set_chan_state(dev, WAN_CONNECTED); 888 cycx_x25_set_chan_state(dev, WAN_CONNECTED);
@@ -914,7 +912,7 @@ static void cycx_x25_irq_connect_confirm(struct cycx_device *card,
914 } 912 }
915 913
916 clear_bit(--key, (void*)&card->u.x.connection_keys); 914 clear_bit(--key, (void*)&card->u.x.connection_keys);
917 chan = dev->priv; 915 chan = netdev_priv(dev);
918 chan->lcn = lcn; 916 chan->lcn = lcn;
919 cycx_x25_set_chan_state(dev, WAN_CONNECTED); 917 cycx_x25_set_chan_state(dev, WAN_CONNECTED);
920} 918}
@@ -954,7 +952,7 @@ static void cycx_x25_irq_disconnect(struct cycx_device *card,
954 952
955 dev = cycx_x25_get_dev_by_lcn(wandev, lcn); 953 dev = cycx_x25_get_dev_by_lcn(wandev, lcn);
956 if (dev) { 954 if (dev) {
957 struct cycx_x25_channel *chan = dev->priv; 955 struct cycx_x25_channel *chan = netdev_priv(dev);
958 956
959 cycx_x25_disconnect_response(card, chan->link, lcn); 957 cycx_x25_disconnect_response(card, chan->link, lcn);
960 cycx_x25_set_chan_state(dev, WAN_DISCONNECTED); 958 cycx_x25_set_chan_state(dev, WAN_DISCONNECTED);
@@ -1302,7 +1300,7 @@ static struct net_device *cycx_x25_get_dev_by_lcn(struct wan_device *wandev,
1302 struct cycx_x25_channel *chan; 1300 struct cycx_x25_channel *chan;
1303 1301
1304 while (dev) { 1302 while (dev) {
1305 chan = (struct cycx_x25_channel*)dev->priv; 1303 chan = netdev_priv(dev);
1306 1304
1307 if (chan->lcn == lcn) 1305 if (chan->lcn == lcn)
1308 break; 1306 break;
@@ -1319,7 +1317,7 @@ static struct net_device *
1319 struct cycx_x25_channel *chan; 1317 struct cycx_x25_channel *chan;
1320 1318
1321 while (dev) { 1319 while (dev) {
1322 chan = (struct cycx_x25_channel*)dev->priv; 1320 chan = netdev_priv(dev);
1323 1321
1324 if (!strcmp(chan->addr, dte)) 1322 if (!strcmp(chan->addr, dte))
1325 break; 1323 break;
@@ -1337,7 +1335,7 @@ static struct net_device *
1337 * <0 failure */ 1335 * <0 failure */
1338static int cycx_x25_chan_connect(struct net_device *dev) 1336static int cycx_x25_chan_connect(struct net_device *dev)
1339{ 1337{
1340 struct cycx_x25_channel *chan = dev->priv; 1338 struct cycx_x25_channel *chan = netdev_priv(dev);
1341 struct cycx_device *card = chan->card; 1339 struct cycx_device *card = chan->card;
1342 1340
1343 if (chan->svc) { 1341 if (chan->svc) {
@@ -1362,7 +1360,7 @@ static int cycx_x25_chan_connect(struct net_device *dev)
1362 * o if SVC then clear X.25 call */ 1360 * o if SVC then clear X.25 call */
1363static void cycx_x25_chan_disconnect(struct net_device *dev) 1361static void cycx_x25_chan_disconnect(struct net_device *dev)
1364{ 1362{
1365 struct cycx_x25_channel *chan = dev->priv; 1363 struct cycx_x25_channel *chan = netdev_priv(dev);
1366 1364
1367 if (chan->svc) { 1365 if (chan->svc) {
1368 x25_clear_call(chan->card, chan->link, chan->lcn, 0, 0); 1366 x25_clear_call(chan->card, chan->link, chan->lcn, 0, 0);
@@ -1375,7 +1373,7 @@ static void cycx_x25_chan_disconnect(struct net_device *dev)
1375static void cycx_x25_chan_timer(unsigned long d) 1373static void cycx_x25_chan_timer(unsigned long d)
1376{ 1374{
1377 struct net_device *dev = (struct net_device *)d; 1375 struct net_device *dev = (struct net_device *)d;
1378 struct cycx_x25_channel *chan = dev->priv; 1376 struct cycx_x25_channel *chan = netdev_priv(dev);
1379 1377
1380 if (chan->state == WAN_CONNECTED) 1378 if (chan->state == WAN_CONNECTED)
1381 cycx_x25_chan_disconnect(dev); 1379 cycx_x25_chan_disconnect(dev);
@@ -1387,7 +1385,7 @@ static void cycx_x25_chan_timer(unsigned long d)
1387/* Set logical channel state. */ 1385/* Set logical channel state. */
1388static void cycx_x25_set_chan_state(struct net_device *dev, u8 state) 1386static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
1389{ 1387{
1390 struct cycx_x25_channel *chan = dev->priv; 1388 struct cycx_x25_channel *chan = netdev_priv(dev);
1391 struct cycx_device *card = chan->card; 1389 struct cycx_device *card = chan->card;
1392 unsigned long flags; 1390 unsigned long flags;
1393 char *string_state = NULL; 1391 char *string_state = NULL;
@@ -1453,7 +1451,7 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
1453 * to the router. */ 1451 * to the router. */
1454static int cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb) 1452static int cycx_x25_chan_send(struct net_device *dev, struct sk_buff *skb)
1455{ 1453{
1456 struct cycx_x25_channel *chan = dev->priv; 1454 struct cycx_x25_channel *chan = netdev_priv(dev);
1457 struct cycx_device *card = chan->card; 1455 struct cycx_device *card = chan->card;
1458 int bitm = 0; /* final packet */ 1456 int bitm = 0; /* final packet */
1459 unsigned len = skb->len; 1457 unsigned len = skb->len;
@@ -1494,7 +1492,6 @@ static void cycx_x25_chan_send_event(struct net_device *dev, u8 event)
1494 1492
1495 skb->protocol = x25_type_trans(skb, dev); 1493 skb->protocol = x25_type_trans(skb, dev);
1496 netif_rx(skb); 1494 netif_rx(skb);
1497 dev->last_rx = jiffies; /* timestamp */
1498} 1495}
1499 1496
1500/* Convert line speed in bps to a number used by cyclom 2x code. */ 1497/* Convert line speed in bps to a number used by cyclom 2x code. */
@@ -1547,7 +1544,7 @@ static unsigned dec_to_uint(u8 *str, int len)
1547 1544
1548static void reset_timer(struct net_device *dev) 1545static void reset_timer(struct net_device *dev)
1549{ 1546{
1550 struct cycx_x25_channel *chan = dev->priv; 1547 struct cycx_x25_channel *chan = netdev_priv(dev);
1551 1548
1552 if (chan->svc) 1549 if (chan->svc)
1553 mod_timer(&chan->timer, jiffies+chan->idle_tmout*HZ); 1550 mod_timer(&chan->timer, jiffies+chan->idle_tmout*HZ);
@@ -1600,7 +1597,7 @@ static void cycx_x25_dump_devs(struct wan_device *wandev)
1600 printk(KERN_INFO "---------------------------------------\n"); 1597 printk(KERN_INFO "---------------------------------------\n");
1601 1598
1602 while(dev) { 1599 while(dev) {
1603 struct cycx_x25_channel *chan = dev->priv; 1600 struct cycx_x25_channel *chan = netdev_priv(dev);
1604 1601
1605 printk(KERN_INFO "%-5.5s %-15.15s %d ETH_P_%s\n", 1602 printk(KERN_INFO "%-5.5s %-15.15s %d ETH_P_%s\n",
1606 chan->name, chan->addr, netif_queue_stopped(dev), 1603 chan->name, chan->addr, netif_queue_stopped(dev),
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index b14242768fad..a297e3efa05d 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -74,7 +74,7 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
74 unsigned int hlen; 74 unsigned int hlen;
75 char *dest; 75 char *dest;
76 76
77 dlp = dev->priv; 77 dlp = netdev_priv(dev);
78 78
79 hdr.control = FRAD_I_UI; 79 hdr.control = FRAD_I_UI;
80 switch(type) 80 switch(type)
@@ -110,7 +110,7 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
110 struct frhdr *hdr; 110 struct frhdr *hdr;
111 int process, header; 111 int process, header;
112 112
113 dlp = dev->priv; 113 dlp = netdev_priv(dev);
114 if (!pskb_may_pull(skb, sizeof(*hdr))) { 114 if (!pskb_may_pull(skb, sizeof(*hdr))) {
115 printk(KERN_NOTICE "%s: invalid data no header\n", 115 printk(KERN_NOTICE "%s: invalid data no header\n",
116 dev->name); 116 dev->name);
@@ -181,7 +181,6 @@ static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
181 dlp->stats.rx_bytes += skb->len; 181 dlp->stats.rx_bytes += skb->len;
182 netif_rx(skb); 182 netif_rx(skb);
183 dlp->stats.rx_packets++; 183 dlp->stats.rx_packets++;
184 dev->last_rx = jiffies;
185 } 184 }
186 else 185 else
187 dev_kfree_skb(skb); 186 dev_kfree_skb(skb);
@@ -197,7 +196,7 @@ static int dlci_transmit(struct sk_buff *skb, struct net_device *dev)
197 if (!skb || !dev) 196 if (!skb || !dev)
198 return(0); 197 return(0);
199 198
200 dlp = dev->priv; 199 dlp = netdev_priv(dev);
201 200
202 netif_stop_queue(dev); 201 netif_stop_queue(dev);
203 202
@@ -235,9 +234,9 @@ static int dlci_config(struct net_device *dev, struct dlci_conf __user *conf, in
235 struct frad_local *flp; 234 struct frad_local *flp;
236 int err; 235 int err;
237 236
238 dlp = dev->priv; 237 dlp = netdev_priv(dev);
239 238
240 flp = dlp->slave->priv; 239 flp = netdev_priv(dlp->slave);
241 240
242 if (!get) 241 if (!get)
243 { 242 {
@@ -269,7 +268,7 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
269 if (!capable(CAP_NET_ADMIN)) 268 if (!capable(CAP_NET_ADMIN))
270 return(-EPERM); 269 return(-EPERM);
271 270
272 dlp = dev->priv; 271 dlp = netdev_priv(dev);
273 272
274 switch(cmd) 273 switch(cmd)
275 { 274 {
@@ -298,7 +297,7 @@ static int dlci_change_mtu(struct net_device *dev, int new_mtu)
298{ 297{
299 struct dlci_local *dlp; 298 struct dlci_local *dlp;
300 299
301 dlp = dev->priv; 300 dlp = netdev_priv(dev);
302 301
303 return((*dlp->slave->change_mtu)(dlp->slave, new_mtu)); 302 return((*dlp->slave->change_mtu)(dlp->slave, new_mtu));
304} 303}
@@ -309,7 +308,7 @@ static int dlci_open(struct net_device *dev)
309 struct frad_local *flp; 308 struct frad_local *flp;
310 int err; 309 int err;
311 310
312 dlp = dev->priv; 311 dlp = netdev_priv(dev);
313 312
314 if (!*(short *)(dev->dev_addr)) 313 if (!*(short *)(dev->dev_addr))
315 return(-EINVAL); 314 return(-EINVAL);
@@ -317,7 +316,7 @@ static int dlci_open(struct net_device *dev)
317 if (!netif_running(dlp->slave)) 316 if (!netif_running(dlp->slave))
318 return(-ENOTCONN); 317 return(-ENOTCONN);
319 318
320 flp = dlp->slave->priv; 319 flp = netdev_priv(dlp->slave);
321 err = (*flp->activate)(dlp->slave, dev); 320 err = (*flp->activate)(dlp->slave, dev);
322 if (err) 321 if (err)
323 return(err); 322 return(err);
@@ -335,9 +334,9 @@ static int dlci_close(struct net_device *dev)
335 334
336 netif_stop_queue(dev); 335 netif_stop_queue(dev);
337 336
338 dlp = dev->priv; 337 dlp = netdev_priv(dev);
339 338
340 flp = dlp->slave->priv; 339 flp = netdev_priv(dlp->slave);
341 err = (*flp->deactivate)(dlp->slave, dev); 340 err = (*flp->deactivate)(dlp->slave, dev);
342 341
343 return 0; 342 return 0;
@@ -347,7 +346,7 @@ static struct net_device_stats *dlci_get_stats(struct net_device *dev)
347{ 346{
348 struct dlci_local *dlp; 347 struct dlci_local *dlp;
349 348
350 dlp = dev->priv; 349 dlp = netdev_priv(dev);
351 350
352 return(&dlp->stats); 351 return(&dlp->stats);
353} 352}
@@ -365,7 +364,7 @@ static int dlci_add(struct dlci_add *dlci)
365 if (!slave) 364 if (!slave)
366 return -ENODEV; 365 return -ENODEV;
367 366
368 if (slave->type != ARPHRD_FRAD || slave->priv == NULL) 367 if (slave->type != ARPHRD_FRAD || netdev_priv(slave) == NULL)
369 goto err1; 368 goto err1;
370 369
371 /* create device name */ 370 /* create device name */
@@ -391,11 +390,11 @@ static int dlci_add(struct dlci_add *dlci)
391 390
392 *(short *)(master->dev_addr) = dlci->dlci; 391 *(short *)(master->dev_addr) = dlci->dlci;
393 392
394 dlp = (struct dlci_local *) master->priv; 393 dlp = netdev_priv(master);
395 dlp->slave = slave; 394 dlp->slave = slave;
396 dlp->master = master; 395 dlp->master = master;
397 396
398 flp = slave->priv; 397 flp = netdev_priv(slave);
399 err = (*flp->assoc)(slave, master); 398 err = (*flp->assoc)(slave, master);
400 if (err < 0) 399 if (err < 0)
401 goto err2; 400 goto err2;
@@ -435,9 +434,9 @@ static int dlci_del(struct dlci_add *dlci)
435 return(-EBUSY); 434 return(-EBUSY);
436 } 435 }
437 436
438 dlp = master->priv; 437 dlp = netdev_priv(master);
439 slave = dlp->slave; 438 slave = dlp->slave;
440 flp = slave->priv; 439 flp = netdev_priv(slave);
441 440
442 rtnl_lock(); 441 rtnl_lock();
443 err = (*flp->deassoc)(slave, master); 442 err = (*flp->deassoc)(slave, master);
@@ -491,7 +490,7 @@ static const struct header_ops dlci_header_ops = {
491 490
492static void dlci_setup(struct net_device *dev) 491static void dlci_setup(struct net_device *dev)
493{ 492{
494 struct dlci_local *dlp = dev->priv; 493 struct dlci_local *dlp = netdev_priv(dev);
495 494
496 dev->flags = 0; 495 dev->flags = 0;
497 dev->open = dlci_open; 496 dev->open = dlci_open;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 5f1ccb2b08b1..888025db2f02 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -659,7 +659,6 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
659 skb_put(skb, pkt_len); 659 skb_put(skb, pkt_len);
660 if (netif_running(dev)) 660 if (netif_running(dev))
661 skb->protocol = hdlc_type_trans(skb, dev); 661 skb->protocol = hdlc_type_trans(skb, dev);
662 skb->dev->last_rx = jiffies;
663 netif_rx(skb); 662 netif_rx(skb);
664 } else { 663 } else {
665 if (skb->data[pkt_len] & FrameRdo) 664 if (skb->data[pkt_len] & FrameRdo)
@@ -730,8 +729,7 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev,
730 goto err_free_mmio_region_1; 729 goto err_free_mmio_region_1;
731 } 730 }
732 731
733 ioaddr = ioremap(pci_resource_start(pdev, 0), 732 ioaddr = pci_ioremap_bar(pdev, 0);
734 pci_resource_len(pdev, 0));
735 if (!ioaddr) { 733 if (!ioaddr) {
736 printk(KERN_ERR "%s: cannot remap MMIO region %llx @ %llx\n", 734 printk(KERN_ERR "%s: cannot remap MMIO region %llx @ %llx\n",
737 DRV_NAME, (unsigned long long)pci_resource_len(pdev, 0), 735 DRV_NAME, (unsigned long long)pci_resource_len(pdev, 0),
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 9557ad078ab8..48a2c9d28950 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -896,7 +896,6 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
896 fst_process_rx_status(rx_status, port_to_dev(port)->name); 896 fst_process_rx_status(rx_status, port_to_dev(port)->name);
897 if (rx_status == NET_RX_DROP) 897 if (rx_status == NET_RX_DROP)
898 dev->stats.rx_dropped++; 898 dev->stats.rx_dropped++;
899 dev->last_rx = jiffies;
900} 899}
901 900
902/* 901/*
@@ -1322,7 +1321,6 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
1322 fst_process_rx_status(rx_status, port_to_dev(port)->name); 1321 fst_process_rx_status(rx_status, port_to_dev(port)->name);
1323 if (rx_status == NET_RX_DROP) 1322 if (rx_status == NET_RX_DROP)
1324 dev->stats.rx_dropped++; 1323 dev->stats.rx_dropped++;
1325 dev->last_rx = jiffies;
1326 } else { 1324 } else {
1327 card->dma_skb_rx = skb; 1325 card->dma_skb_rx = skb;
1328 card->dma_port_rx = port; 1326 card->dma_port_rx = port;
diff --git a/drivers/net/wan/hd6457x.c b/drivers/net/wan/hd64570.c
index 591fb45a7c68..223238de475c 100644
--- a/drivers/net/wan/hd6457x.c
+++ b/drivers/net/wan/hd64570.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Hitachi SCA HD64570 and HD64572 common driver for Linux 2 * Hitachi SCA HD64570 driver for Linux
3 * 3 *
4 * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl> 4 * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
5 * 5 *
@@ -7,9 +7,7 @@
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation. 8 * as published by the Free Software Foundation.
9 * 9 *
10 * Sources of information: 10 * Source of information: Hitachi HD64570 SCA User's Manual
11 * Hitachi HD64570 SCA User's Manual
12 * Hitachi HD64572 SCA-II User's Manual
13 * 11 *
14 * We use the following SCA memory map: 12 * We use the following SCA memory map:
15 * 13 *
@@ -26,33 +24,26 @@
26 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers (if used) 24 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers (if used)
27 */ 25 */
28 26
29#include <linux/module.h> 27#include <linux/bitops.h>
30#include <linux/kernel.h> 28#include <linux/errno.h>
31#include <linux/slab.h>
32#include <linux/jiffies.h>
33#include <linux/types.h>
34#include <linux/fcntl.h> 29#include <linux/fcntl.h>
35#include <linux/interrupt.h> 30#include <linux/hdlc.h>
36#include <linux/in.h> 31#include <linux/in.h>
37#include <linux/string.h>
38#include <linux/errno.h>
39#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/interrupt.h>
40#include <linux/ioport.h> 34#include <linux/ioport.h>
41#include <linux/bitops.h> 35#include <linux/jiffies.h>
42 36#include <linux/kernel.h>
43#include <asm/system.h> 37#include <linux/module.h>
44#include <asm/uaccess.h>
45#include <asm/io.h>
46
47#include <linux/netdevice.h> 38#include <linux/netdevice.h>
48#include <linux/skbuff.h> 39#include <linux/skbuff.h>
49 40#include <linux/slab.h>
50#include <linux/hdlc.h> 41#include <linux/string.h>
51 42#include <linux/types.h>
52#if (!defined (__HD64570_H) && !defined (__HD64572_H)) || \ 43#include <asm/io.h>
53 (defined (__HD64570_H) && defined (__HD64572_H)) 44#include <asm/system.h>
54#error Either hd64570.h or hd64572.h must be included 45#include <asm/uaccess.h>
55#endif 46#include "hd64570.h"
56 47
57#define get_msci(port) (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) 48#define get_msci(port) (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET)
58#define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET) 49#define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
@@ -62,16 +53,6 @@
62#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02) 53#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
63#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04) 54#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)
64 55
65#ifdef __HD64570_H /* HD64570 */
66#define sca_outa(value, reg, card) sca_outw(value, reg, card)
67#define sca_ina(reg, card) sca_inw(reg, card)
68#define writea(value, ptr) writew(value, ptr)
69
70#else /* HD64572 */
71#define sca_outa(value, reg, card) sca_outl(value, reg, card)
72#define sca_ina(reg, card) sca_inl(reg, card)
73#define writea(value, ptr) writel(value, ptr)
74#endif
75 56
76static inline struct net_device *port_to_dev(port_t *port) 57static inline struct net_device *port_to_dev(port_t *port)
77{ 58{
@@ -81,8 +62,6 @@ static inline struct net_device *port_to_dev(port_t *port)
81static inline int sca_intr_status(card_t *card) 62static inline int sca_intr_status(card_t *card)
82{ 63{
83 u8 result = 0; 64 u8 result = 0;
84
85#ifdef __HD64570_H /* HD64570 */
86 u8 isr0 = sca_in(ISR0, card); 65 u8 isr0 = sca_in(ISR0, card);
87 u8 isr1 = sca_in(ISR1, card); 66 u8 isr1 = sca_in(ISR1, card);
88 67
@@ -93,18 +72,6 @@ static inline int sca_intr_status(card_t *card)
93 if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0); 72 if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0);
94 if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1); 73 if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1);
95 74
96#else /* HD64572 */
97 u32 isr0 = sca_inl(ISR0, card);
98
99 if (isr0 & 0x0000000F) result |= SCA_INTR_DMAC_RX(0);
100 if (isr0 & 0x000000F0) result |= SCA_INTR_DMAC_TX(0);
101 if (isr0 & 0x00000F00) result |= SCA_INTR_DMAC_RX(1);
102 if (isr0 & 0x0000F000) result |= SCA_INTR_DMAC_TX(1);
103 if (isr0 & 0x003E0000) result |= SCA_INTR_MSCI(0);
104 if (isr0 & 0x3E000000) result |= SCA_INTR_MSCI(1);
105
106#endif /* HD64570 vs HD64572 */
107
108 if (!(result & SCA_INTR_DMAC_TX(0))) 75 if (!(result & SCA_INTR_DMAC_TX(0)))
109 if (sca_in(DSR_TX(0), card) & DSR_EOM) 76 if (sca_in(DSR_TX(0), card) & DSR_EOM)
110 result |= SCA_INTR_DMAC_TX(0); 77 result |= SCA_INTR_DMAC_TX(0);
@@ -127,7 +94,6 @@ static inline u16 next_desc(port_t *port, u16 desc, int transmit)
127} 94}
128 95
129 96
130
131static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit) 97static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
132{ 98{
133 u16 rx_buffs = port_to_card(port)->rx_ring_buffers; 99 u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
@@ -139,28 +105,26 @@ static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
139} 105}
140 106
141 107
142
143static inline u16 desc_offset(port_t *port, u16 desc, int transmit) 108static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
144{ 109{
145 /* Descriptor offset always fits in 16 bytes */ 110 /* Descriptor offset always fits in 16 bits */
146 return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc); 111 return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
147} 112}
148 113
149 114
150 115static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
151static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc, int transmit) 116 int transmit)
152{ 117{
153#ifdef PAGE0_ALWAYS_MAPPED 118#ifdef PAGE0_ALWAYS_MAPPED
154 return (pkt_desc __iomem *)(win0base(port_to_card(port)) 119 return (pkt_desc __iomem *)(win0base(port_to_card(port))
155 + desc_offset(port, desc, transmit)); 120 + desc_offset(port, desc, transmit));
156#else 121#else
157 return (pkt_desc __iomem *)(winbase(port_to_card(port)) 122 return (pkt_desc __iomem *)(winbase(port_to_card(port))
158 + desc_offset(port, desc, transmit)); 123 + desc_offset(port, desc, transmit));
159#endif 124#endif
160} 125}
161 126
162 127
163
164static inline u32 buffer_offset(port_t *port, u16 desc, int transmit) 128static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
165{ 129{
166 return port_to_card(port)->buff_offset + 130 return port_to_card(port)->buff_offset +
@@ -186,7 +150,7 @@ static inline void sca_set_carrier(port_t *port)
186} 150}
187 151
188 152
189static void sca_init_sync_port(port_t *port) 153static void sca_init_port(port_t *port)
190{ 154{
191 card_t *card = port_to_card(port); 155 card_t *card = port_to_card(port);
192 int transmit, i; 156 int transmit, i;
@@ -195,7 +159,7 @@ static void sca_init_sync_port(port_t *port)
195 port->txin = 0; 159 port->txin = 0;
196 port->txlast = 0; 160 port->txlast = 0;
197 161
198#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED) 162#ifndef PAGE0_ALWAYS_MAPPED
199 openwin(card, 0); 163 openwin(card, 0);
200#endif 164#endif
201 165
@@ -209,7 +173,7 @@ static void sca_init_sync_port(port_t *port)
209 u16 chain_off = desc_offset(port, i + 1, transmit); 173 u16 chain_off = desc_offset(port, i + 1, transmit);
210 u32 buff_off = buffer_offset(port, i, transmit); 174 u32 buff_off = buffer_offset(port, i, transmit);
211 175
212 writea(chain_off, &desc->cp); 176 writew(chain_off, &desc->cp);
213 writel(buff_off, &desc->bp); 177 writel(buff_off, &desc->bp);
214 writew(0, &desc->len); 178 writew(0, &desc->len);
215 writeb(0, &desc->stat); 179 writeb(0, &desc->stat);
@@ -222,16 +186,14 @@ static void sca_init_sync_port(port_t *port)
222 sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) : 186 sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
223 DCR_RX(phy_node(port)), card); 187 DCR_RX(phy_node(port)), card);
224 188
225#ifdef __HD64570_H
226 sca_out(0, dmac + CPB, card); /* pointer base */
227#endif
228 /* current desc addr */ 189 /* current desc addr */
229 sca_outa(desc_offset(port, 0, transmit), dmac + CDAL, card); 190 sca_out(0, dmac + CPB, card); /* pointer base */
191 sca_outw(desc_offset(port, 0, transmit), dmac + CDAL, card);
230 if (!transmit) 192 if (!transmit)
231 sca_outa(desc_offset(port, buffs - 1, transmit), 193 sca_outw(desc_offset(port, buffs - 1, transmit),
232 dmac + EDAL, card); 194 dmac + EDAL, card);
233 else 195 else
234 sca_outa(desc_offset(port, 0, transmit), dmac + EDAL, 196 sca_outw(desc_offset(port, 0, transmit), dmac + EDAL,
235 card); 197 card);
236 198
237 /* clear frame end interrupt counter */ 199 /* clear frame end interrupt counter */
@@ -258,7 +220,6 @@ static void sca_init_sync_port(port_t *port)
258} 220}
259 221
260 222
261
262#ifdef NEED_SCA_MSCI_INTR 223#ifdef NEED_SCA_MSCI_INTR
263/* MSCI interrupt service */ 224/* MSCI interrupt service */
264static inline void sca_msci_intr(port_t *port) 225static inline void sca_msci_intr(port_t *port)
@@ -282,17 +243,15 @@ static inline void sca_msci_intr(port_t *port)
282#endif 243#endif
283 244
284 245
285 246static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
286static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u16 rxin) 247 u16 rxin)
287{ 248{
288 struct net_device *dev = port_to_dev(port); 249 struct net_device *dev = port_to_dev(port);
289 struct sk_buff *skb; 250 struct sk_buff *skb;
290 u16 len; 251 u16 len;
291 u32 buff; 252 u32 buff;
292#ifndef ALL_PAGES_ALWAYS_MAPPED
293 u32 maxlen; 253 u32 maxlen;
294 u8 page; 254 u8 page;
295#endif
296 255
297 len = readw(&desc->len); 256 len = readw(&desc->len);
298 skb = dev_alloc_skb(len); 257 skb = dev_alloc_skb(len);
@@ -302,7 +261,6 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u1
302 } 261 }
303 262
304 buff = buffer_offset(port, rxin, 0); 263 buff = buffer_offset(port, rxin, 0);
305#ifndef ALL_PAGES_ALWAYS_MAPPED
306 page = buff / winsize(card); 264 page = buff / winsize(card);
307 buff = buff % winsize(card); 265 buff = buff % winsize(card);
308 maxlen = winsize(card) - buff; 266 maxlen = winsize(card) - buff;
@@ -314,12 +272,10 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u1
314 openwin(card, page + 1); 272 openwin(card, page + 1);
315 memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen); 273 memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
316 } else 274 } else
317#endif 275 memcpy_fromio(skb->data, winbase(card) + buff, len);
318 memcpy_fromio(skb->data, winbase(card) + buff, len);
319 276
320#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED) 277#ifndef PAGE0_ALWAYS_MAPPED
321 /* select pkt_desc table page back */ 278 openwin(card, 0); /* select pkt_desc table page back */
322 openwin(card, 0);
323#endif 279#endif
324 skb_put(skb, len); 280 skb_put(skb, len);
325#ifdef DEBUG_PKT 281#ifdef DEBUG_PKT
@@ -328,13 +284,11 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc, u1
328#endif 284#endif
329 dev->stats.rx_packets++; 285 dev->stats.rx_packets++;
330 dev->stats.rx_bytes += skb->len; 286 dev->stats.rx_bytes += skb->len;
331 dev->last_rx = jiffies;
332 skb->protocol = hdlc_type_trans(skb, dev); 287 skb->protocol = hdlc_type_trans(skb, dev);
333 netif_rx(skb); 288 netif_rx(skb);
334} 289}
335 290
336 291
337
338/* Receive DMA interrupt service */ 292/* Receive DMA interrupt service */
339static inline void sca_rx_intr(port_t *port) 293static inline void sca_rx_intr(port_t *port)
340{ 294{
@@ -354,7 +308,7 @@ static inline void sca_rx_intr(port_t *port)
354 while (1) { 308 while (1) {
355 u32 desc_off = desc_offset(port, port->rxin, 0); 309 u32 desc_off = desc_offset(port, port->rxin, 0);
356 pkt_desc __iomem *desc; 310 pkt_desc __iomem *desc;
357 u32 cda = sca_ina(dmac + CDAL, card); 311 u32 cda = sca_inw(dmac + CDAL, card);
358 312
359 if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc))) 313 if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
360 break; /* No frame received */ 314 break; /* No frame received */
@@ -378,7 +332,7 @@ static inline void sca_rx_intr(port_t *port)
378 sca_rx(card, port, desc, port->rxin); 332 sca_rx(card, port, desc, port->rxin);
379 333
380 /* Set new error descriptor address */ 334 /* Set new error descriptor address */
381 sca_outa(desc_off, dmac + EDAL, card); 335 sca_outw(desc_off, dmac + EDAL, card);
382 port->rxin = next_desc(port, port->rxin, 0); 336 port->rxin = next_desc(port, port->rxin, 0);
383 } 337 }
384 338
@@ -387,7 +341,6 @@ static inline void sca_rx_intr(port_t *port)
387} 341}
388 342
389 343
390
391/* Transmit DMA interrupt service */ 344/* Transmit DMA interrupt service */
392static inline void sca_tx_intr(port_t *port) 345static inline void sca_tx_intr(port_t *port)
393{ 346{
@@ -408,7 +361,7 @@ static inline void sca_tx_intr(port_t *port)
408 pkt_desc __iomem *desc; 361 pkt_desc __iomem *desc;
409 362
410 u32 desc_off = desc_offset(port, port->txlast, 1); 363 u32 desc_off = desc_offset(port, port->txlast, 1);
411 u32 cda = sca_ina(dmac + CDAL, card); 364 u32 cda = sca_inw(dmac + CDAL, card);
412 if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc))) 365 if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
413 break; /* Transmitter is/will_be sending this frame */ 366 break; /* Transmitter is/will_be sending this frame */
414 367
@@ -424,17 +377,13 @@ static inline void sca_tx_intr(port_t *port)
424} 377}
425 378
426 379
427
428static irqreturn_t sca_intr(int irq, void* dev_id) 380static irqreturn_t sca_intr(int irq, void* dev_id)
429{ 381{
430 card_t *card = dev_id; 382 card_t *card = dev_id;
431 int i; 383 int i;
432 u8 stat; 384 u8 stat;
433 int handled = 0; 385 int handled = 0;
434
435#ifndef ALL_PAGES_ALWAYS_MAPPED
436 u8 page = sca_get_page(card); 386 u8 page = sca_get_page(card);
437#endif
438 387
439 while((stat = sca_intr_status(card)) != 0) { 388 while((stat = sca_intr_status(card)) != 0) {
440 handled = 1; 389 handled = 1;
@@ -453,14 +402,11 @@ static irqreturn_t sca_intr(int irq, void* dev_id)
453 } 402 }
454 } 403 }
455 404
456#ifndef ALL_PAGES_ALWAYS_MAPPED
457 openwin(card, page); /* Restore original page */ 405 openwin(card, page); /* Restore original page */
458#endif
459 return IRQ_RETVAL(handled); 406 return IRQ_RETVAL(handled);
460} 407}
461 408
462 409
463
464static void sca_set_port(port_t *port) 410static void sca_set_port(port_t *port)
465{ 411{
466 card_t* card = port_to_card(port); 412 card_t* card = port_to_card(port);
@@ -498,12 +444,7 @@ static void sca_set_port(port_t *port)
498 port->tmc = tmc; 444 port->tmc = tmc;
499 445
500 /* baud divisor - time constant*/ 446 /* baud divisor - time constant*/
501#ifdef __HD64570_H
502 sca_out(port->tmc, msci + TMC, card); 447 sca_out(port->tmc, msci + TMC, card);
503#else
504 sca_out(port->tmc, msci + TMCR, card);
505 sca_out(port->tmc, msci + TMCT, card);
506#endif
507 448
508 /* Set BRG bits */ 449 /* Set BRG bits */
509 sca_out(port->rxs, msci + RXS, card); 450 sca_out(port->rxs, msci + RXS, card);
@@ -519,7 +460,6 @@ static void sca_set_port(port_t *port)
519} 460}
520 461
521 462
522
523static void sca_open(struct net_device *dev) 463static void sca_open(struct net_device *dev)
524{ 464{
525 port_t *port = dev_to_port(dev); 465 port_t *port = dev_to_port(dev);
@@ -541,11 +481,7 @@ static void sca_open(struct net_device *dev)
541 switch(port->parity) { 481 switch(port->parity) {
542 case PARITY_CRC16_PR0: md0 = MD0_HDLC | MD0_CRC_16_0; break; 482 case PARITY_CRC16_PR0: md0 = MD0_HDLC | MD0_CRC_16_0; break;
543 case PARITY_CRC16_PR1: md0 = MD0_HDLC | MD0_CRC_16; break; 483 case PARITY_CRC16_PR1: md0 = MD0_HDLC | MD0_CRC_16; break;
544#ifdef __HD64570_H
545 case PARITY_CRC16_PR0_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU_0; break; 484 case PARITY_CRC16_PR0_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU_0; break;
546#else
547 case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
548#endif
549 case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU; break; 485 case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU; break;
550 default: md0 = MD0_HDLC | MD0_CRC_NONE; 486 default: md0 = MD0_HDLC | MD0_CRC_NONE;
551 } 487 }
@@ -555,35 +491,20 @@ static void sca_open(struct net_device *dev)
555 sca_out(0x00, msci + MD1, card); /* no address field check */ 491 sca_out(0x00, msci + MD1, card); /* no address field check */
556 sca_out(md2, msci + MD2, card); 492 sca_out(md2, msci + MD2, card);
557 sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */ 493 sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
558#ifdef __HD64570_H
559 sca_out(CTL_IDLE, msci + CTL, card); 494 sca_out(CTL_IDLE, msci + CTL, card);
560#else
561 /* Skip the rest of underrun frame */
562 sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
563#endif
564 495
565#ifdef __HD64570_H
566 /* Allow at least 8 bytes before requesting RX DMA operation */ 496 /* Allow at least 8 bytes before requesting RX DMA operation */
567 /* TX with higher priority and possibly with shorter transfers */ 497 /* TX with higher priority and possibly with shorter transfers */
568 sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition*/ 498 sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition*/
569 sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition*/ 499 sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition*/
570 sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */ 500 sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */
571#else
572 sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
573 sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
574 sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
575 sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
576 sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/
577#endif
578 501
579/* We're using the following interrupts: 502/* We're using the following interrupts:
580 - TXINT (DMAC completed all transmisions, underrun or DCD change) 503 - TXINT (DMAC completed all transmisions, underrun or DCD change)
581 - all DMA interrupts 504 - all DMA interrupts
582*/ 505*/
583
584 sca_set_carrier(port); 506 sca_set_carrier(port);
585 507
586#ifdef __HD64570_H
587 /* MSCI TX INT and RX INT A IRQ enable */ 508 /* MSCI TX INT and RX INT A IRQ enable */
588 sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card); 509 sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card);
589 sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card); 510 sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card);
@@ -592,21 +513,8 @@ static void sca_open(struct net_device *dev)
592 /* enable DMA IRQ */ 513 /* enable DMA IRQ */
593 sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F), 514 sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F),
594 IER1, card); 515 IER1, card);
595#else
596 /* MSCI TXINT and RXINTA interrupt enable */
597 sca_outl(IE0_TXINT | IE0_RXINTA | IE0_UDRN | IE0_CDCD, msci + IE0,
598 card);
599 /* DMA & MSCI IRQ enable */
600 sca_outl(sca_inl(IER0, card) |
601 (phy_node(port) ? 0x0A006600 : 0x000A0066), IER0, card);
602#endif
603 516
604#ifdef __HD64570_H
605 sca_out(port->tmc, msci + TMC, card); /* Restore registers */ 517 sca_out(port->tmc, msci + TMC, card); /* Restore registers */
606#else
607 sca_out(port->tmc, msci + TMCR, card);
608 sca_out(port->tmc, msci + TMCT, card);
609#endif
610 sca_out(port->rxs, msci + RXS, card); 518 sca_out(port->rxs, msci + RXS, card);
611 sca_out(port->txs, msci + TXS, card); 519 sca_out(port->txs, msci + TXS, card);
612 sca_out(CMD_TX_ENABLE, msci + CMD, card); 520 sca_out(CMD_TX_ENABLE, msci + CMD, card);
@@ -616,7 +524,6 @@ static void sca_open(struct net_device *dev)
616} 524}
617 525
618 526
619
620static void sca_close(struct net_device *dev) 527static void sca_close(struct net_device *dev)
621{ 528{
622 port_t *port = dev_to_port(dev); 529 port_t *port = dev_to_port(dev);
@@ -624,23 +531,17 @@ static void sca_close(struct net_device *dev)
624 531
625 /* reset channel */ 532 /* reset channel */
626 sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port)); 533 sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
627#ifdef __HD64570_H
628 /* disable MSCI interrupts */ 534 /* disable MSCI interrupts */
629 sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0), 535 sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0),
630 IER0, card); 536 IER0, card);
631 /* disable DMA interrupts */ 537 /* disable DMA interrupts */
632 sca_out(sca_in(IER1, card) & (phy_node(port) ? 0x0F : 0xF0), 538 sca_out(sca_in(IER1, card) & (phy_node(port) ? 0x0F : 0xF0),
633 IER1, card); 539 IER1, card);
634#else 540
635 /* disable DMA & MSCI IRQ */
636 sca_outl(sca_inl(IER0, card) &
637 (phy_node(port) ? 0x00FF00FF : 0xFF00FF00), IER0, card);
638#endif
639 netif_stop_queue(dev); 541 netif_stop_queue(dev);
640} 542}
641 543
642 544
643
644static int sca_attach(struct net_device *dev, unsigned short encoding, 545static int sca_attach(struct net_device *dev, unsigned short encoding,
645 unsigned short parity) 546 unsigned short parity)
646{ 547{
@@ -654,11 +555,7 @@ static int sca_attach(struct net_device *dev, unsigned short encoding,
654 if (parity != PARITY_NONE && 555 if (parity != PARITY_NONE &&
655 parity != PARITY_CRC16_PR0 && 556 parity != PARITY_CRC16_PR0 &&
656 parity != PARITY_CRC16_PR1 && 557 parity != PARITY_CRC16_PR1 &&
657#ifdef __HD64570_H
658 parity != PARITY_CRC16_PR0_CCITT && 558 parity != PARITY_CRC16_PR0_CCITT &&
659#else
660 parity != PARITY_CRC32_PR1_CCITT &&
661#endif
662 parity != PARITY_CRC16_PR1_CCITT) 559 parity != PARITY_CRC16_PR1_CCITT)
663 return -EINVAL; 560 return -EINVAL;
664 561
@@ -668,34 +565,30 @@ static int sca_attach(struct net_device *dev, unsigned short encoding,
668} 565}
669 566
670 567
671
672#ifdef DEBUG_RINGS 568#ifdef DEBUG_RINGS
673static void sca_dump_rings(struct net_device *dev) 569static void sca_dump_rings(struct net_device *dev)
674{ 570{
675 port_t *port = dev_to_port(dev); 571 port_t *port = dev_to_port(dev);
676 card_t *card = port_to_card(port); 572 card_t *card = port_to_card(port);
677 u16 cnt; 573 u16 cnt;
678#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED) 574#ifndef PAGE0_ALWAYS_MAPPED
679 u8 page; 575 u8 page = sca_get_page(card);
680#endif
681 576
682#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED)
683 page = sca_get_page(card);
684 openwin(card, 0); 577 openwin(card, 0);
685#endif 578#endif
686 579
687 printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive", 580 printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
688 sca_ina(get_dmac_rx(port) + CDAL, card), 581 sca_inw(get_dmac_rx(port) + CDAL, card),
689 sca_ina(get_dmac_rx(port) + EDAL, card), 582 sca_inw(get_dmac_rx(port) + EDAL, card),
690 sca_in(DSR_RX(phy_node(port)), card), port->rxin, 583 sca_in(DSR_RX(phy_node(port)), card), port->rxin,
691 sca_in(DSR_RX(phy_node(port)), card) & DSR_DE?"":"in"); 584 sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in");
692 for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++) 585 for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
693 printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat))); 586 printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
694 587
695 printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u " 588 printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
696 "last=%u %sactive", 589 "last=%u %sactive",
697 sca_ina(get_dmac_tx(port) + CDAL, card), 590 sca_inw(get_dmac_tx(port) + CDAL, card),
698 sca_ina(get_dmac_tx(port) + EDAL, card), 591 sca_inw(get_dmac_tx(port) + EDAL, card),
699 sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast, 592 sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast,
700 sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in"); 593 sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");
701 594
@@ -703,12 +596,8 @@ static void sca_dump_rings(struct net_device *dev)
703 printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat))); 596 printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
704 printk("\n"); 597 printk("\n");
705 598
706 printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, " 599 printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, ST: %02x %02x %02x %02x,"
707 "ST: %02x %02x %02x %02x" 600 " FST: %02x CST: %02x %02x\n",
708#ifdef __HD64572_H
709 " %02x"
710#endif
711 ", FST: %02x CST: %02x %02x\n",
712 sca_in(get_msci(port) + MD0, card), 601 sca_in(get_msci(port) + MD0, card),
713 sca_in(get_msci(port) + MD1, card), 602 sca_in(get_msci(port) + MD1, card),
714 sca_in(get_msci(port) + MD2, card), 603 sca_in(get_msci(port) + MD2, card),
@@ -716,52 +605,33 @@ static void sca_dump_rings(struct net_device *dev)
716 sca_in(get_msci(port) + ST1, card), 605 sca_in(get_msci(port) + ST1, card),
717 sca_in(get_msci(port) + ST2, card), 606 sca_in(get_msci(port) + ST2, card),
718 sca_in(get_msci(port) + ST3, card), 607 sca_in(get_msci(port) + ST3, card),
719#ifdef __HD64572_H
720 sca_in(get_msci(port) + ST4, card),
721#endif
722 sca_in(get_msci(port) + FST, card), 608 sca_in(get_msci(port) + FST, card),
723 sca_in(get_msci(port) + CST0, card), 609 sca_in(get_msci(port) + CST0, card),
724 sca_in(get_msci(port) + CST1, card)); 610 sca_in(get_msci(port) + CST1, card));
725 611
726#ifdef __HD64572_H
727 printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
728 sca_inl(ISR0, card), sca_inl(ISR1, card));
729#else
730 printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card), 612 printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card),
731 sca_in(ISR1, card), sca_in(ISR2, card)); 613 sca_in(ISR1, card), sca_in(ISR2, card));
732#endif
733 614
734#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED) 615#ifndef PAGE0_ALWAYS_MAPPED
735 openwin(card, page); /* Restore original page */ 616 openwin(card, page); /* Restore original page */
736#endif 617#endif
737} 618}
738#endif /* DEBUG_RINGS */ 619#endif /* DEBUG_RINGS */
739 620
740 621
741
742static int sca_xmit(struct sk_buff *skb, struct net_device *dev) 622static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
743{ 623{
744 port_t *port = dev_to_port(dev); 624 port_t *port = dev_to_port(dev);
745 card_t *card = port_to_card(port); 625 card_t *card = port_to_card(port);
746 pkt_desc __iomem *desc; 626 pkt_desc __iomem *desc;
747 u32 buff, len; 627 u32 buff, len;
748#ifndef ALL_PAGES_ALWAYS_MAPPED
749 u8 page; 628 u8 page;
750 u32 maxlen; 629 u32 maxlen;
751#endif
752 630
753 spin_lock_irq(&port->lock); 631 spin_lock_irq(&port->lock);
754 632
755 desc = desc_address(port, port->txin + 1, 1); 633 desc = desc_address(port, port->txin + 1, 1);
756 if (readb(&desc->stat)) { /* allow 1 packet gap */ 634 BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */
757 /* should never happen - previous xmit should stop queue */
758#ifdef DEBUG_PKT
759 printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
760#endif
761 netif_stop_queue(dev);
762 spin_unlock_irq(&port->lock);
763 return 1; /* request packet to be queued */
764 }
765 635
766#ifdef DEBUG_PKT 636#ifdef DEBUG_PKT
767 printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len); 637 printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
@@ -771,7 +641,6 @@ static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
771 desc = desc_address(port, port->txin, 1); 641 desc = desc_address(port, port->txin, 1);
772 buff = buffer_offset(port, port->txin, 1); 642 buff = buffer_offset(port, port->txin, 1);
773 len = skb->len; 643 len = skb->len;
774#ifndef ALL_PAGES_ALWAYS_MAPPED
775 page = buff / winsize(card); 644 page = buff / winsize(card);
776 buff = buff % winsize(card); 645 buff = buff % winsize(card);
777 maxlen = winsize(card) - buff; 646 maxlen = winsize(card) - buff;
@@ -781,12 +650,10 @@ static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
781 memcpy_toio(winbase(card) + buff, skb->data, maxlen); 650 memcpy_toio(winbase(card) + buff, skb->data, maxlen);
782 openwin(card, page + 1); 651 openwin(card, page + 1);
783 memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen); 652 memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
784 } 653 } else
785 else
786#endif
787 memcpy_toio(winbase(card) + buff, skb->data, len); 654 memcpy_toio(winbase(card) + buff, skb->data, len);
788 655
789#if !defined(PAGE0_ALWAYS_MAPPED) && !defined(ALL_PAGES_ALWAYS_MAPPED) 656#ifndef PAGE0_ALWAYS_MAPPED
790 openwin(card, 0); /* select pkt_desc table page back */ 657 openwin(card, 0); /* select pkt_desc table page back */
791#endif 658#endif
792 writew(len, &desc->len); 659 writew(len, &desc->len);
@@ -794,7 +661,7 @@ static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
794 dev->trans_start = jiffies; 661 dev->trans_start = jiffies;
795 662
796 port->txin = next_desc(port, port->txin, 1); 663 port->txin = next_desc(port, port->txin, 1);
797 sca_outa(desc_offset(port, port->txin, 1), 664 sca_outw(desc_offset(port, port->txin, 1),
798 get_dmac_tx(port) + EDAL, card); 665 get_dmac_tx(port) + EDAL, card);
799 666
800 sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */ 667 sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */
@@ -810,40 +677,29 @@ static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
810} 677}
811 678
812 679
813
814#ifdef NEED_DETECT_RAM 680#ifdef NEED_DETECT_RAM
815static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize) 681static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
682 u32 ramsize)
816{ 683{
817 /* Round RAM size to 32 bits, fill from end to start */ 684 /* Round RAM size to 32 bits, fill from end to start */
818 u32 i = ramsize &= ~3; 685 u32 i = ramsize &= ~3;
819
820#ifndef ALL_PAGES_ALWAYS_MAPPED
821 u32 size = winsize(card); 686 u32 size = winsize(card);
822 687
823 openwin(card, (i - 4) / size); /* select last window */ 688 openwin(card, (i - 4) / size); /* select last window */
824#endif 689
825 do { 690 do {
826 i -= 4; 691 i -= 4;
827#ifndef ALL_PAGES_ALWAYS_MAPPED
828 if ((i + 4) % size == 0) 692 if ((i + 4) % size == 0)
829 openwin(card, i / size); 693 openwin(card, i / size);
830 writel(i ^ 0x12345678, rambase + i % size); 694 writel(i ^ 0x12345678, rambase + i % size);
831#else 695 } while (i > 0);
832 writel(i ^ 0x12345678, rambase + i);
833#endif
834 }while (i > 0);
835 696
836 for (i = 0; i < ramsize ; i += 4) { 697 for (i = 0; i < ramsize ; i += 4) {
837#ifndef ALL_PAGES_ALWAYS_MAPPED
838 if (i % size == 0) 698 if (i % size == 0)
839 openwin(card, i / size); 699 openwin(card, i / size);
840 700
841 if (readl(rambase + i % size) != (i ^ 0x12345678)) 701 if (readl(rambase + i % size) != (i ^ 0x12345678))
842 break; 702 break;
843#else
844 if (readl(rambase + i) != (i ^ 0x12345678))
845 break;
846#endif
847 } 703 }
848 704
849 return i; 705 return i;
@@ -851,7 +707,6 @@ static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsi
851#endif /* NEED_DETECT_RAM */ 707#endif /* NEED_DETECT_RAM */
852 708
853 709
854
855static void __devinit sca_init(card_t *card, int wait_states) 710static void __devinit sca_init(card_t *card, int wait_states)
856{ 711{
857 sca_out(wait_states, WCRL, card); /* Wait Control */ 712 sca_out(wait_states, WCRL, card); /* Wait Control */
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
new file mode 100644
index 000000000000..08b3536944fe
--- /dev/null
+++ b/drivers/net/wan/hd64572.c
@@ -0,0 +1,640 @@
1/*
2 * Hitachi (now Renesas) SCA-II HD64572 driver for Linux
3 *
4 * Copyright (C) 1998-2008 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 *
10 * Source of information: HD64572 SCA-II User's Manual
11 *
12 * We use the following SCA memory map:
13 *
14 * Packet buffer descriptor rings - starting from card->rambase:
15 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
16 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
17 * rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
18 * tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
19 *
20 * Packet data buffers - starting from card->rambase + buff_offset:
21 * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers
22 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers
23 * rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers (if used)
24 * tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers (if used)
25 */
26
27#include <linux/bitops.h>
28#include <linux/errno.h>
29#include <linux/fcntl.h>
30#include <linux/hdlc.h>
31#include <linux/in.h>
32#include <linux/init.h>
33#include <linux/interrupt.h>
34#include <linux/ioport.h>
35#include <linux/jiffies.h>
36#include <linux/kernel.h>
37#include <linux/module.h>
38#include <linux/netdevice.h>
39#include <linux/skbuff.h>
40#include <linux/slab.h>
41#include <linux/string.h>
42#include <linux/types.h>
43#include <asm/io.h>
44#include <asm/system.h>
45#include <asm/uaccess.h>
46#include "hd64572.h"
47
48#define NAPI_WEIGHT 16
49
50#define get_msci(port) (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET)
51#define get_dmac_rx(port) (port->chan ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
52#define get_dmac_tx(port) (port->chan ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
53
54#define sca_in(reg, card) readb(card->scabase + (reg))
55#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
56#define sca_inw(reg, card) readw(card->scabase + (reg))
57#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
58#define sca_inl(reg, card) readl(card->scabase + (reg))
59#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
60
61static int sca_poll(struct napi_struct *napi, int budget);
62
63static inline port_t* dev_to_port(struct net_device *dev)
64{
65 return dev_to_hdlc(dev)->priv;
66}
67
68static inline void enable_intr(port_t *port)
69{
70 /* enable DMIB and MSCI RXINTA interrupts */
71 sca_outl(sca_inl(IER0, port->card) |
72 (port->chan ? 0x08002200 : 0x00080022), IER0, port->card);
73}
74
75static inline void disable_intr(port_t *port)
76{
77 sca_outl(sca_inl(IER0, port->card) &
78 (port->chan ? 0x00FF00FF : 0xFF00FF00), IER0, port->card);
79}
80
81static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
82{
83 u16 rx_buffs = port->card->rx_ring_buffers;
84 u16 tx_buffs = port->card->tx_ring_buffers;
85
86 desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
87 return port->chan * (rx_buffs + tx_buffs) + transmit * rx_buffs + desc;
88}
89
90
91static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
92{
93 /* Descriptor offset always fits in 16 bits */
94 return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
95}
96
97
98static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
99 int transmit)
100{
101 return (pkt_desc __iomem *)(port->card->rambase +
102 desc_offset(port, desc, transmit));
103}
104
105
106static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
107{
108 return port->card->buff_offset +
109 desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
110}
111
112
113static inline void sca_set_carrier(port_t *port)
114{
115 if (!(sca_in(get_msci(port) + ST3, port->card) & ST3_DCD)) {
116#ifdef DEBUG_LINK
117 printk(KERN_DEBUG "%s: sca_set_carrier on\n",
118 port->netdev.name);
119#endif
120 netif_carrier_on(port->netdev);
121 } else {
122#ifdef DEBUG_LINK
123 printk(KERN_DEBUG "%s: sca_set_carrier off\n",
124 port->netdev.name);
125#endif
126 netif_carrier_off(port->netdev);
127 }
128}
129
130
131static void sca_init_port(port_t *port)
132{
133 card_t *card = port->card;
134 u16 dmac_rx = get_dmac_rx(port), dmac_tx = get_dmac_tx(port);
135 int transmit, i;
136
137 port->rxin = 0;
138 port->txin = 0;
139 port->txlast = 0;
140
141 for (transmit = 0; transmit < 2; transmit++) {
142 u16 buffs = transmit ? card->tx_ring_buffers
143 : card->rx_ring_buffers;
144
145 for (i = 0; i < buffs; i++) {
146 pkt_desc __iomem *desc = desc_address(port, i, transmit);
147 u16 chain_off = desc_offset(port, i + 1, transmit);
148 u32 buff_off = buffer_offset(port, i, transmit);
149
150 writel(chain_off, &desc->cp);
151 writel(buff_off, &desc->bp);
152 writew(0, &desc->len);
153 writeb(0, &desc->stat);
154 }
155 }
156
157 /* DMA disable - to halt state */
158 sca_out(0, DSR_RX(port->chan), card);
159 sca_out(0, DSR_TX(port->chan), card);
160
161 /* software ABORT - to initial state */
162 sca_out(DCR_ABORT, DCR_RX(port->chan), card);
163 sca_out(DCR_ABORT, DCR_TX(port->chan), card);
164
165 /* current desc addr */
166 sca_outl(desc_offset(port, 0, 0), dmac_rx + CDAL, card);
167 sca_outl(desc_offset(port, card->tx_ring_buffers - 1, 0),
168 dmac_rx + EDAL, card);
169 sca_outl(desc_offset(port, 0, 1), dmac_tx + CDAL, card);
170 sca_outl(desc_offset(port, 0, 1), dmac_tx + EDAL, card);
171
172 /* clear frame end interrupt counter */
173 sca_out(DCR_CLEAR_EOF, DCR_RX(port->chan), card);
174 sca_out(DCR_CLEAR_EOF, DCR_TX(port->chan), card);
175
176 /* Receive */
177 sca_outw(HDLC_MAX_MRU, dmac_rx + BFLL, card); /* set buffer length */
178 sca_out(0x14, DMR_RX(port->chan), card); /* Chain mode, Multi-frame */
179 sca_out(DIR_EOME, DIR_RX(port->chan), card); /* enable interrupts */
180 sca_out(DSR_DE, DSR_RX(port->chan), card); /* DMA enable */
181
182 /* Transmit */
183 sca_out(0x14, DMR_TX(port->chan), card); /* Chain mode, Multi-frame */
184 sca_out(DIR_EOME, DIR_TX(port->chan), card); /* enable interrupts */
185
186 sca_set_carrier(port);
187 netif_napi_add(port->netdev, &port->napi, sca_poll, NAPI_WEIGHT);
188}
189
190
191/* MSCI interrupt service */
192static inline void sca_msci_intr(port_t *port)
193{
194 u16 msci = get_msci(port);
195 card_t* card = port->card;
196
197 if (sca_in(msci + ST1, card) & ST1_CDCD) {
198 /* Reset MSCI CDCD status bit */
199 sca_out(ST1_CDCD, msci + ST1, card);
200 sca_set_carrier(port);
201 }
202}
203
204
205static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
206 u16 rxin)
207{
208 struct net_device *dev = port->netdev;
209 struct sk_buff *skb;
210 u16 len;
211 u32 buff;
212
213 len = readw(&desc->len);
214 skb = dev_alloc_skb(len);
215 if (!skb) {
216 dev->stats.rx_dropped++;
217 return;
218 }
219
220 buff = buffer_offset(port, rxin, 0);
221 memcpy_fromio(skb->data, card->rambase + buff, len);
222
223 skb_put(skb, len);
224#ifdef DEBUG_PKT
225 printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
226 debug_frame(skb);
227#endif
228 dev->stats.rx_packets++;
229 dev->stats.rx_bytes += skb->len;
230 skb->protocol = hdlc_type_trans(skb, dev);
231 netif_receive_skb(skb);
232}
233
234
235/* Receive DMA service */
236static inline int sca_rx_done(port_t *port, int budget)
237{
238 struct net_device *dev = port->netdev;
239 u16 dmac = get_dmac_rx(port);
240 card_t *card = port->card;
241 u8 stat = sca_in(DSR_RX(port->chan), card); /* read DMA Status */
242 int received = 0;
243
244 /* Reset DSR status bits */
245 sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
246 DSR_RX(port->chan), card);
247
248 if (stat & DSR_BOF)
249 /* Dropped one or more frames */
250 dev->stats.rx_over_errors++;
251
252 while (received < budget) {
253 u32 desc_off = desc_offset(port, port->rxin, 0);
254 pkt_desc __iomem *desc;
255 u32 cda = sca_inl(dmac + CDAL, card);
256
257 if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
258 break; /* No frame received */
259
260 desc = desc_address(port, port->rxin, 0);
261 stat = readb(&desc->stat);
262 if (!(stat & ST_RX_EOM))
263 port->rxpart = 1; /* partial frame received */
264 else if ((stat & ST_ERROR_MASK) || port->rxpart) {
265 dev->stats.rx_errors++;
266 if (stat & ST_RX_OVERRUN)
267 dev->stats.rx_fifo_errors++;
268 else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
269 ST_RX_RESBIT)) || port->rxpart)
270 dev->stats.rx_frame_errors++;
271 else if (stat & ST_RX_CRC)
272 dev->stats.rx_crc_errors++;
273 if (stat & ST_RX_EOM)
274 port->rxpart = 0; /* received last fragment */
275 } else {
276 sca_rx(card, port, desc, port->rxin);
277 received++;
278 }
279
280 /* Set new error descriptor address */
281 sca_outl(desc_off, dmac + EDAL, card);
282 port->rxin = (port->rxin + 1) % card->rx_ring_buffers;
283 }
284
285 /* make sure RX DMA is enabled */
286 sca_out(DSR_DE, DSR_RX(port->chan), card);
287 return received;
288}
289
290
291/* Transmit DMA service */
292static inline void sca_tx_done(port_t *port)
293{
294 struct net_device *dev = port->netdev;
295 card_t* card = port->card;
296 u8 stat;
297
298 spin_lock(&port->lock);
299
300 stat = sca_in(DSR_TX(port->chan), card); /* read DMA Status */
301
302 /* Reset DSR status bits */
303 sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
304 DSR_TX(port->chan), card);
305
306 while (1) {
307 pkt_desc __iomem *desc = desc_address(port, port->txlast, 1);
308 u8 stat = readb(&desc->stat);
309
310 if (!(stat & ST_TX_OWNRSHP))
311 break; /* not yet transmitted */
312 if (stat & ST_TX_UNDRRUN) {
313 dev->stats.tx_errors++;
314 dev->stats.tx_fifo_errors++;
315 } else {
316 dev->stats.tx_packets++;
317 dev->stats.tx_bytes += readw(&desc->len);
318 }
319 writeb(0, &desc->stat); /* Free descriptor */
320 port->txlast = (port->txlast + 1) % card->tx_ring_buffers;
321 }
322
323 netif_wake_queue(dev);
324 spin_unlock(&port->lock);
325}
326
327
328static int sca_poll(struct napi_struct *napi, int budget)
329{
330 port_t *port = container_of(napi, port_t, napi);
331 u32 isr0 = sca_inl(ISR0, port->card);
332 int received = 0;
333
334 if (isr0 & (port->chan ? 0x08000000 : 0x00080000))
335 sca_msci_intr(port);
336
337 if (isr0 & (port->chan ? 0x00002000 : 0x00000020))
338 sca_tx_done(port);
339
340 if (isr0 & (port->chan ? 0x00000200 : 0x00000002))
341 received = sca_rx_done(port, budget);
342
343 if (received < budget) {
344 netif_rx_complete(napi);
345 enable_intr(port);
346 }
347
348 return received;
349}
350
351static irqreturn_t sca_intr(int irq, void *dev_id)
352{
353 card_t *card = dev_id;
354 u32 isr0 = sca_inl(ISR0, card);
355 int i, handled = 0;
356
357 for (i = 0; i < 2; i++) {
358 port_t *port = get_port(card, i);
359 if (port && (isr0 & (i ? 0x08002200 : 0x00080022))) {
360 handled = 1;
361 disable_intr(port);
362 netif_rx_schedule(&port->napi);
363 }
364 }
365
366 return IRQ_RETVAL(handled);
367}
368
369
370static void sca_set_port(port_t *port)
371{
372 card_t* card = port->card;
373 u16 msci = get_msci(port);
374 u8 md2 = sca_in(msci + MD2, card);
375 unsigned int tmc, br = 10, brv = 1024;
376
377
378 if (port->settings.clock_rate > 0) {
379 /* Try lower br for better accuracy*/
380 do {
381 br--;
382 brv >>= 1; /* brv = 2^9 = 512 max in specs */
383
384 /* Baud Rate = CLOCK_BASE / TMC / 2^BR */
385 tmc = CLOCK_BASE / brv / port->settings.clock_rate;
386 }while (br > 1 && tmc <= 128);
387
388 if (tmc < 1) {
389 tmc = 1;
390 br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */
391 brv = 1;
392 } else if (tmc > 255)
393 tmc = 256; /* tmc=0 means 256 - low baud rates */
394
395 port->settings.clock_rate = CLOCK_BASE / brv / tmc;
396 } else {
397 br = 9; /* Minimum clock rate */
398 tmc = 256; /* 8bit = 0 */
399 port->settings.clock_rate = CLOCK_BASE / (256 * 512);
400 }
401
402 port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
403 port->txs = (port->txs & ~CLK_BRG_MASK) | br;
404 port->tmc = tmc;
405
406 /* baud divisor - time constant*/
407 sca_out(port->tmc, msci + TMCR, card);
408 sca_out(port->tmc, msci + TMCT, card);
409
410 /* Set BRG bits */
411 sca_out(port->rxs, msci + RXS, card);
412 sca_out(port->txs, msci + TXS, card);
413
414 if (port->settings.loopback)
415 md2 |= MD2_LOOPBACK;
416 else
417 md2 &= ~MD2_LOOPBACK;
418
419 sca_out(md2, msci + MD2, card);
420
421}
422
423
424static void sca_open(struct net_device *dev)
425{
426 port_t *port = dev_to_port(dev);
427 card_t* card = port->card;
428 u16 msci = get_msci(port);
429 u8 md0, md2;
430
431 switch(port->encoding) {
432 case ENCODING_NRZ: md2 = MD2_NRZ; break;
433 case ENCODING_NRZI: md2 = MD2_NRZI; break;
434 case ENCODING_FM_MARK: md2 = MD2_FM_MARK; break;
435 case ENCODING_FM_SPACE: md2 = MD2_FM_SPACE; break;
436 default: md2 = MD2_MANCHESTER;
437 }
438
439 if (port->settings.loopback)
440 md2 |= MD2_LOOPBACK;
441
442 switch(port->parity) {
443 case PARITY_CRC16_PR0: md0 = MD0_HDLC | MD0_CRC_16_0; break;
444 case PARITY_CRC16_PR1: md0 = MD0_HDLC | MD0_CRC_16; break;
445 case PARITY_CRC32_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU32; break;
446 case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU; break;
447 default: md0 = MD0_HDLC | MD0_CRC_NONE;
448 }
449
450 sca_out(CMD_RESET, msci + CMD, card);
451 sca_out(md0, msci + MD0, card);
452 sca_out(0x00, msci + MD1, card); /* no address field check */
453 sca_out(md2, msci + MD2, card);
454 sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
455 /* Skip the rest of underrun frame */
456 sca_out(CTL_IDLE | CTL_URCT | CTL_URSKP, msci + CTL, card);
457 sca_out(0x0F, msci + RNR, card); /* +1=RX DMA activation condition */
458 sca_out(0x3C, msci + TFS, card); /* +1 = TX start */
459 sca_out(0x38, msci + TCR, card); /* =Critical TX DMA activ condition */
460 sca_out(0x38, msci + TNR0, card); /* =TX DMA activation condition */
461 sca_out(0x3F, msci + TNR1, card); /* +1=TX DMA deactivation condition*/
462
463/* We're using the following interrupts:
464 - RXINTA (DCD changes only)
465 - DMIB (EOM - single frame transfer complete)
466*/
467 sca_outl(IE0_RXINTA | IE0_CDCD, msci + IE0, card);
468
469 sca_out(port->tmc, msci + TMCR, card);
470 sca_out(port->tmc, msci + TMCT, card);
471 sca_out(port->rxs, msci + RXS, card);
472 sca_out(port->txs, msci + TXS, card);
473 sca_out(CMD_TX_ENABLE, msci + CMD, card);
474 sca_out(CMD_RX_ENABLE, msci + CMD, card);
475
476 sca_set_carrier(port);
477 enable_intr(port);
478 napi_enable(&port->napi);
479 netif_start_queue(dev);
480}
481
482
483static void sca_close(struct net_device *dev)
484{
485 port_t *port = dev_to_port(dev);
486
487 /* reset channel */
488 sca_out(CMD_RESET, get_msci(port) + CMD, port->card);
489 disable_intr(port);
490 napi_disable(&port->napi);
491 netif_stop_queue(dev);
492}
493
494
495static int sca_attach(struct net_device *dev, unsigned short encoding,
496 unsigned short parity)
497{
498 if (encoding != ENCODING_NRZ &&
499 encoding != ENCODING_NRZI &&
500 encoding != ENCODING_FM_MARK &&
501 encoding != ENCODING_FM_SPACE &&
502 encoding != ENCODING_MANCHESTER)
503 return -EINVAL;
504
505 if (parity != PARITY_NONE &&
506 parity != PARITY_CRC16_PR0 &&
507 parity != PARITY_CRC16_PR1 &&
508 parity != PARITY_CRC32_PR1_CCITT &&
509 parity != PARITY_CRC16_PR1_CCITT)
510 return -EINVAL;
511
512 dev_to_port(dev)->encoding = encoding;
513 dev_to_port(dev)->parity = parity;
514 return 0;
515}
516
517
518#ifdef DEBUG_RINGS
519static void sca_dump_rings(struct net_device *dev)
520{
521 port_t *port = dev_to_port(dev);
522 card_t *card = port->card;
523 u16 cnt;
524
525 printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
526 sca_inl(get_dmac_rx(port) + CDAL, card),
527 sca_inl(get_dmac_rx(port) + EDAL, card),
528 sca_in(DSR_RX(port->chan), card), port->rxin,
529 sca_in(DSR_RX(port->chan), card) & DSR_DE ? "" : "in");
530 for (cnt = 0; cnt < port->card->rx_ring_buffers; cnt++)
531 printk(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
532
533 printk("\n" KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
534 "last=%u %sactive",
535 sca_inl(get_dmac_tx(port) + CDAL, card),
536 sca_inl(get_dmac_tx(port) + EDAL, card),
537 sca_in(DSR_TX(port->chan), card), port->txin, port->txlast,
538 sca_in(DSR_TX(port->chan), card) & DSR_DE ? "" : "in");
539
540 for (cnt = 0; cnt < port->card->tx_ring_buffers; cnt++)
541 printk(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
542 printk("\n");
543
544 printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x,"
545 " ST: %02x %02x %02x %02x %02x, FST: %02x CST: %02x %02x\n",
546 sca_in(get_msci(port) + MD0, card),
547 sca_in(get_msci(port) + MD1, card),
548 sca_in(get_msci(port) + MD2, card),
549 sca_in(get_msci(port) + ST0, card),
550 sca_in(get_msci(port) + ST1, card),
551 sca_in(get_msci(port) + ST2, card),
552 sca_in(get_msci(port) + ST3, card),
553 sca_in(get_msci(port) + ST4, card),
554 sca_in(get_msci(port) + FST, card),
555 sca_in(get_msci(port) + CST0, card),
556 sca_in(get_msci(port) + CST1, card));
557
558 printk(KERN_DEBUG "ILAR: %02x ISR: %08x %08x\n", sca_in(ILAR, card),
559 sca_inl(ISR0, card), sca_inl(ISR1, card));
560}
561#endif /* DEBUG_RINGS */
562
563
564static int sca_xmit(struct sk_buff *skb, struct net_device *dev)
565{
566 port_t *port = dev_to_port(dev);
567 card_t *card = port->card;
568 pkt_desc __iomem *desc;
569 u32 buff, len;
570
571 spin_lock_irq(&port->lock);
572
573 desc = desc_address(port, port->txin + 1, 1);
574 BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */
575
576#ifdef DEBUG_PKT
577 printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
578 debug_frame(skb);
579#endif
580
581 desc = desc_address(port, port->txin, 1);
582 buff = buffer_offset(port, port->txin, 1);
583 len = skb->len;
584 memcpy_toio(card->rambase + buff, skb->data, len);
585
586 writew(len, &desc->len);
587 writeb(ST_TX_EOM, &desc->stat);
588 dev->trans_start = jiffies;
589
590 port->txin = (port->txin + 1) % card->tx_ring_buffers;
591 sca_outl(desc_offset(port, port->txin, 1),
592 get_dmac_tx(port) + EDAL, card);
593
594 sca_out(DSR_DE, DSR_TX(port->chan), card); /* Enable TX DMA */
595
596 desc = desc_address(port, port->txin + 1, 1);
597 if (readb(&desc->stat)) /* allow 1 packet gap */
598 netif_stop_queue(dev);
599
600 spin_unlock_irq(&port->lock);
601
602 dev_kfree_skb(skb);
603 return 0;
604}
605
606
607static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
608 u32 ramsize)
609{
610 /* Round RAM size to 32 bits, fill from end to start */
611 u32 i = ramsize &= ~3;
612
613 do {
614 i -= 4;
615 writel(i ^ 0x12345678, rambase + i);
616 } while (i > 0);
617
618 for (i = 0; i < ramsize ; i += 4) {
619 if (readl(rambase + i) != (i ^ 0x12345678))
620 break;
621 }
622
623 return i;
624}
625
626
627static void __devinit sca_init(card_t *card, int wait_states)
628{
629 sca_out(wait_states, WCRL, card); /* Wait Control */
630 sca_out(wait_states, WCRM, card);
631 sca_out(wait_states, WCRH, card);
632
633 sca_out(0, DMER, card); /* DMA Master disable */
634 sca_out(0x03, PCR, card); /* DMA priority */
635 sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
636 sca_out(0, DSR_TX(0), card);
637 sca_out(0, DSR_RX(1), card);
638 sca_out(0, DSR_TX(1), card);
639 sca_out(DMER_DME, DMER, card); /* DMA Master enable */
640}
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index d3d5055741ad..f1ddd7c3459c 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -342,7 +342,7 @@ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
342 342
343static int pvc_open(struct net_device *dev) 343static int pvc_open(struct net_device *dev)
344{ 344{
345 pvc_device *pvc = dev->priv; 345 pvc_device *pvc = dev->ml_priv;
346 346
347 if ((pvc->frad->flags & IFF_UP) == 0) 347 if ((pvc->frad->flags & IFF_UP) == 0)
348 return -EIO; /* Frad must be UP in order to activate PVC */ 348 return -EIO; /* Frad must be UP in order to activate PVC */
@@ -362,7 +362,7 @@ static int pvc_open(struct net_device *dev)
362 362
363static int pvc_close(struct net_device *dev) 363static int pvc_close(struct net_device *dev)
364{ 364{
365 pvc_device *pvc = dev->priv; 365 pvc_device *pvc = dev->ml_priv;
366 366
367 if (--pvc->open_count == 0) { 367 if (--pvc->open_count == 0) {
368 hdlc_device *hdlc = dev_to_hdlc(pvc->frad); 368 hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
@@ -381,7 +381,7 @@ static int pvc_close(struct net_device *dev)
381 381
382static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 382static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
383{ 383{
384 pvc_device *pvc = dev->priv; 384 pvc_device *pvc = dev->ml_priv;
385 fr_proto_pvc_info info; 385 fr_proto_pvc_info info;
386 386
387 if (ifr->ifr_settings.type == IF_GET_PROTO) { 387 if (ifr->ifr_settings.type == IF_GET_PROTO) {
@@ -409,7 +409,7 @@ static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
409 409
410static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) 410static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
411{ 411{
412 pvc_device *pvc = dev->priv; 412 pvc_device *pvc = dev->ml_priv;
413 413
414 if (pvc->state.active) { 414 if (pvc->state.active) {
415 if (dev->type == ARPHRD_ETHER) { 415 if (dev->type == ARPHRD_ETHER) {
@@ -1111,7 +1111,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1111 dev->change_mtu = pvc_change_mtu; 1111 dev->change_mtu = pvc_change_mtu;
1112 dev->mtu = HDLC_MAX_MTU; 1112 dev->mtu = HDLC_MAX_MTU;
1113 dev->tx_queue_len = 0; 1113 dev->tx_queue_len = 0;
1114 dev->priv = pvc; 1114 dev->ml_priv = pvc;
1115 1115
1116 result = dev_alloc_name(dev, dev->name); 1116 result = dev_alloc_name(dev, dev->name);
1117 if (result < 0) { 1117 if (result < 0) {
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 4efe9e6d32d5..57fe714c1c7f 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -2,7 +2,7 @@
2 * Generic HDLC support routines for Linux 2 * Generic HDLC support routines for Linux
3 * Point-to-point protocol support 3 * Point-to-point protocol support
4 * 4 *
5 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl> 5 * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License 8 * under the terms of version 2 of the GNU General Public License
@@ -18,87 +18,633 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/pkt_sched.h> 19#include <linux/pkt_sched.h>
20#include <linux/poll.h> 20#include <linux/poll.h>
21#include <linux/rtnetlink.h>
22#include <linux/skbuff.h> 21#include <linux/skbuff.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
24#include <net/syncppp.h> 23#include <linux/spinlock.h>
24
25#define DEBUG_CP 0 /* also bytes# to dump */
26#define DEBUG_STATE 0
27#define DEBUG_HARD_HEADER 0
28
29#define HDLC_ADDR_ALLSTATIONS 0xFF
30#define HDLC_CTRL_UI 0x03
31
32#define PID_LCP 0xC021
33#define PID_IP 0x0021
34#define PID_IPCP 0x8021
35#define PID_IPV6 0x0057
36#define PID_IPV6CP 0x8057
37
38enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT};
39enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ,
40 CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY,
41 LCP_DISC_REQ, CP_CODES};
42#if DEBUG_CP
43static const char *const code_names[CP_CODES] = {
44 "0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq",
45 "TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard"
46};
47static char debug_buffer[64 + 3 * DEBUG_CP];
48#endif
49
50enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5};
51
52struct hdlc_header {
53 u8 address;
54 u8 control;
55 __be16 protocol;
56};
57
58struct cp_header {
59 u8 code;
60 u8 id;
61 __be16 len;
62};
63
25 64
26struct ppp_state { 65struct proto {
27 struct ppp_device pppdev; 66 struct net_device *dev;
28 struct ppp_device *syncppp_ptr; 67 struct timer_list timer;
29 int (*old_change_mtu)(struct net_device *dev, int new_mtu); 68 unsigned long timeout;
69 u16 pid; /* protocol ID */
70 u8 state;
71 u8 cr_id; /* ID of last Configuration-Request */
72 u8 restart_counter;
30}; 73};
31 74
75struct ppp {
76 struct proto protos[IDX_COUNT];
77 spinlock_t lock;
78 unsigned long last_pong;
79 unsigned int req_timeout, cr_retries, term_retries;
80 unsigned int keepalive_interval, keepalive_timeout;
81 u8 seq; /* local sequence number for requests */
82 u8 echo_id; /* ID of last Echo-Request (LCP) */
83};
84
85enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED,
86 STATES, STATE_MASK = 0xF};
87enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA,
88 RUC, RXJ_GOOD, RXJ_BAD, EVENTS};
89enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100,
90 SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000};
91
92#if DEBUG_STATE
93static const char *const state_names[STATES] = {
94 "Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent",
95 "Opened"
96};
97static const char *const event_names[EVENTS] = {
98 "Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN",
99 "RTR", "RTA", "RUC", "RXJ+", "RXJ-"
100};
101#endif
102
103static struct sk_buff_head tx_queue; /* used when holding the spin lock */
104
32static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr); 105static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr);
33 106
107static inline struct ppp* get_ppp(struct net_device *dev)
108{
109 return (struct ppp *)dev_to_hdlc(dev)->state;
110}
34 111
35static inline struct ppp_state* state(hdlc_device *hdlc) 112static inline struct proto* get_proto(struct net_device *dev, u16 pid)
36{ 113{
37 return(struct ppp_state *)(hdlc->state); 114 struct ppp *ppp = get_ppp(dev);
115
116 switch (pid) {
117 case PID_LCP:
118 return &ppp->protos[IDX_LCP];
119 case PID_IPCP:
120 return &ppp->protos[IDX_IPCP];
121 case PID_IPV6CP:
122 return &ppp->protos[IDX_IPV6CP];
123 default:
124 return NULL;
125 }
38} 126}
39 127
128static inline const char* proto_name(u16 pid)
129{
130 switch (pid) {
131 case PID_LCP:
132 return "LCP";
133 case PID_IPCP:
134 return "IPCP";
135 case PID_IPV6CP:
136 return "IPV6CP";
137 default:
138 return NULL;
139 }
140}
40 141
41static int ppp_open(struct net_device *dev) 142static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
42{ 143{
43 hdlc_device *hdlc = dev_to_hdlc(dev); 144 struct hdlc_header *data = (struct hdlc_header*)skb->data;
44 int (*old_ioctl)(struct net_device *, struct ifreq *, int); 145
45 int result; 146 if (skb->len < sizeof(struct hdlc_header))
147 return htons(ETH_P_HDLC);
148 if (data->address != HDLC_ADDR_ALLSTATIONS ||
149 data->control != HDLC_CTRL_UI)
150 return htons(ETH_P_HDLC);
151
152 switch (data->protocol) {
153 case __constant_htons(PID_IP):
154 skb_pull(skb, sizeof(struct hdlc_header));
155 return htons(ETH_P_IP);
46 156
47 dev->ml_priv = &state(hdlc)->syncppp_ptr; 157 case __constant_htons(PID_IPV6):
48 state(hdlc)->syncppp_ptr = &state(hdlc)->pppdev; 158 skb_pull(skb, sizeof(struct hdlc_header));
49 state(hdlc)->pppdev.dev = dev; 159 return htons(ETH_P_IPV6);
50 160
51 old_ioctl = dev->do_ioctl; 161 default:
52 state(hdlc)->old_change_mtu = dev->change_mtu; 162 return htons(ETH_P_HDLC);
53 sppp_attach(&state(hdlc)->pppdev);
54 /* sppp_attach nukes them. We don't need syncppp's ioctl */
55 dev->do_ioctl = old_ioctl;
56 state(hdlc)->pppdev.sppp.pp_flags &= ~PP_CISCO;
57 dev->type = ARPHRD_PPP;
58 result = sppp_open(dev);
59 if (result) {
60 sppp_detach(dev);
61 return result;
62 } 163 }
164}
63 165
64 return 0; 166
167static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
168 u16 type, const void *daddr, const void *saddr,
169 unsigned int len)
170{
171 struct hdlc_header *data;
172#if DEBUG_HARD_HEADER
173 printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name);
174#endif
175
176 skb_push(skb, sizeof(struct hdlc_header));
177 data = (struct hdlc_header*)skb->data;
178
179 data->address = HDLC_ADDR_ALLSTATIONS;
180 data->control = HDLC_CTRL_UI;
181 switch (type) {
182 case ETH_P_IP:
183 data->protocol = htons(PID_IP);
184 break;
185 case ETH_P_IPV6:
186 data->protocol = htons(PID_IPV6);
187 break;
188 case PID_LCP:
189 case PID_IPCP:
190 case PID_IPV6CP:
191 data->protocol = htons(type);
192 break;
193 default: /* unknown protocol */
194 data->protocol = 0;
195 }
196 return sizeof(struct hdlc_header);
65} 197}
66 198
67 199
200static void ppp_tx_flush(void)
201{
202 struct sk_buff *skb;
203 while ((skb = skb_dequeue(&tx_queue)) != NULL)
204 dev_queue_xmit(skb);
205}
68 206
69static void ppp_close(struct net_device *dev) 207static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
208 u8 id, unsigned int len, const void *data)
70{ 209{
71 hdlc_device *hdlc = dev_to_hdlc(dev); 210 struct sk_buff *skb;
211 struct cp_header *cp;
212 unsigned int magic_len = 0;
213 static u32 magic;
214
215#if DEBUG_CP
216 int i;
217 char *ptr;
218#endif
219
220 if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY))
221 magic_len = sizeof(magic);
222
223 skb = dev_alloc_skb(sizeof(struct hdlc_header) +
224 sizeof(struct cp_header) + magic_len + len);
225 if (!skb) {
226 printk(KERN_WARNING "%s: out of memory in ppp_tx_cp()\n",
227 dev->name);
228 return;
229 }
230 skb_reserve(skb, sizeof(struct hdlc_header));
231
232 cp = (struct cp_header *)skb_put(skb, sizeof(struct cp_header));
233 cp->code = code;
234 cp->id = id;
235 cp->len = htons(sizeof(struct cp_header) + magic_len + len);
236
237 if (magic_len)
238 memcpy(skb_put(skb, magic_len), &magic, magic_len);
239 if (len)
240 memcpy(skb_put(skb, len), data, len);
241
242#if DEBUG_CP
243 BUG_ON(code >= CP_CODES);
244 ptr = debug_buffer;
245 *ptr = '\x0';
246 for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) {
247 sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]);
248 ptr += strlen(ptr);
249 }
250 printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name,
251 proto_name(pid), code_names[code], id, debug_buffer);
252#endif
72 253
73 sppp_close(dev); 254 ppp_hard_header(skb, dev, pid, NULL, NULL, 0);
74 sppp_detach(dev);
75 255
76 dev->change_mtu = state(hdlc)->old_change_mtu; 256 skb->priority = TC_PRIO_CONTROL;
77 dev->mtu = HDLC_MAX_MTU; 257 skb->dev = dev;
78 dev->hard_header_len = 16; 258 skb_reset_network_header(skb);
259 skb_queue_tail(&tx_queue, skb);
79} 260}
80 261
81 262
263/* State transition table (compare STD-51)
264 Events Actions
265 TO+ = Timeout with counter > 0 irc = Initialize-Restart-Count
266 TO- = Timeout with counter expired zrc = Zero-Restart-Count
267
268 RCR+ = Receive-Configure-Request (Good) scr = Send-Configure-Request
269 RCR- = Receive-Configure-Request (Bad)
270 RCA = Receive-Configure-Ack sca = Send-Configure-Ack
271 RCN = Receive-Configure-Nak/Rej scn = Send-Configure-Nak/Rej
272
273 RTR = Receive-Terminate-Request str = Send-Terminate-Request
274 RTA = Receive-Terminate-Ack sta = Send-Terminate-Ack
275
276 RUC = Receive-Unknown-Code scj = Send-Code-Reject
277 RXJ+ = Receive-Code-Reject (permitted)
278 or Receive-Protocol-Reject
279 RXJ- = Receive-Code-Reject (catastrophic)
280 or Receive-Protocol-Reject
281*/
282static int cp_table[EVENTS][STATES] = {
283 /* CLOSED STOPPED STOPPING REQ_SENT ACK_RECV ACK_SENT OPENED
284 0 1 2 3 4 5 6 */
285 {IRC|SCR|3, INV , INV , INV , INV , INV , INV }, /* START */
286 { INV , 0 , 0 , 0 , 0 , 0 , 0 }, /* STOP */
287 { INV , INV ,STR|2, SCR|3 ,SCR|3, SCR|5 , INV }, /* TO+ */
288 { INV , INV , 1 , 1 , 1 , 1 , INV }, /* TO- */
289 { STA|0 ,IRC|SCR|SCA|5, 2 , SCA|5 ,SCA|6, SCA|5 ,SCR|SCA|5}, /* RCR+ */
290 { STA|0 ,IRC|SCR|SCN|3, 2 , SCN|3 ,SCN|4, SCN|3 ,SCR|SCN|3}, /* RCR- */
291 { STA|0 , STA|1 , 2 , IRC|4 ,SCR|3, 6 , SCR|3 }, /* RCA */
292 { STA|0 , STA|1 , 2 ,IRC|SCR|3,SCR|3,IRC|SCR|5, SCR|3 }, /* RCN */
293 { STA|0 , STA|1 ,STA|2, STA|3 ,STA|3, STA|3 ,ZRC|STA|2}, /* RTR */
294 { 0 , 1 , 1 , 3 , 3 , 5 , SCR|3 }, /* RTA */
295 { SCJ|0 , SCJ|1 ,SCJ|2, SCJ|3 ,SCJ|4, SCJ|5 , SCJ|6 }, /* RUC */
296 { 0 , 1 , 2 , 3 , 3 , 5 , 6 }, /* RXJ+ */
297 { 0 , 1 , 1 , 1 , 1 , 1 ,IRC|STR|2}, /* RXJ- */
298};
299
82 300
83static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev) 301/* SCA: RCR+ must supply id, len and data
302 SCN: RCR- must supply code, id, len and data
303 STA: RTR must supply id
304 SCJ: RUC must supply CP packet len and data */
305static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
306 u8 id, unsigned int len, const void *data)
84{ 307{
85 return __constant_htons(ETH_P_WAN_PPP); 308 int old_state, action;
309 struct ppp *ppp = get_ppp(dev);
310 struct proto *proto = get_proto(dev, pid);
311
312 old_state = proto->state;
313 BUG_ON(old_state >= STATES);
314 BUG_ON(event >= EVENTS);
315
316#if DEBUG_STATE
317 printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name,
318 proto_name(pid), event_names[event], state_names[proto->state]);
319#endif
320
321 action = cp_table[event][old_state];
322
323 proto->state = action & STATE_MASK;
324 if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */
325 mod_timer(&proto->timer, proto->timeout =
326 jiffies + ppp->req_timeout * HZ);
327 if (action & ZRC)
328 proto->restart_counter = 0;
329 if (action & IRC)
330 proto->restart_counter = (proto->state == STOPPING) ?
331 ppp->term_retries : ppp->cr_retries;
332
333 if (action & SCR) /* send Configure-Request */
334 ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq,
335 0, NULL);
336 if (action & SCA) /* send Configure-Ack */
337 ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data);
338 if (action & SCN) /* send Configure-Nak/Reject */
339 ppp_tx_cp(dev, pid, code, id, len, data);
340 if (action & STR) /* send Terminate-Request */
341 ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL);
342 if (action & STA) /* send Terminate-Ack */
343 ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL);
344 if (action & SCJ) /* send Code-Reject */
345 ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);
346
347 if (old_state != OPENED && proto->state == OPENED) {
348 printk(KERN_INFO "%s: %s up\n", dev->name, proto_name(pid));
349 if (pid == PID_LCP) {
350 netif_dormant_off(dev);
351 ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
352 ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL);
353 ppp->last_pong = jiffies;
354 mod_timer(&proto->timer, proto->timeout =
355 jiffies + ppp->keepalive_interval * HZ);
356 }
357 }
358 if (old_state == OPENED && proto->state != OPENED) {
359 printk(KERN_INFO "%s: %s down\n", dev->name, proto_name(pid));
360 if (pid == PID_LCP) {
361 netif_dormant_on(dev);
362 ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
363 ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL);
364 }
365 }
366 if (old_state != CLOSED && proto->state == CLOSED)
367 del_timer(&proto->timer);
368
369#if DEBUG_STATE
370 printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
371 proto_name(pid), event_names[event], state_names[proto->state]);
372#endif
373}
374
375
376static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
377 unsigned int req_len, const u8 *data)
378{
379 static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 };
380 const u8 *opt;
381 u8 *out;
382 unsigned int len = req_len, nak_len = 0, rej_len = 0;
383
384 if (!(out = kmalloc(len, GFP_ATOMIC))) {
385 dev->stats.rx_dropped++;
386 return; /* out of memory, ignore CR packet */
387 }
388
389 for (opt = data; len; len -= opt[1], opt += opt[1]) {
390 if (len < 2 || len < opt[1]) {
391 dev->stats.rx_errors++;
392 return; /* bad packet, drop silently */
393 }
394
395 if (pid == PID_LCP)
396 switch (opt[0]) {
397 case LCP_OPTION_MRU:
398 continue; /* MRU always OK and > 1500 bytes? */
399
400 case LCP_OPTION_ACCM: /* async control character map */
401 if (!memcmp(opt, valid_accm,
402 sizeof(valid_accm)))
403 continue;
404 if (!rej_len) { /* NAK it */
405 memcpy(out + nak_len, valid_accm,
406 sizeof(valid_accm));
407 nak_len += sizeof(valid_accm);
408 continue;
409 }
410 break;
411 case LCP_OPTION_MAGIC:
412 if (opt[1] != 6 || (!opt[2] && !opt[3] &&
413 !opt[4] && !opt[5]))
414 break; /* reject invalid magic number */
415 continue;
416 }
417 /* reject this option */
418 memcpy(out + rej_len, opt, opt[1]);
419 rej_len += opt[1];
420 }
421
422 if (rej_len)
423 ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out);
424 else if (nak_len)
425 ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out);
426 else
427 ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);
428
429 kfree(out);
430}
431
432static int ppp_rx(struct sk_buff *skb)
433{
434 struct hdlc_header *hdr = (struct hdlc_header*)skb->data;
435 struct net_device *dev = skb->dev;
436 struct ppp *ppp = get_ppp(dev);
437 struct proto *proto;
438 struct cp_header *cp;
439 unsigned long flags;
440 unsigned int len;
441 u16 pid;
442#if DEBUG_CP
443 int i;
444 char *ptr;
445#endif
446
447 spin_lock_irqsave(&ppp->lock, flags);
448 /* Check HDLC header */
449 if (skb->len < sizeof(struct hdlc_header))
450 goto rx_error;
451 cp = (struct cp_header*)skb_pull(skb, sizeof(struct hdlc_header));
452 if (hdr->address != HDLC_ADDR_ALLSTATIONS ||
453 hdr->control != HDLC_CTRL_UI)
454 goto rx_error;
455
456 pid = ntohs(hdr->protocol);
457 proto = get_proto(dev, pid);
458 if (!proto) {
459 if (ppp->protos[IDX_LCP].state == OPENED)
460 ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ,
461 ++ppp->seq, skb->len + 2, &hdr->protocol);
462 goto rx_error;
463 }
464
465 len = ntohs(cp->len);
466 if (len < sizeof(struct cp_header) /* no complete CP header? */ ||
467 skb->len < len /* truncated packet? */)
468 goto rx_error;
469 skb_pull(skb, sizeof(struct cp_header));
470 len -= sizeof(struct cp_header);
471
472 /* HDLC and CP headers stripped from skb */
473#if DEBUG_CP
474 if (cp->code < CP_CODES)
475 sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code],
476 cp->id);
477 else
478 sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id);
479 ptr = debug_buffer + strlen(debug_buffer);
480 for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) {
481 sprintf(ptr, " %02X", skb->data[i]);
482 ptr += strlen(ptr);
483 }
484 printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid),
485 debug_buffer);
486#endif
487
488 /* LCP only */
489 if (pid == PID_LCP)
490 switch (cp->code) {
491 case LCP_PROTO_REJ:
492 pid = ntohs(*(__be16*)skb->data);
493 if (pid == PID_LCP || pid == PID_IPCP ||
494 pid == PID_IPV6CP)
495 ppp_cp_event(dev, pid, RXJ_BAD, 0, 0,
496 0, NULL);
497 goto out;
498
499 case LCP_ECHO_REQ: /* send Echo-Reply */
500 if (len >= 4 && proto->state == OPENED)
501 ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY,
502 cp->id, len - 4, skb->data + 4);
503 goto out;
504
505 case LCP_ECHO_REPLY:
506 if (cp->id == ppp->echo_id)
507 ppp->last_pong = jiffies;
508 goto out;
509
510 case LCP_DISC_REQ: /* discard */
511 goto out;
512 }
513
514 /* LCP, IPCP and IPV6CP */
515 switch (cp->code) {
516 case CP_CONF_REQ:
517 ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data);
518 goto out;
519
520 case CP_CONF_ACK:
521 if (cp->id == proto->cr_id)
522 ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL);
523 goto out;
524
525 case CP_CONF_REJ:
526 case CP_CONF_NAK:
527 if (cp->id == proto->cr_id)
528 ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL);
529 goto out;
530
531 case CP_TERM_REQ:
532 ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL);
533 goto out;
534
535 case CP_TERM_ACK:
536 ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL);
537 goto out;
538
539 case CP_CODE_REJ:
540 ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL);
541 goto out;
542
543 default:
544 len += sizeof(struct cp_header);
545 if (len > dev->mtu)
546 len = dev->mtu;
547 ppp_cp_event(dev, pid, RUC, 0, 0, len, cp);
548 goto out;
549 }
550 goto out;
551
552rx_error:
553 dev->stats.rx_errors++;
554out:
555 spin_unlock_irqrestore(&ppp->lock, flags);
556 dev_kfree_skb_any(skb);
557 ppp_tx_flush();
558 return NET_RX_DROP;
86} 559}
87 560
88 561
562static void ppp_timer(unsigned long arg)
563{
564 struct proto *proto = (struct proto *)arg;
565 struct ppp *ppp = get_ppp(proto->dev);
566 unsigned long flags;
567
568 spin_lock_irqsave(&ppp->lock, flags);
569 switch (proto->state) {
570 case STOPPING:
571 case REQ_SENT:
572 case ACK_RECV:
573 case ACK_SENT:
574 if (proto->restart_counter) {
575 ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
576 0, NULL);
577 proto->restart_counter--;
578 } else
579 ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
580 0, NULL);
581 break;
582
583 case OPENED:
584 if (proto->pid != PID_LCP)
585 break;
586 if (time_after(jiffies, ppp->last_pong +
587 ppp->keepalive_timeout * HZ)) {
588 printk(KERN_INFO "%s: Link down\n", proto->dev->name);
589 ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL);
590 ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL);
591 } else { /* send keep-alive packet */
592 ppp->echo_id = ++ppp->seq;
593 ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ,
594 ppp->echo_id, 0, NULL);
595 proto->timer.expires = jiffies +
596 ppp->keepalive_interval * HZ;
597 add_timer(&proto->timer);
598 }
599 break;
600 }
601 spin_unlock_irqrestore(&ppp->lock, flags);
602 ppp_tx_flush();
603}
604
605
606static void ppp_start(struct net_device *dev)
607{
608 struct ppp *ppp = get_ppp(dev);
609 int i;
610
611 for (i = 0; i < IDX_COUNT; i++) {
612 struct proto *proto = &ppp->protos[i];
613 proto->dev = dev;
614 init_timer(&proto->timer);
615 proto->timer.function = ppp_timer;
616 proto->timer.data = (unsigned long)proto;
617 proto->state = CLOSED;
618 }
619 ppp->protos[IDX_LCP].pid = PID_LCP;
620 ppp->protos[IDX_IPCP].pid = PID_IPCP;
621 ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP;
622
623 ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL);
624}
625
626static void ppp_stop(struct net_device *dev)
627{
628 ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
629}
89 630
90static struct hdlc_proto proto = { 631static struct hdlc_proto proto = {
91 .open = ppp_open, 632 .start = ppp_start,
92 .close = ppp_close, 633 .stop = ppp_stop,
93 .type_trans = ppp_type_trans, 634 .type_trans = ppp_type_trans,
94 .ioctl = ppp_ioctl, 635 .ioctl = ppp_ioctl,
636 .netif_rx = ppp_rx,
95 .module = THIS_MODULE, 637 .module = THIS_MODULE,
96}; 638};
97 639
640static const struct header_ops ppp_header_ops = {
641 .create = ppp_hard_header,
642};
98 643
99static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr) 644static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
100{ 645{
101 hdlc_device *hdlc = dev_to_hdlc(dev); 646 hdlc_device *hdlc = dev_to_hdlc(dev);
647 struct ppp *ppp;
102 int result; 648 int result;
103 649
104 switch (ifr->ifr_settings.type) { 650 switch (ifr->ifr_settings.type) {
@@ -109,25 +655,35 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
109 return 0; /* return protocol only, no settable parameters */ 655 return 0; /* return protocol only, no settable parameters */
110 656
111 case IF_PROTO_PPP: 657 case IF_PROTO_PPP:
112 if(!capable(CAP_NET_ADMIN)) 658 if (!capable(CAP_NET_ADMIN))
113 return -EPERM; 659 return -EPERM;
114 660
115 if(dev->flags & IFF_UP) 661 if (dev->flags & IFF_UP)
116 return -EBUSY; 662 return -EBUSY;
117 663
118 /* no settable parameters */ 664 /* no settable parameters */
119 665
120 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); 666 result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
121 if (result) 667 if (result)
122 return result; 668 return result;
123 669
124 result = attach_hdlc_protocol(dev, &proto, 670 result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp));
125 sizeof(struct ppp_state));
126 if (result) 671 if (result)
127 return result; 672 return result;
673
674 ppp = get_ppp(dev);
675 spin_lock_init(&ppp->lock);
676 ppp->req_timeout = 2;
677 ppp->cr_retries = 10;
678 ppp->term_retries = 2;
679 ppp->keepalive_interval = 10;
680 ppp->keepalive_timeout = 60;
681
128 dev->hard_start_xmit = hdlc->xmit; 682 dev->hard_start_xmit = hdlc->xmit;
683 dev->hard_header_len = sizeof(struct hdlc_header);
684 dev->header_ops = &ppp_header_ops;
129 dev->type = ARPHRD_PPP; 685 dev->type = ARPHRD_PPP;
130 netif_dormant_off(dev); 686 netif_dormant_on(dev);
131 return 0; 687 return 0;
132 } 688 }
133 689
@@ -137,12 +693,11 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
137 693
138static int __init mod_init(void) 694static int __init mod_init(void)
139{ 695{
696 skb_queue_head_init(&tx_queue);
140 register_hdlc_protocol(&proto); 697 register_hdlc_protocol(&proto);
141 return 0; 698 return 0;
142} 699}
143 700
144
145
146static void __exit mod_exit(void) 701static void __exit mod_exit(void)
147{ 702{
148 unregister_hdlc_protocol(&proto); 703 unregister_hdlc_protocol(&proto);
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index e299313f828a..af54f0cf1b35 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -66,7 +66,6 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb)
66 * it right now. 66 * it right now.
67 */ 67 */
68 netif_rx(skb); 68 netif_rx(skb);
69 c->netdevice->last_rx = jiffies;
70} 69}
71 70
72/* 71/*
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
new file mode 100644
index 000000000000..0c6802507a79
--- /dev/null
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -0,0 +1,1325 @@
1/*
2 * Intel IXP4xx HSS (synchronous serial port) driver for Linux
3 *
4 * Copyright (C) 2007-2008 Krzysztof Hałasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 */
10
11#include <linux/bitops.h>
12#include <linux/cdev.h>
13#include <linux/dma-mapping.h>
14#include <linux/dmapool.h>
15#include <linux/fs.h>
16#include <linux/hdlc.h>
17#include <linux/io.h>
18#include <linux/kernel.h>
19#include <linux/platform_device.h>
20#include <linux/poll.h>
21#include <mach/npe.h>
22#include <mach/qmgr.h>
23
24#define DEBUG_DESC 0
25#define DEBUG_RX 0
26#define DEBUG_TX 0
27#define DEBUG_PKT_BYTES 0
28#define DEBUG_CLOSE 0
29
30#define DRV_NAME "ixp4xx_hss"
31
32#define PKT_EXTRA_FLAGS 0 /* orig 1 */
33#define PKT_NUM_PIPES 1 /* 1, 2 or 4 */
34#define PKT_PIPE_FIFO_SIZEW 4 /* total 4 dwords per HSS */
35
36#define RX_DESCS 16 /* also length of all RX queues */
37#define TX_DESCS 16 /* also length of all TX queues */
38
39#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
40#define RX_SIZE (HDLC_MAX_MRU + 4) /* NPE needs more space */
41#define MAX_CLOSE_WAIT 1000 /* microseconds */
42#define HSS_COUNT 2
43#define FRAME_SIZE 256 /* doesn't matter at this point */
44#define FRAME_OFFSET 0
45#define MAX_CHANNELS (FRAME_SIZE / 8)
46
47#define NAPI_WEIGHT 16
48
49/* Queue IDs */
50#define HSS0_CHL_RXTRIG_QUEUE 12 /* orig size = 32 dwords */
51#define HSS0_PKT_RX_QUEUE 13 /* orig size = 32 dwords */
52#define HSS0_PKT_TX0_QUEUE 14 /* orig size = 16 dwords */
53#define HSS0_PKT_TX1_QUEUE 15
54#define HSS0_PKT_TX2_QUEUE 16
55#define HSS0_PKT_TX3_QUEUE 17
56#define HSS0_PKT_RXFREE0_QUEUE 18 /* orig size = 16 dwords */
57#define HSS0_PKT_RXFREE1_QUEUE 19
58#define HSS0_PKT_RXFREE2_QUEUE 20
59#define HSS0_PKT_RXFREE3_QUEUE 21
60#define HSS0_PKT_TXDONE_QUEUE 22 /* orig size = 64 dwords */
61
62#define HSS1_CHL_RXTRIG_QUEUE 10
63#define HSS1_PKT_RX_QUEUE 0
64#define HSS1_PKT_TX0_QUEUE 5
65#define HSS1_PKT_TX1_QUEUE 6
66#define HSS1_PKT_TX2_QUEUE 7
67#define HSS1_PKT_TX3_QUEUE 8
68#define HSS1_PKT_RXFREE0_QUEUE 1
69#define HSS1_PKT_RXFREE1_QUEUE 2
70#define HSS1_PKT_RXFREE2_QUEUE 3
71#define HSS1_PKT_RXFREE3_QUEUE 4
72#define HSS1_PKT_TXDONE_QUEUE 9
73
74#define NPE_PKT_MODE_HDLC 0
75#define NPE_PKT_MODE_RAW 1
76#define NPE_PKT_MODE_56KMODE 2
77#define NPE_PKT_MODE_56KENDIAN_MSB 4
78
79/* PKT_PIPE_HDLC_CFG_WRITE flags */
80#define PKT_HDLC_IDLE_ONES 0x1 /* default = flags */
81#define PKT_HDLC_CRC_32 0x2 /* default = CRC-16 */
82#define PKT_HDLC_MSB_ENDIAN 0x4 /* default = LE */
83
84
85/* hss_config, PCRs */
86/* Frame sync sampling, default = active low */
87#define PCR_FRM_SYNC_ACTIVE_HIGH 0x40000000
88#define PCR_FRM_SYNC_FALLINGEDGE 0x80000000
89#define PCR_FRM_SYNC_RISINGEDGE 0xC0000000
90
91/* Frame sync pin: input (default) or output generated off a given clk edge */
92#define PCR_FRM_SYNC_OUTPUT_FALLING 0x20000000
93#define PCR_FRM_SYNC_OUTPUT_RISING 0x30000000
94
95/* Frame and data clock sampling on edge, default = falling */
96#define PCR_FCLK_EDGE_RISING 0x08000000
97#define PCR_DCLK_EDGE_RISING 0x04000000
98
99/* Clock direction, default = input */
100#define PCR_SYNC_CLK_DIR_OUTPUT 0x02000000
101
102/* Generate/Receive frame pulses, default = enabled */
103#define PCR_FRM_PULSE_DISABLED 0x01000000
104
105 /* Data rate is full (default) or half the configured clk speed */
106#define PCR_HALF_CLK_RATE 0x00200000
107
108/* Invert data between NPE and HSS FIFOs? (default = no) */
109#define PCR_DATA_POLARITY_INVERT 0x00100000
110
111/* TX/RX endianness, default = LSB */
112#define PCR_MSB_ENDIAN 0x00080000
113
114/* Normal (default) / open drain mode (TX only) */
115#define PCR_TX_PINS_OPEN_DRAIN 0x00040000
116
117/* No framing bit transmitted and expected on RX? (default = framing bit) */
118#define PCR_SOF_NO_FBIT 0x00020000
119
120/* Drive data pins? */
121#define PCR_TX_DATA_ENABLE 0x00010000
122
123/* Voice 56k type: drive the data pins low (default), high, high Z */
124#define PCR_TX_V56K_HIGH 0x00002000
125#define PCR_TX_V56K_HIGH_IMP 0x00004000
126
127/* Unassigned type: drive the data pins low (default), high, high Z */
128#define PCR_TX_UNASS_HIGH 0x00000800
129#define PCR_TX_UNASS_HIGH_IMP 0x00001000
130
131/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
132#define PCR_TX_FB_HIGH_IMP 0x00000400
133
134/* 56k data endiannes - which bit unused: high (default) or low */
135#define PCR_TX_56KE_BIT_0_UNUSED 0x00000200
136
137/* 56k data transmission type: 32/8 bit data (default) or 56K data */
138#define PCR_TX_56KS_56K_DATA 0x00000100
139
140/* hss_config, cCR */
141/* Number of packetized clients, default = 1 */
142#define CCR_NPE_HFIFO_2_HDLC 0x04000000
143#define CCR_NPE_HFIFO_3_OR_4HDLC 0x08000000
144
145/* default = no loopback */
146#define CCR_LOOPBACK 0x02000000
147
148/* HSS number, default = 0 (first) */
149#define CCR_SECOND_HSS 0x01000000
150
151
152/* hss_config, clkCR: main:10, num:10, denom:12 */
153#define CLK42X_SPEED_EXP ((0x3FF << 22) | ( 2 << 12) | 15) /*65 KHz*/
154
155#define CLK42X_SPEED_512KHZ (( 130 << 22) | ( 2 << 12) | 15)
156#define CLK42X_SPEED_1536KHZ (( 43 << 22) | ( 18 << 12) | 47)
157#define CLK42X_SPEED_1544KHZ (( 43 << 22) | ( 33 << 12) | 192)
158#define CLK42X_SPEED_2048KHZ (( 32 << 22) | ( 34 << 12) | 63)
159#define CLK42X_SPEED_4096KHZ (( 16 << 22) | ( 34 << 12) | 127)
160#define CLK42X_SPEED_8192KHZ (( 8 << 22) | ( 34 << 12) | 255)
161
162#define CLK46X_SPEED_512KHZ (( 130 << 22) | ( 24 << 12) | 127)
163#define CLK46X_SPEED_1536KHZ (( 43 << 22) | (152 << 12) | 383)
164#define CLK46X_SPEED_1544KHZ (( 43 << 22) | ( 66 << 12) | 385)
165#define CLK46X_SPEED_2048KHZ (( 32 << 22) | (280 << 12) | 511)
166#define CLK46X_SPEED_4096KHZ (( 16 << 22) | (280 << 12) | 1023)
167#define CLK46X_SPEED_8192KHZ (( 8 << 22) | (280 << 12) | 2047)
168
169
170/* hss_config, LUT entries */
171#define TDMMAP_UNASSIGNED 0
172#define TDMMAP_HDLC 1 /* HDLC - packetized */
173#define TDMMAP_VOICE56K 2 /* Voice56K - 7-bit channelized */
174#define TDMMAP_VOICE64K 3 /* Voice64K - 8-bit channelized */
175
176/* offsets into HSS config */
177#define HSS_CONFIG_TX_PCR 0x00 /* port configuration registers */
178#define HSS_CONFIG_RX_PCR 0x04
179#define HSS_CONFIG_CORE_CR 0x08 /* loopback control, HSS# */
180#define HSS_CONFIG_CLOCK_CR 0x0C /* clock generator control */
181#define HSS_CONFIG_TX_FCR 0x10 /* frame configuration registers */
182#define HSS_CONFIG_RX_FCR 0x14
183#define HSS_CONFIG_TX_LUT 0x18 /* channel look-up tables */
184#define HSS_CONFIG_RX_LUT 0x38
185
186
187/* NPE command codes */
188/* writes the ConfigWord value to the location specified by offset */
189#define PORT_CONFIG_WRITE 0x40
190
191/* triggers the NPE to load the contents of the configuration table */
192#define PORT_CONFIG_LOAD 0x41
193
194/* triggers the NPE to return an HssErrorReadResponse message */
195#define PORT_ERROR_READ 0x42
196
197/* triggers the NPE to reset internal status and enable the HssPacketized
198 operation for the flow specified by pPipe */
199#define PKT_PIPE_FLOW_ENABLE 0x50
200#define PKT_PIPE_FLOW_DISABLE 0x51
201#define PKT_NUM_PIPES_WRITE 0x52
202#define PKT_PIPE_FIFO_SIZEW_WRITE 0x53
203#define PKT_PIPE_HDLC_CFG_WRITE 0x54
204#define PKT_PIPE_IDLE_PATTERN_WRITE 0x55
205#define PKT_PIPE_RX_SIZE_WRITE 0x56
206#define PKT_PIPE_MODE_WRITE 0x57
207
208/* HDLC packet status values - desc->status */
209#define ERR_SHUTDOWN 1 /* stop or shutdown occurrance */
210#define ERR_HDLC_ALIGN 2 /* HDLC alignment error */
211#define ERR_HDLC_FCS 3 /* HDLC Frame Check Sum error */
212#define ERR_RXFREE_Q_EMPTY 4 /* RX-free queue became empty while receiving
213 this packet (if buf_len < pkt_len) */
214#define ERR_HDLC_TOO_LONG 5 /* HDLC frame size too long */
215#define ERR_HDLC_ABORT 6 /* abort sequence received */
216#define ERR_DISCONNECTING 7 /* disconnect is in progress */
217
218
219#ifdef __ARMEB__
220typedef struct sk_buff buffer_t;
221#define free_buffer dev_kfree_skb
222#define free_buffer_irq dev_kfree_skb_irq
223#else
224typedef void buffer_t;
225#define free_buffer kfree
226#define free_buffer_irq kfree
227#endif
228
/* Per-HSS-port driver state; installed as the HDLC device's private data
 * (retrieved via dev_to_port()). */
struct port {
	struct device *dev;
	struct npe *npe;		/* NPE instance handling this port */
	struct net_device *netdev;
	struct napi_struct napi;
	struct hss_plat_info *plat;	/* platform hooks: open/close/set_clock, txreadyq */
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;	/* DMA (physical) address of desc_tab */
	unsigned int id;	/* HSS port number, used to index queue_ids[] */
	unsigned int clock_type, clock_rate, loopback;
	unsigned int initialized, carrier;	/* initialized: firmware loaded */
	u8 hdlc_cfg;		/* CRC config: 0 or PKT_HDLC_CRC_32 (see attach) */
};
243
/* NPE message structure — two 32-bit words exchanged with the NPE.
 * Field order is mirrored per CPU endianness so the in-memory byte
 * layout seen by the NPE is identical either way. */
struct msg {
#ifdef __ARMEB__
	u8 cmd, unused, hss_port, index;
	union {
		struct { u8 data8a, data8b, data8c, data8d; };
		struct { u16 data16a, data16b; };
		struct { u32 data32; };
	};
#else
	/* little-endian: byte order within each word reversed */
	u8 index, hss_port, unused, cmd;
	union {
		struct { u8 data8d, data8c, data8b, data8a; };
		struct { u16 data16b, data16a; };
		struct { u32 data32; };
	};
#endif
};
262
/* HDLC packet descriptor, shared with the NPE.  Field order is mirrored
 * per CPU endianness so both sides see the same byte layout. */
struct desc {
	u32 next;	/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;	/* buffer length */
	u16 pkt_len;	/* packet length */
	u32 data;	/* pointer to data buffer in RAM */
	u8 status;	/* 0 = OK, otherwise an ERR_* code above */
	u8 error_count;
	u16 __reserved;
#else
	u16 pkt_len;	/* packet length */
	u16 buf_len;	/* buffer length */
	u32 data;	/* pointer to data buffer in RAM */
	u16 __reserved;
	u8 error_count;
	u8 status;	/* 0 = OK, otherwise an ERR_* code above */
#endif
	u32 __reserved1[4];
};
284
285
286#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
287 (n) * sizeof(struct desc))
288#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
289
290#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
291 ((n) + RX_DESCS) * sizeof(struct desc))
292#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
293
294/*****************************************************************************
295 * global variables
296 ****************************************************************************/
297
static int ports_open;			/* number of open ports; guards dma_pool lifetime */
static struct dma_pool *dma_pool;	/* pool for per-port descriptor tables */
static spinlock_t npe_lock;

/* Hardware queue manager IDs for each of the two HSS ports */
static const struct {
	int tx, txdone, rx, rxfree;
}queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
		  HSS0_PKT_RXFREE0_QUEUE},
		 {HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
		  HSS1_PKT_RXFREE0_QUEUE},
};
309
310/*****************************************************************************
311 * utility functions
312 ****************************************************************************/
313
/* Fetch our struct port from the net_device's generic-HDLC private data */
static inline struct port* dev_to_port(struct net_device *dev)
{
	return dev_to_hdlc(dev)->priv;
}
318
319#ifndef __ARMEB__
320static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
321{
322 int i;
323 for (i = 0; i < cnt; i++)
324 dest[i] = swab32(src[i]);
325}
326#endif
327
328/*****************************************************************************
329 * HSS access
330 ****************************************************************************/
331
332static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
333{
334 u32 *val = (u32*)msg;
335 if (npe_send_message(port->npe, msg, what)) {
336 printk(KERN_CRIT "HSS-%i: unable to send command [%08X:%08X]"
337 " to %s\n", port->id, val[0], val[1],
338 npe_name(port->npe));
339 BUG();
340 }
341}
342
/* Program the TX and RX channel look-up tables, marking every timeslot
 * as TDMMAP_HDLC.  LUT entries are 2 bits wide, packed 16 per 32-bit
 * word, so a PORT_CONFIG_WRITE is issued once every 16 channels. */
static void hss_config_set_lut(struct port *port)
{
	struct msg msg;
	int ch;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;

	for (ch = 0; ch < MAX_CHANNELS; ch++) {
		/* shift the next 2-bit entry in from the top */
		msg.data32 >>= 2;
		msg.data32 |= TDMMAP_HDLC << 30;

		if (ch % 16 == 15) {	/* a full 32-bit LUT word is ready */
			/* (ch / 4) & ~3 = byte offset of this word in the LUT */
			msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
			hss_npe_send(port, &msg, "HSS_SET_TX_LUT");

			/* same word at the corresponding RX LUT offset */
			msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
			hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
		}
	}
}
365
/* Write the full HSS port configuration (PCRs, core CR, clock, frame
 * registers, LUTs) to the NPE and trigger a configuration load.
 * A failed load is fatal (BUG()). */
static void hss_config(struct port *port)
{
	struct msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_PCR;
	msg.data32 = PCR_FRM_SYNC_OUTPUT_RISING | PCR_MSB_ENDIAN |
		PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
	if (port->clock_type == CLOCK_INT)
		msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
	hss_npe_send(port, &msg, "HSS_SET_TX_PCR");

	/* RX PCR = TX PCR with TX_DATA_ENABLE and DCLK_EDGE_RISING toggled */
	msg.index = HSS_CONFIG_RX_PCR;
	msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
	hss_npe_send(port, &msg, "HSS_SET_RX_PCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CORE_CR;
	msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
		(port->id ? CCR_SECOND_HSS : 0);
	hss_npe_send(port, &msg, "HSS_SET_CORE_CR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_CLOCK_CR;
	msg.data32 = CLK42X_SPEED_2048KHZ /* FIXME */;
	hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_TX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_TX_FCR");

	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_WRITE;
	msg.hss_port = port->id;
	msg.index = HSS_CONFIG_RX_FCR;
	msg.data16a = FRAME_OFFSET;
	msg.data16b = FRAME_SIZE - 1;
	hss_npe_send(port, &msg, "HSS_SET_RX_FCR");

	hss_config_set_lut(port);

	/* commit: tell the NPE to load the configuration table */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PORT_CONFIG_LOAD;
	msg.hss_port = port->id;
	hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");

	if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
	    /* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
	    msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
		printk(KERN_CRIT "HSS-%i: HSS_LOAD_CONFIG failed\n",
		       port->id);
		BUG();
	}

	/* HDLC may stop working without this - check FIXME */
	npe_recv_message(port->npe, &msg, "FLUSH_IT");
}
433
434static void hss_set_hdlc_cfg(struct port *port)
435{
436 struct msg msg;
437
438 memset(&msg, 0, sizeof(msg));
439 msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
440 msg.hss_port = port->id;
441 msg.data8a = port->hdlc_cfg; /* rx_cfg */
442 msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
443 hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
444}
445
446static u32 hss_get_status(struct port *port)
447{
448 struct msg msg;
449
450 memset(&msg, 0, sizeof(msg));
451 msg.cmd = PORT_ERROR_READ;
452 msg.hss_port = port->id;
453 hss_npe_send(port, &msg, "PORT_ERROR_READ");
454 if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
455 printk(KERN_CRIT "HSS-%i: unable to read HSS status\n",
456 port->id);
457 BUG();
458 }
459
460 return msg.data32;
461}
462
463static void hss_start_hdlc(struct port *port)
464{
465 struct msg msg;
466
467 memset(&msg, 0, sizeof(msg));
468 msg.cmd = PKT_PIPE_FLOW_ENABLE;
469 msg.hss_port = port->id;
470 msg.data32 = 0;
471 hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
472}
473
474static void hss_stop_hdlc(struct port *port)
475{
476 struct msg msg;
477
478 memset(&msg, 0, sizeof(msg));
479 msg.cmd = PKT_PIPE_FLOW_DISABLE;
480 msg.hss_port = port->id;
481 hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
482 hss_get_status(port); /* make sure it's halted */
483}
484
/* One-time init: load the NPE firmware if it isn't running yet, then
 * send the static HDLC-mode configuration messages.
 * Returns 0 on success or a negative errno from npe_load_firmware(). */
static int hss_load_firmware(struct port *port)
{
	struct msg msg;
	int err;

	if (port->initialized)
		return 0;

	if (!npe_running(port->npe) &&
	    (err = npe_load_firmware(port->npe, npe_name(port->npe),
				     port->dev)))
		return err;

	/* HDLC mode configuration */
	memset(&msg, 0, sizeof(msg));
	msg.cmd = PKT_NUM_PIPES_WRITE;
	msg.hss_port = port->id;
	msg.data8a = PKT_NUM_PIPES;
	hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");

	/* the following messages reuse hss_port set above */
	msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
	msg.data8a = PKT_PIPE_FIFO_SIZEW;
	hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");

	msg.cmd = PKT_PIPE_MODE_WRITE;
	msg.data8a = NPE_PKT_MODE_HDLC;
	/* msg.data8b = inv_mask */
	/* msg.data8c = or_mask */
	hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");

	msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
	msg.data16a = HDLC_MAX_MRU; /* including CRC */
	hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");

	msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
	msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
	hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");

	port->initialized = 1;
	return 0;
}
526
527/*****************************************************************************
528 * packetized (HDLC) operation
529 ****************************************************************************/
530
531static inline void debug_pkt(struct net_device *dev, const char *func,
532 u8 *data, int len)
533{
534#if DEBUG_PKT_BYTES
535 int i;
536
537 printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);
538 for (i = 0; i < len; i++) {
539 if (i >= DEBUG_PKT_BYTES)
540 break;
541 printk("%s%02X", !(i % 4) ? " " : "", data[i]);
542 }
543 printk("\n");
544#endif
545}
546
547
/* Dump one descriptor (physical address plus fields) when DEBUG_DESC
 * is enabled; otherwise compiles to nothing. */
static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->status, desc->error_count);
#endif
}
556
/* Pop one descriptor's physical address from a hardware queue and
 * convert it to an index into the port's RX or TX descriptor table.
 * Returns the index, or -1 if the queue is empty. */
static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	/* descriptors are 32-byte aligned; a misaligned address is corruption */
	BUG_ON(phys & 0x1F);
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);	/* 'next' is unused and must stay 0 */
	return n_desc;
}
575
/* Push a descriptor's physical address onto a hardware queue.
 * Queues are sized to hold all descriptors, so overflow is a bug. */
static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);	/* must be 32-byte aligned */
	qmgr_put_entry(queue, phys);
	BUG_ON(qmgr_stat_overflow(queue));
}
584
585
/* Undo the TX DMA mapping made in hss_hdlc_xmit().  On little-endian
 * CPUs desc->data carries the 2 low alignment bits of the original
 * buffer (phys + offset), so align the address down and round the
 * length up to match the mapping. */
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}
597
598
599static void hss_hdlc_set_carrier(void *pdev, int carrier)
600{
601 struct net_device *netdev = pdev;
602 struct port *port = dev_to_port(netdev);
603 unsigned long flags;
604
605 spin_lock_irqsave(&npe_lock, flags);
606 port->carrier = carrier;
607 if (!port->loopback) {
608 if (carrier)
609 netif_carrier_on(netdev);
610 else
611 netif_carrier_off(netdev);
612 }
613 spin_unlock_irqrestore(&npe_lock, flags);
614}
615
/* RX queue not-empty interrupt: mask further RX IRQs and hand the work
 * to NAPI (hss_hdlc_poll re-enables the IRQ when done). */
static void hss_hdlc_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_rx_schedule(dev, &port->napi);
}
627
/* NAPI poll: receive up to 'budget' frames from the RX queue.
 * Big-endian CPUs hand fresh skbs to the NPE directly; little-endian
 * CPUs keep kmalloc'ed NPE buffers and copy-with-byteswap each frame
 * into a newly allocated skb.  Returns the number of frames received;
 * returning < budget means the IRQ has been re-enabled. */
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = queue_ids[port->id].rx;
	unsigned int rxfreeq = queue_ids[port->id].rxfree;
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
			/* queue empty: complete NAPI, then close the race
			 * against a frame arriving before the IRQ was on */
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll"
			       " netif_rx_complete\n", dev->name);
#endif
			netif_rx_complete(dev, napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    netif_rx_reschedule(dev, napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: hss_hdlc_poll"
				       " netif_rx_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
		if (desc->error_count)
			printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
#endif
		/* skb stays NULL on any error (or allocation failure);
		 * that routes the descriptor straight back to RX-free */
		skb = NULL;
		switch (desc->status) {
		case 0:
#ifdef __ARMEB__
			if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
				phys = dma_map_single(&dev->dev, skb->data,
						      RX_SIZE,
						      DMA_FROM_DEVICE);
				if (dma_mapping_error(&dev->dev, phys)) {
					dev_kfree_skb(skb);
					skb = NULL;
				}
			}
#else
			skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
			if (!skb)
				dev->stats.rx_dropped++;
			break;
		case ERR_HDLC_ALIGN:
		case ERR_HDLC_ABORT:
			dev->stats.rx_frame_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_FCS:
			dev->stats.rx_crc_errors++;
			dev->stats.rx_errors++;
			break;
		case ERR_HDLC_TOO_LONG:
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			break;
		default:	/* FIXME - remove printk */
			printk(KERN_ERR "%s: hss_hdlc_poll: status 0x%02X"
			       " errors %u\n", dev->name, desc->status,
			       desc->error_count);
			dev->stats.rx_errors++;
		}

		if (!skb) {
			/* put the desc back on RX-ready queue */
			desc->buf_len = RX_SIZE;
			desc->pkt_len = desc->status = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		/* swap buffers: the freshly mapped skb goes to the NPE,
		 * the filled one is delivered up the stack */
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data,
				 RX_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single(&dev->dev, desc->data,
				RX_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(desc->pkt_len, 4) / 4);
#endif
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);

		skb->protocol = hdlc_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys;
#endif
		desc->buf_len = RX_SIZE;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}
#if DEBUG_RX
	printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
	return received; /* not all work done */
}
763
764
/* TX-done interrupt: reclaim completed TX descriptors — unmap and free
 * the buffer, account stats, and return the descriptor to the TX-ready
 * queue.  Wakes the netif queue if TX-ready was empty (i.e. the xmit
 * path may have stopped it). */
static void hss_hdlc_txdone_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = dev_to_port(dev);
	int n_desc;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
#endif
	while ((n_desc = queue_get_desc(queue_ids[port->id].txdone,
					port, 1)) >= 0) {
		struct desc *desc;
		int start;

		desc = tx_desc_ptr(port, n_desc);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->pkt_len;

		dma_unmap_tx(port, desc);
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
		       dev->name, port->tx_buff_tab[n_desc]);
#endif
		/* skb on big-endian, kmalloc'ed bounce buffer otherwise */
		free_buffer_irq(port->tx_buff_tab[n_desc]);
		port->tx_buff_tab[n_desc] = NULL;

		/* remember whether TX-ready was empty BEFORE we refill it */
		start = qmgr_stat_empty(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, n_desc), desc);
		if (start) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
			       " ready\n", dev->name);
#endif
			netif_wake_queue(dev);
		}
	}
}
804
/* Transmit one HDLC frame.  On big-endian CPUs the skb data is DMA-mapped
 * directly; on little-endian CPUs the data is byte-swap-copied into a
 * kmalloc'ed bounce buffer and the skb is freed immediately.
 * Always returns NETDEV_TX_OK (oversized/failed frames are dropped and
 * counted). */
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > HDLC_MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* copy from the aligned-down address; skb is done with after this */
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* TX-ready cannot be empty here: the queue is stopped before the
	 * last descriptor is consumed (see below) */
	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* descriptor must be fully written before the NPE can see it */
	wmb();
	queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
	dev->trans_start = jiffies;

	if (qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
		printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}
890
891
/* Request the five hardware queues this port uses (RX-free, RX, TX,
 * TX-ready, TX-done).  On failure, releases whatever was already
 * acquired (reverse order) and returns the error. */
static int request_hdlc_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(queue_ids[port->id].rxfree, RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(queue_ids[port->id].rx, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(queue_ids[port->id].tx, TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	err = qmgr_request_queue(queue_ids[port->id].txdone, TX_DESCS, 0, 0,
				 "%s:TX-done", port->netdev->name);
	if (err)
		goto rel_txready;
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(queue_ids[port->id].tx);
rel_rx:
	qmgr_release_queue(queue_ids[port->id].rx);
rel_rxfree:
	qmgr_release_queue(queue_ids[port->id].rxfree);
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}
934
/* Release all five hardware queues acquired by request_hdlc_queues() */
static void release_hdlc_queues(struct port *port)
{
	qmgr_release_queue(queue_ids[port->id].rxfree);
	qmgr_release_queue(queue_ids[port->id].rx);
	qmgr_release_queue(queue_ids[port->id].txdone);
	qmgr_release_queue(queue_ids[port->id].tx);
	qmgr_release_queue(port->plat->txreadyq);
}
943
/* Allocate the coherent descriptor table (from a DMA pool shared by all
 * ports, created on first open) and the RX data buffers.
 * On partial failure returns an error; the caller is expected to clean
 * up with destroy_hdlc_queues() (see hss_hdlc_open's error path). */
static int init_hdlc_queues(struct port *port)
{
	int i;

	if (!ports_open)
		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
						 POOL_ALLOC_SIZE, 32, 0)))
			return -ENOMEM;

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff;	/* skb (big-endian) or kmalloc buffer */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = RX_SIZE;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}
986
/* Free everything allocated by init_hdlc_queues(): unmap and free any
 * remaining RX/TX buffers, return the descriptor table to the pool, and
 * destroy the pool itself once no port is open.  Safe on a partially
 * initialized port (NULL entries are skipped). */
static void destroy_hdlc_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data, RX_SIZE,
						 DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}
1019
/* Bring the interface up: load firmware, acquire and populate the
 * hardware queues, hook up IRQs, configure the HSS and start HDLC.
 * Errors unwind via the labels at the bottom. */
static int hss_hdlc_open(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, err = 0;

	if ((err = hdlc_open(dev)))
		return err;

	if ((err = hss_load_firmware(port)))
		goto err_hdlc_close;

	if ((err = request_hdlc_queues(port)))
		goto err_hdlc_close;

	if ((err = init_hdlc_queues(port)))
		goto err_destroy_queues;

	spin_lock_irqsave(&npe_lock, flags);
	/* give the platform a chance to veto and to install the carrier cb */
	if (port->plat->open)
		if ((err = port->plat->open(port->id, dev,
					    hss_hdlc_set_carrier)))
			goto err_unlock;
	spin_unlock_irqrestore(&npe_lock, flags);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(queue_ids[port->id].rxfree,
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	napi_enable(&port->napi);
	netif_start_queue(dev);

	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_rx_irq, dev);

	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_txdone_irq, dev);
	qmgr_enable_irq(queue_ids[port->id].txdone);

	ports_open++;

	hss_set_hdlc_cfg(port);
	hss_config(port);

	hss_start_hdlc(port);

	/* we may already have RX data, enables IRQ */
	netif_rx_schedule(dev, &port->napi);
	return 0;

err_unlock:
	spin_unlock_irqrestore(&npe_lock, flags);
err_destroy_queues:
	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
err_hdlc_close:
	hdlc_close(dev);
	return err;
}
1084
/* Bring the interface down: stop HDLC, drain every descriptor back out
 * of the NPE's queues (counting to verify none got stuck), then free all
 * resources.  Always returns 0. */
static int hss_hdlc_close(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, buffs = RX_DESCS; /* allocated RX buffers */

	spin_lock_irqsave(&npe_lock, flags);
	ports_open--;
	qmgr_disable_irq(queue_ids[port->id].rx);
	netif_stop_queue(dev);
	napi_disable(&port->napi);

	hss_stop_hdlc(port);

	/* drain RX descriptors from both the free and the ready queue */
	while (queue_get_desc(queue_ids[port->id].rxfree, port, 0) >= 0)
		buffs--;
	while (queue_get_desc(queue_ids[port->id].rx, port, 0) >= 0)
		buffs--;

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);

	buffs = TX_DESCS;
	while (queue_get_desc(queue_ids[port->id].tx, port, 1) >= 0)
		buffs--; /* cancel TX */

	/* TX-ready refills from the NPE side; poll it a bounded number
	 * of times until all TX descriptors are accounted for */
	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif
	qmgr_disable_irq(queue_ids[port->id].txdone);

	if (port->plat->close)
		port->plat->close(port->id, dev);
	spin_unlock_irqrestore(&npe_lock, flags);

	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
	hdlc_close(dev);
	return 0;
}
1138
1139
1140static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
1141 unsigned short parity)
1142{
1143 struct port *port = dev_to_port(dev);
1144
1145 if (encoding != ENCODING_NRZ)
1146 return -EINVAL;
1147
1148 switch(parity) {
1149 case PARITY_CRC16_PR1_CCITT:
1150 port->hdlc_cfg = 0;
1151 return 0;
1152
1153 case PARITY_CRC32_PR1_CCITT:
1154 port->hdlc_cfg = PKT_HDLC_CRC_32;
1155 return 0;
1156
1157 default:
1158 return -EINVAL;
1159 }
1160}
1161
1162
/* SIOCWANDEV handler: read or change the sync-serial line settings
 * (clock type, loopback) for this port.  Everything else is forwarded
 * to the generic hdlc_ioctl(). */
static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings new_line;
	sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int clk;

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		/* report current settings to userspace */
		ifr->ifr_settings.type = IF_IFACE_V35;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		memset(&new_line, 0, sizeof(new_line));
		new_line.clock_type = port->clock_type;
		new_line.clock_rate = 2048000; /* FIXME */
		new_line.loopback = port->loopback;
		if (copy_to_user(line, &new_line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL:
	case IF_IFACE_V35:
		if(!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&new_line, line, size))
			return -EFAULT;

		/* let the platform translate/veto the clock selection */
		clk = new_line.clock_type;
		if (port->plat->set_clock)
			clk = port->plat->set_clock(port->id, clk);

		if (clk != CLOCK_EXT && clk != CLOCK_INT)
			return -EINVAL;	/* No such clock setting */

		if (new_line.loopback != 0 && new_line.loopback != 1)
			return -EINVAL;

		port->clock_type = clk; /* Update settings */
		/* FIXME port->clock_rate = new_line.clock_rate */;
		port->loopback = new_line.loopback;

		spin_lock_irqsave(&npe_lock, flags);

		/* reprogram the hardware only if the interface is up */
		if (dev->flags & IFF_UP)
			hss_config(port);

		if (port->loopback || port->carrier)
			netif_carrier_on(port->netdev);
		else
			netif_carrier_off(port->netdev);
		spin_unlock_irqrestore(&npe_lock, flags);

		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
1228
1229/*****************************************************************************
1230 * initialization
1231 ****************************************************************************/
1232
/* Probe one HSS port: allocate driver state, request NPE #0, allocate
 * and register an HDLC net device.  Errors unwind via the labels at the
 * bottom. */
static int __devinit hss_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	hdlc_device *hdlc;
	int err;

	if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
		return -ENOMEM;

	if ((port->npe = npe_request(0)) == NULL) {
		err = -ENOSYS;
		goto err_free;
	}

	if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
		err = -ENOMEM;
		goto err_plat;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	hdlc = dev_to_hdlc(dev);
	hdlc->attach = hss_hdlc_attach;
	hdlc->xmit = hss_hdlc_xmit;
	dev->open = hss_hdlc_open;
	dev->stop = hss_hdlc_close;
	dev->do_ioctl = hss_hdlc_ioctl;
	dev->tx_queue_len = 100;
	/* defaults: external clock, E1 rate (2.048 Mb/s) */
	port->clock_type = CLOCK_EXT;
	port->clock_rate = 2048000;
	port->id = pdev->id;
	port->dev = &pdev->dev;
	port->plat = pdev->dev.platform_data;
	netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);

	if ((err = register_hdlc_device(dev)))
		goto err_free_netdev;

	platform_set_drvdata(pdev, port);

	printk(KERN_INFO "%s: HSS-%i\n", dev->name, port->id);
	return 0;

err_free_netdev:
	free_netdev(dev);
err_plat:
	npe_release(port->npe);
err_free:
	kfree(port);
	return err;
}
1284
1285static int __devexit hss_remove_one(struct platform_device *pdev)
1286{
1287 struct port *port = platform_get_drvdata(pdev);
1288
1289 unregister_hdlc_device(port->netdev);
1290 free_netdev(port->netdev);
1291 npe_release(port->npe);
1292 platform_set_drvdata(pdev, NULL);
1293 kfree(port);
1294 return 0;
1295}
1296
/* Platform driver binding for the "ixp4xx_hss" platform devices */
static struct platform_driver ixp4xx_hss_driver = {
	.driver.name	= DRV_NAME,
	.probe		= hss_init_one,
	.remove		= hss_remove_one,
};
1302
1303static int __init hss_init_module(void)
1304{
1305 if ((ixp4xx_read_feature_bits() &
1306 (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
1307 (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS))
1308 return -ENOSYS;
1309
1310 spin_lock_init(&npe_lock);
1311
1312 return platform_driver_register(&ixp4xx_hss_driver);
1313}
1314
/* Module exit: unregister the platform driver (per-port teardown is
 * done by hss_remove_one) */
static void __exit hss_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_hss_driver);
}
1319
1320MODULE_AUTHOR("Krzysztof Halasa");
1321MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
1322MODULE_LICENSE("GPL v2");
1323MODULE_ALIAS("platform:ixp4xx_hss");
1324module_init(hss_init_module);
1325module_exit(hss_cleanup_module);
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 24fd613466b7..5b61b3eef45f 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -143,7 +143,6 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
143 *ptr = 0x00; 143 *ptr = 0x00;
144 144
145 skb->protocol = x25_type_trans(skb, dev); 145 skb->protocol = x25_type_trans(skb, dev);
146 skb->dev->last_rx = jiffies;
147 return netif_rx(skb); 146 return netif_rx(skb);
148} 147}
149 148
@@ -235,7 +234,6 @@ static void lapbeth_connected(struct net_device *dev, int reason)
235 *ptr = 0x01; 234 *ptr = 0x01;
236 235
237 skb->protocol = x25_type_trans(skb, dev); 236 skb->protocol = x25_type_trans(skb, dev);
238 skb->dev->last_rx = jiffies;
239 netif_rx(skb); 237 netif_rx(skb);
240} 238}
241 239
@@ -253,7 +251,6 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
253 *ptr = 0x02; 251 *ptr = 0x02;
254 252
255 skb->protocol = x25_type_trans(skb, dev); 253 skb->protocol = x25_type_trans(skb, dev);
256 skb->dev->last_rx = jiffies;
257 netif_rx(skb); 254 netif_rx(skb);
258} 255}
259 256
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index d7bb63e616b5..feac3b99f8fe 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1594,7 +1594,6 @@ static int lmc_rx(struct net_device *dev)
1594 goto skip_packet; 1594 goto skip_packet;
1595 } 1595 }
1596 1596
1597 dev->last_rx = jiffies;
1598 sc->lmc_device->stats.rx_packets++; 1597 sc->lmc_device->stats.rx_packets++;
1599 sc->lmc_device->stats.rx_bytes += len; 1598 sc->lmc_device->stats.rx_bytes += len;
1600 1599
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index be9877ff551e..94b4c208b013 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -142,7 +142,6 @@ void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
142 case LMC_PPP: 142 case LMC_PPP:
143 case LMC_NET: 143 case LMC_NET:
144 default: 144 default:
145 skb->dev->last_rx = jiffies;
146 netif_rx(skb); 145 netif_rx(skb);
147 break; 146 break;
148 case LMC_RAW: 147 case LMC_RAW:
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 0a566b0daacb..697715ae80f4 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -53,7 +53,7 @@ static const char* devname = "RISCom/N2";
53#define NEED_SCA_MSCI_INTR 53#define NEED_SCA_MSCI_INTR
54#define MAX_TX_BUFFERS 10 54#define MAX_TX_BUFFERS 10
55 55
56static char *hw = NULL; /* pointer to hw=xxx command line string */ 56static char *hw; /* pointer to hw=xxx command line string */
57 57
58/* RISCom/N2 Board Registers */ 58/* RISCom/N2 Board Registers */
59 59
@@ -145,7 +145,6 @@ static card_t **new_card = &first_card;
145 &(card)->ports[port] : NULL) 145 &(card)->ports[port] : NULL)
146 146
147 147
148
149static __inline__ u8 sca_get_page(card_t *card) 148static __inline__ u8 sca_get_page(card_t *card)
150{ 149{
151 return inb(card->io + N2_PSR) & PSR_PAGEBITS; 150 return inb(card->io + N2_PSR) & PSR_PAGEBITS;
@@ -159,9 +158,7 @@ static __inline__ void openwin(card_t *card, u8 page)
159} 158}
160 159
161 160
162 161#include "hd64570.c"
163#include "hd6457x.c"
164
165 162
166 163
167static void n2_set_iface(port_t *port) 164static void n2_set_iface(port_t *port)
@@ -478,7 +475,7 @@ static int __init n2_run(unsigned long io, unsigned long irq,
478 n2_destroy_card(card); 475 n2_destroy_card(card);
479 return -ENOBUFS; 476 return -ENOBUFS;
480 } 477 }
481 sca_init_sync_port(port); /* Set up SCA memory */ 478 sca_init_port(port); /* Set up SCA memory */
482 479
483 printk(KERN_INFO "%s: RISCom/N2 node %d\n", 480 printk(KERN_INFO "%s: RISCom/N2 node %d\n",
484 dev->name, port->phy_node); 481 dev->name, port->phy_node);
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index d0a8d1e352ac..c23fde0c0344 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -1769,7 +1769,7 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx)
1769 1769
1770static void cpc_tx_timeout(struct net_device *dev) 1770static void cpc_tx_timeout(struct net_device *dev)
1771{ 1771{
1772 pc300dev_t *d = (pc300dev_t *) dev->priv; 1772 pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
1773 pc300ch_t *chan = (pc300ch_t *) d->chan; 1773 pc300ch_t *chan = (pc300ch_t *) d->chan;
1774 pc300_t *card = (pc300_t *) chan->card; 1774 pc300_t *card = (pc300_t *) chan->card;
1775 int ch = chan->channel; 1775 int ch = chan->channel;
@@ -1796,7 +1796,7 @@ static void cpc_tx_timeout(struct net_device *dev)
1796 1796
1797static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) 1797static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
1798{ 1798{
1799 pc300dev_t *d = (pc300dev_t *) dev->priv; 1799 pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
1800 pc300ch_t *chan = (pc300ch_t *) d->chan; 1800 pc300ch_t *chan = (pc300ch_t *) d->chan;
1801 pc300_t *card = (pc300_t *) chan->card; 1801 pc300_t *card = (pc300_t *) chan->card;
1802 int ch = chan->channel; 1802 int ch = chan->channel;
@@ -1874,7 +1874,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
1874 1874
1875static void cpc_net_rx(struct net_device *dev) 1875static void cpc_net_rx(struct net_device *dev)
1876{ 1876{
1877 pc300dev_t *d = (pc300dev_t *) dev->priv; 1877 pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
1878 pc300ch_t *chan = (pc300ch_t *) d->chan; 1878 pc300ch_t *chan = (pc300ch_t *) d->chan;
1879 pc300_t *card = (pc300_t *) chan->card; 1879 pc300_t *card = (pc300_t *) chan->card;
1880 int ch = chan->channel; 1880 int ch = chan->channel;
@@ -2522,7 +2522,7 @@ static int cpc_change_mtu(struct net_device *dev, int new_mtu)
2522 2522
2523static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2523static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2524{ 2524{
2525 pc300dev_t *d = (pc300dev_t *) dev->priv; 2525 pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
2526 pc300ch_t *chan = (pc300ch_t *) d->chan; 2526 pc300ch_t *chan = (pc300ch_t *) d->chan;
2527 pc300_t *card = (pc300_t *) chan->card; 2527 pc300_t *card = (pc300_t *) chan->card;
2528 pc300conf_t conf_aux; 2528 pc300conf_t conf_aux;
@@ -2718,9 +2718,8 @@ static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2718 } 2718 }
2719 pc300patrntst.num_errors = 2719 pc300patrntst.num_errors =
2720 falc_pattern_test_error(card, ch); 2720 falc_pattern_test_error(card, ch);
2721 if (!arg 2721 if (copy_to_user(arg, &pc300patrntst,
2722 || copy_to_user(arg, &pc300patrntst, 2722 sizeof(pc300patterntst_t)))
2723 sizeof (pc300patterntst_t)))
2724 return -EINVAL; 2723 return -EINVAL;
2725 } else { 2724 } else {
2726 falc_pattern_test(card, ch, pc300patrntst.patrntst_on); 2725 falc_pattern_test(card, ch, pc300patrntst.patrntst_on);
@@ -3058,7 +3057,7 @@ static int tx_config(pc300dev_t * d)
3058static int cpc_attach(struct net_device *dev, unsigned short encoding, 3057static int cpc_attach(struct net_device *dev, unsigned short encoding,
3059 unsigned short parity) 3058 unsigned short parity)
3060{ 3059{
3061 pc300dev_t *d = (pc300dev_t *)dev->priv; 3060 pc300dev_t *d = (pc300dev_t *)dev_to_hdlc(dev)->priv;
3062 pc300ch_t *chan = (pc300ch_t *)d->chan; 3061 pc300ch_t *chan = (pc300ch_t *)d->chan;
3063 pc300_t *card = (pc300_t *)chan->card; 3062 pc300_t *card = (pc300_t *)chan->card;
3064 pc300chconf_t *conf = (pc300chconf_t *)&chan->conf; 3063 pc300chconf_t *conf = (pc300chconf_t *)&chan->conf;
@@ -3138,7 +3137,7 @@ static void cpc_closech(pc300dev_t * d)
3138 3137
3139int cpc_open(struct net_device *dev) 3138int cpc_open(struct net_device *dev)
3140{ 3139{
3141 pc300dev_t *d = (pc300dev_t *) dev->priv; 3140 pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
3142 struct ifreq ifr; 3141 struct ifreq ifr;
3143 int result; 3142 int result;
3144 3143
@@ -3166,7 +3165,7 @@ err_out:
3166 3165
3167static int cpc_close(struct net_device *dev) 3166static int cpc_close(struct net_device *dev)
3168{ 3167{
3169 pc300dev_t *d = (pc300dev_t *) dev->priv; 3168 pc300dev_t *d = (pc300dev_t *) dev_to_hdlc(dev)->priv;
3170 pc300ch_t *chan = (pc300ch_t *) d->chan; 3169 pc300ch_t *chan = (pc300ch_t *) d->chan;
3171 pc300_t *card = (pc300_t *) chan->card; 3170 pc300_t *card = (pc300_t *) chan->card;
3172 unsigned long flags; 3171 unsigned long flags;
@@ -3347,7 +3346,7 @@ static void cpc_init_card(pc300_t * card)
3347 d->line_on = 0; 3346 d->line_on = 0;
3348 d->line_off = 0; 3347 d->line_off = 0;
3349 3348
3350 dev = alloc_hdlcdev(NULL); 3349 dev = alloc_hdlcdev(d);
3351 if (dev == NULL) 3350 if (dev == NULL)
3352 continue; 3351 continue;
3353 3352
@@ -3372,7 +3371,6 @@ static void cpc_init_card(pc300_t * card)
3372 dev->do_ioctl = cpc_ioctl; 3371 dev->do_ioctl = cpc_ioctl;
3373 3372
3374 if (register_hdlc_device(dev) == 0) { 3373 if (register_hdlc_device(dev) == 0) {
3375 dev->priv = d; /* We need 'priv', hdlc doesn't */
3376 printk("%s: Cyclades-PC300/", dev->name); 3374 printk("%s: Cyclades-PC300/", dev->name);
3377 switch (card->hw.type) { 3375 switch (card->hw.type) {
3378 case PC300_TE: 3376 case PC300_TE:
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index bf1b01590429..f247e5d9002a 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Cyclades PC300 synchronous serial card driver for Linux 2 * Cyclades PC300 synchronous serial card driver for Linux
3 * 3 *
4 * Copyright (C) 2000-2007 Krzysztof Halasa <khc@pm.waw.pl> 4 * Copyright (C) 2000-2008 Krzysztof Halasa <khc@pm.waw.pl>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
@@ -11,7 +11,7 @@
11 * 11 *
12 * Sources of information: 12 * Sources of information:
13 * Hitachi HD64572 SCA-II User's Manual 13 * Hitachi HD64572 SCA-II User's Manual
14 * Cyclades PC300 Linux driver 14 * Original Cyclades PC300 Linux driver
15 * 15 *
16 * This driver currently supports only PC300/RSV (V.24/V.35) and 16 * This driver currently supports only PC300/RSV (V.24/V.35) and
17 * PC300/X21 cards. 17 * PC300/X21 cards.
@@ -37,17 +37,11 @@
37 37
38#include "hd64572.h" 38#include "hd64572.h"
39 39
40static const char* version = "Cyclades PC300 driver version: 1.17";
41static const char* devname = "PC300";
42
43#undef DEBUG_PKT 40#undef DEBUG_PKT
44#define DEBUG_RINGS 41#define DEBUG_RINGS
45 42
46#define PC300_PLX_SIZE 0x80 /* PLX control window size (128 B) */ 43#define PC300_PLX_SIZE 0x80 /* PLX control window size (128 B) */
47#define PC300_SCA_SIZE 0x400 /* SCA window size (1 KB) */ 44#define PC300_SCA_SIZE 0x400 /* SCA window size (1 KB) */
48#define ALL_PAGES_ALWAYS_MAPPED
49#define NEED_DETECT_RAM
50#define NEED_SCA_MSCI_INTR
51#define MAX_TX_BUFFERS 10 45#define MAX_TX_BUFFERS 10
52 46
53static int pci_clock_freq = 33000000; 47static int pci_clock_freq = 33000000;
@@ -81,7 +75,8 @@ typedef struct {
81 75
82 76
83typedef struct port_s { 77typedef struct port_s {
84 struct net_device *dev; 78 struct napi_struct napi;
79 struct net_device *netdev;
85 struct card_s *card; 80 struct card_s *card;
86 spinlock_t lock; /* TX lock */ 81 spinlock_t lock; /* TX lock */
87 sync_serial_settings settings; 82 sync_serial_settings settings;
@@ -93,7 +88,7 @@ typedef struct port_s {
93 u16 txin; /* tx ring buffer 'in' and 'last' pointers */ 88 u16 txin; /* tx ring buffer 'in' and 'last' pointers */
94 u16 txlast; 89 u16 txlast;
95 u8 rxs, txs, tmc; /* SCA registers */ 90 u8 rxs, txs, tmc; /* SCA registers */
96 u8 phy_node; /* physical port # - 0 or 1 */ 91 u8 chan; /* physical port # - 0 or 1 */
97}port_t; 92}port_t;
98 93
99 94
@@ -114,21 +109,10 @@ typedef struct card_s {
114}card_t; 109}card_t;
115 110
116 111
117#define sca_in(reg, card) readb(card->scabase + (reg))
118#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
119#define sca_inw(reg, card) readw(card->scabase + (reg))
120#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
121#define sca_inl(reg, card) readl(card->scabase + (reg))
122#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
123
124#define port_to_card(port) (port->card)
125#define log_node(port) (port->phy_node)
126#define phy_node(port) (port->phy_node)
127#define winbase(card) (card->rambase)
128#define get_port(card, port) ((port) < (card)->n_ports ? \ 112#define get_port(card, port) ((port) < (card)->n_ports ? \
129 (&(card)->ports[port]) : (NULL)) 113 (&(card)->ports[port]) : (NULL))
130 114
131#include "hd6457x.c" 115#include "hd64572.c"
132 116
133 117
134static void pc300_set_iface(port_t *port) 118static void pc300_set_iface(port_t *port)
@@ -139,8 +123,8 @@ static void pc300_set_iface(port_t *port)
139 u8 rxs = port->rxs & CLK_BRG_MASK; 123 u8 rxs = port->rxs & CLK_BRG_MASK;
140 u8 txs = port->txs & CLK_BRG_MASK; 124 u8 txs = port->txs & CLK_BRG_MASK;
141 125
142 sca_out(EXS_TES1, (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS, 126 sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
143 port_to_card(port)); 127 port->card);
144 switch(port->settings.clock_type) { 128 switch(port->settings.clock_type) {
145 case CLOCK_INT: 129 case CLOCK_INT:
146 rxs |= CLK_BRG; /* BRG output */ 130 rxs |= CLK_BRG; /* BRG output */
@@ -172,10 +156,10 @@ static void pc300_set_iface(port_t *port)
172 if (port->card->type == PC300_RSV) { 156 if (port->card->type == PC300_RSV) {
173 if (port->iface == IF_IFACE_V35) 157 if (port->iface == IF_IFACE_V35)
174 writel(card->init_ctrl_value | 158 writel(card->init_ctrl_value |
175 PC300_CHMEDIA_MASK(port->phy_node), init_ctrl); 159 PC300_CHMEDIA_MASK(port->chan), init_ctrl);
176 else 160 else
177 writel(card->init_ctrl_value & 161 writel(card->init_ctrl_value &
178 ~PC300_CHMEDIA_MASK(port->phy_node), init_ctrl); 162 ~PC300_CHMEDIA_MASK(port->chan), init_ctrl);
179 } 163 }
180} 164}
181 165
@@ -280,10 +264,8 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
280 card_t *card = pci_get_drvdata(pdev); 264 card_t *card = pci_get_drvdata(pdev);
281 265
282 for (i = 0; i < 2; i++) 266 for (i = 0; i < 2; i++)
283 if (card->ports[i].card) { 267 if (card->ports[i].card)
284 struct net_device *dev = port_to_dev(&card->ports[i]); 268 unregister_hdlc_device(card->ports[i].netdev);
285 unregister_hdlc_device(dev);
286 }
287 269
288 if (card->irq) 270 if (card->irq)
289 free_irq(card->irq, card); 271 free_irq(card->irq, card);
@@ -298,10 +280,10 @@ static void pc300_pci_remove_one(struct pci_dev *pdev)
298 pci_release_regions(pdev); 280 pci_release_regions(pdev);
299 pci_disable_device(pdev); 281 pci_disable_device(pdev);
300 pci_set_drvdata(pdev, NULL); 282 pci_set_drvdata(pdev, NULL);
301 if (card->ports[0].dev) 283 if (card->ports[0].netdev)
302 free_netdev(card->ports[0].dev); 284 free_netdev(card->ports[0].netdev);
303 if (card->ports[1].dev) 285 if (card->ports[1].netdev)
304 free_netdev(card->ports[1].dev); 286 free_netdev(card->ports[1].netdev);
305 kfree(card); 287 kfree(card);
306} 288}
307 289
@@ -318,12 +300,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
318 u32 scaphys; /* SCA memory base */ 300 u32 scaphys; /* SCA memory base */
319 u32 plxphys; /* PLX registers memory base */ 301 u32 plxphys; /* PLX registers memory base */
320 302
321#ifndef MODULE
322 static int printed_version;
323 if (!printed_version++)
324 printk(KERN_INFO "%s\n", version);
325#endif
326
327 i = pci_enable_device(pdev); 303 i = pci_enable_device(pdev);
328 if (i) 304 if (i)
329 return i; 305 return i;
@@ -343,27 +319,6 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
343 } 319 }
344 pci_set_drvdata(pdev, card); 320 pci_set_drvdata(pdev, card);
345 321
346 if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
347 pdev->device == PCI_DEVICE_ID_PC300_TE_2)
348 card->type = PC300_TE; /* not fully supported */
349 else if (card->init_ctrl_value & PC300_CTYPE_MASK)
350 card->type = PC300_X21;
351 else
352 card->type = PC300_RSV;
353
354 if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
355 pdev->device == PCI_DEVICE_ID_PC300_TE_1)
356 card->n_ports = 1;
357 else
358 card->n_ports = 2;
359
360 for (i = 0; i < card->n_ports; i++)
361 if (!(card->ports[i].dev = alloc_hdlcdev(&card->ports[i]))) {
362 printk(KERN_ERR "pc300: unable to allocate memory\n");
363 pc300_pci_remove_one(pdev);
364 return -ENOMEM;
365 }
366
367 if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE || 322 if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
368 pci_resource_len(pdev, 2) != PC300_SCA_SIZE || 323 pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
369 pci_resource_len(pdev, 3) < 16384) { 324 pci_resource_len(pdev, 3) < 16384) {
@@ -372,14 +327,14 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
372 return -EFAULT; 327 return -EFAULT;
373 } 328 }
374 329
375 plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK; 330 plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK;
376 card->plxbase = ioremap(plxphys, PC300_PLX_SIZE); 331 card->plxbase = ioremap(plxphys, PC300_PLX_SIZE);
377 332
378 scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK; 333 scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK;
379 card->scabase = ioremap(scaphys, PC300_SCA_SIZE); 334 card->scabase = ioremap(scaphys, PC300_SCA_SIZE);
380 335
381 ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK; 336 ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
382 card->rambase = ioremap(ramphys, pci_resource_len(pdev,3)); 337 card->rambase = pci_ioremap_bar(pdev, 3);
383 338
384 if (card->plxbase == NULL || 339 if (card->plxbase == NULL ||
385 card->scabase == NULL || 340 card->scabase == NULL ||
@@ -393,6 +348,27 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
393 card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl); 348 card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl);
394 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys); 349 pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys);
395 350
351 if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
352 pdev->device == PCI_DEVICE_ID_PC300_TE_2)
353 card->type = PC300_TE; /* not fully supported */
354 else if (card->init_ctrl_value & PC300_CTYPE_MASK)
355 card->type = PC300_X21;
356 else
357 card->type = PC300_RSV;
358
359 if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
360 pdev->device == PCI_DEVICE_ID_PC300_TE_1)
361 card->n_ports = 1;
362 else
363 card->n_ports = 2;
364
365 for (i = 0; i < card->n_ports; i++)
366 if (!(card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]))) {
367 printk(KERN_ERR "pc300: unable to allocate memory\n");
368 pc300_pci_remove_one(pdev);
369 return -ENOMEM;
370 }
371
396 /* Reset PLX */ 372 /* Reset PLX */
397 p = &card->plxbase->init_ctrl; 373 p = &card->plxbase->init_ctrl;
398 writel(card->init_ctrl_value | 0x40000000, p); 374 writel(card->init_ctrl_value | 0x40000000, p);
@@ -446,7 +422,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
446 writew(0x0041, &card->plxbase->intr_ctrl_stat); 422 writew(0x0041, &card->plxbase->intr_ctrl_stat);
447 423
448 /* Allocate IRQ */ 424 /* Allocate IRQ */
449 if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, devname, card)) { 425 if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pc300", card)) {
450 printk(KERN_WARNING "pc300: could not allocate IRQ%d.\n", 426 printk(KERN_WARNING "pc300: could not allocate IRQ%d.\n",
451 pdev->irq); 427 pdev->irq);
452 pc300_pci_remove_one(pdev); 428 pc300_pci_remove_one(pdev);
@@ -463,9 +439,9 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
463 439
464 for (i = 0; i < card->n_ports; i++) { 440 for (i = 0; i < card->n_ports; i++) {
465 port_t *port = &card->ports[i]; 441 port_t *port = &card->ports[i];
466 struct net_device *dev = port_to_dev(port); 442 struct net_device *dev = port->netdev;
467 hdlc_device *hdlc = dev_to_hdlc(dev); 443 hdlc_device *hdlc = dev_to_hdlc(dev);
468 port->phy_node = i; 444 port->chan = i;
469 445
470 spin_lock_init(&port->lock); 446 spin_lock_init(&port->lock);
471 dev->irq = card->irq; 447 dev->irq = card->irq;
@@ -484,6 +460,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
484 else 460 else
485 port->iface = IF_IFACE_V35; 461 port->iface = IF_IFACE_V35;
486 462
463 sca_init_port(port);
487 if (register_hdlc_device(dev)) { 464 if (register_hdlc_device(dev)) {
488 printk(KERN_ERR "pc300: unable to register hdlc " 465 printk(KERN_ERR "pc300: unable to register hdlc "
489 "device\n"); 466 "device\n");
@@ -491,10 +468,9 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
491 pc300_pci_remove_one(pdev); 468 pc300_pci_remove_one(pdev);
492 return -ENOBUFS; 469 return -ENOBUFS;
493 } 470 }
494 sca_init_sync_port(port); /* Set up SCA memory */
495 471
496 printk(KERN_INFO "%s: PC300 node %d\n", 472 printk(KERN_INFO "%s: PC300 channel %d\n",
497 dev->name, port->phy_node); 473 dev->name, port->chan);
498 } 474 }
499 return 0; 475 return 0;
500} 476}
@@ -524,9 +500,6 @@ static struct pci_driver pc300_pci_driver = {
524 500
525static int __init pc300_init_module(void) 501static int __init pc300_init_module(void)
526{ 502{
527#ifdef MODULE
528 printk(KERN_INFO "%s\n", version);
529#endif
530 if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) { 503 if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
531 printk(KERN_ERR "pc300: Invalid PCI clock frequency\n"); 504 printk(KERN_ERR "pc300: Invalid PCI clock frequency\n");
532 return -EINVAL; 505 return -EINVAL;
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index b595b64e7538..1104d3a692f7 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Goramo PCI200SYN synchronous serial card driver for Linux 2 * Goramo PCI200SYN synchronous serial card driver for Linux
3 * 3 *
4 * Copyright (C) 2002-2003 Krzysztof Halasa <khc@pm.waw.pl> 4 * Copyright (C) 2002-2008 Krzysztof Halasa <khc@pm.waw.pl>
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License 7 * under the terms of version 2 of the GNU General Public License
@@ -33,17 +33,11 @@
33 33
34#include "hd64572.h" 34#include "hd64572.h"
35 35
36static const char* version = "Goramo PCI200SYN driver version: 1.16";
37static const char* devname = "PCI200SYN";
38
39#undef DEBUG_PKT 36#undef DEBUG_PKT
40#define DEBUG_RINGS 37#define DEBUG_RINGS
41 38
42#define PCI200SYN_PLX_SIZE 0x80 /* PLX control window size (128b) */ 39#define PCI200SYN_PLX_SIZE 0x80 /* PLX control window size (128b) */
43#define PCI200SYN_SCA_SIZE 0x400 /* SCA window size (1Kb) */ 40#define PCI200SYN_SCA_SIZE 0x400 /* SCA window size (1Kb) */
44#define ALL_PAGES_ALWAYS_MAPPED
45#define NEED_DETECT_RAM
46#define NEED_SCA_MSCI_INTR
47#define MAX_TX_BUFFERS 10 41#define MAX_TX_BUFFERS 10
48 42
49static int pci_clock_freq = 33000000; 43static int pci_clock_freq = 33000000;
@@ -68,7 +62,8 @@ typedef struct {
68 62
69 63
70typedef struct port_s { 64typedef struct port_s {
71 struct net_device *dev; 65 struct napi_struct napi;
66 struct net_device *netdev;
72 struct card_s *card; 67 struct card_s *card;
73 spinlock_t lock; /* TX lock */ 68 spinlock_t lock; /* TX lock */
74 sync_serial_settings settings; 69 sync_serial_settings settings;
@@ -79,7 +74,7 @@ typedef struct port_s {
79 u16 txin; /* tx ring buffer 'in' and 'last' pointers */ 74 u16 txin; /* tx ring buffer 'in' and 'last' pointers */
80 u16 txlast; 75 u16 txlast;
81 u8 rxs, txs, tmc; /* SCA registers */ 76 u8 rxs, txs, tmc; /* SCA registers */
82 u8 phy_node; /* physical port # - 0 or 1 */ 77 u8 chan; /* physical port # - 0 or 1 */
83}port_t; 78}port_t;
84 79
85 80
@@ -97,17 +92,6 @@ typedef struct card_s {
97}card_t; 92}card_t;
98 93
99 94
100#define sca_in(reg, card) readb(card->scabase + (reg))
101#define sca_out(value, reg, card) writeb(value, card->scabase + (reg))
102#define sca_inw(reg, card) readw(card->scabase + (reg))
103#define sca_outw(value, reg, card) writew(value, card->scabase + (reg))
104#define sca_inl(reg, card) readl(card->scabase + (reg))
105#define sca_outl(value, reg, card) writel(value, card->scabase + (reg))
106
107#define port_to_card(port) (port->card)
108#define log_node(port) (port->phy_node)
109#define phy_node(port) (port->phy_node)
110#define winbase(card) (card->rambase)
111#define get_port(card, port) (&card->ports[port]) 95#define get_port(card, port) (&card->ports[port])
112#define sca_flush(card) (sca_in(IER0, card)); 96#define sca_flush(card) (sca_in(IER0, card));
113 97
@@ -127,7 +111,7 @@ static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
127#undef memcpy_toio 111#undef memcpy_toio
128#define memcpy_toio new_memcpy_toio 112#define memcpy_toio new_memcpy_toio
129 113
130#include "hd6457x.c" 114#include "hd64572.c"
131 115
132 116
133static void pci200_set_iface(port_t *port) 117static void pci200_set_iface(port_t *port)
@@ -137,8 +121,8 @@ static void pci200_set_iface(port_t *port)
137 u8 rxs = port->rxs & CLK_BRG_MASK; 121 u8 rxs = port->rxs & CLK_BRG_MASK;
138 u8 txs = port->txs & CLK_BRG_MASK; 122 u8 txs = port->txs & CLK_BRG_MASK;
139 123
140 sca_out(EXS_TES1, (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS, 124 sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
141 port_to_card(port)); 125 port->card);
142 switch(port->settings.clock_type) { 126 switch(port->settings.clock_type) {
143 case CLOCK_INT: 127 case CLOCK_INT:
144 rxs |= CLK_BRG; /* BRG output */ 128 rxs |= CLK_BRG; /* BRG output */
@@ -180,7 +164,7 @@ static int pci200_open(struct net_device *dev)
180 164
181 sca_open(dev); 165 sca_open(dev);
182 pci200_set_iface(port); 166 pci200_set_iface(port);
183 sca_flush(port_to_card(port)); 167 sca_flush(port->card);
184 return 0; 168 return 0;
185} 169}
186 170
@@ -189,7 +173,7 @@ static int pci200_open(struct net_device *dev)
189static int pci200_close(struct net_device *dev) 173static int pci200_close(struct net_device *dev)
190{ 174{
191 sca_close(dev); 175 sca_close(dev);
192 sca_flush(port_to_card(dev_to_port(dev))); 176 sca_flush(dev_to_port(dev)->card);
193 hdlc_close(dev); 177 hdlc_close(dev);
194 return 0; 178 return 0;
195} 179}
@@ -242,7 +226,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
242 226
243 memcpy(&port->settings, &new_line, size); /* Update settings */ 227 memcpy(&port->settings, &new_line, size); /* Update settings */
244 pci200_set_iface(port); 228 pci200_set_iface(port);
245 sca_flush(port_to_card(port)); 229 sca_flush(port->card);
246 return 0; 230 return 0;
247 231
248 default: 232 default:
@@ -258,10 +242,8 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
258 card_t *card = pci_get_drvdata(pdev); 242 card_t *card = pci_get_drvdata(pdev);
259 243
260 for (i = 0; i < 2; i++) 244 for (i = 0; i < 2; i++)
261 if (card->ports[i].card) { 245 if (card->ports[i].card)
262 struct net_device *dev = port_to_dev(&card->ports[i]); 246 unregister_hdlc_device(card->ports[i].netdev);
263 unregister_hdlc_device(dev);
264 }
265 247
266 if (card->irq) 248 if (card->irq)
267 free_irq(card->irq, card); 249 free_irq(card->irq, card);
@@ -276,10 +258,10 @@ static void pci200_pci_remove_one(struct pci_dev *pdev)
276 pci_release_regions(pdev); 258 pci_release_regions(pdev);
277 pci_disable_device(pdev); 259 pci_disable_device(pdev);
278 pci_set_drvdata(pdev, NULL); 260 pci_set_drvdata(pdev, NULL);
279 if (card->ports[0].dev) 261 if (card->ports[0].netdev)
280 free_netdev(card->ports[0].dev); 262 free_netdev(card->ports[0].netdev);
281 if (card->ports[1].dev) 263 if (card->ports[1].netdev)
282 free_netdev(card->ports[1].dev); 264 free_netdev(card->ports[1].netdev);
283 kfree(card); 265 kfree(card);
284} 266}
285 267
@@ -296,12 +278,6 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
296 u32 scaphys; /* SCA memory base */ 278 u32 scaphys; /* SCA memory base */
297 u32 plxphys; /* PLX registers memory base */ 279 u32 plxphys; /* PLX registers memory base */
298 280
299#ifndef MODULE
300 static int printed_version;
301 if (!printed_version++)
302 printk(KERN_INFO "%s\n", version);
303#endif
304
305 i = pci_enable_device(pdev); 281 i = pci_enable_device(pdev);
306 if (i) 282 if (i)
307 return i; 283 return i;
@@ -320,9 +296,9 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
320 return -ENOBUFS; 296 return -ENOBUFS;
321 } 297 }
322 pci_set_drvdata(pdev, card); 298 pci_set_drvdata(pdev, card);
323 card->ports[0].dev = alloc_hdlcdev(&card->ports[0]); 299 card->ports[0].netdev = alloc_hdlcdev(&card->ports[0]);
324 card->ports[1].dev = alloc_hdlcdev(&card->ports[1]); 300 card->ports[1].netdev = alloc_hdlcdev(&card->ports[1]);
325 if (!card->ports[0].dev || !card->ports[1].dev) { 301 if (!card->ports[0].netdev || !card->ports[1].netdev) {
326 printk(KERN_ERR "pci200syn: unable to allocate memory\n"); 302 printk(KERN_ERR "pci200syn: unable to allocate memory\n");
327 pci200_pci_remove_one(pdev); 303 pci200_pci_remove_one(pdev);
328 return -ENOMEM; 304 return -ENOMEM;
@@ -343,7 +319,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
343 card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE); 319 card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE);
344 320
345 ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK; 321 ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
346 card->rambase = ioremap(ramphys, pci_resource_len(pdev,3)); 322 card->rambase = pci_ioremap_bar(pdev, 3);
347 323
348 if (card->plxbase == NULL || 324 if (card->plxbase == NULL ||
349 card->scabase == NULL || 325 card->scabase == NULL ||
@@ -398,7 +374,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
398 writew(readw(p) | 0x0040, p); 374 writew(readw(p) | 0x0040, p);
399 375
400 /* Allocate IRQ */ 376 /* Allocate IRQ */
401 if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, devname, card)) { 377 if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pci200syn", card)) {
402 printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n", 378 printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
403 pdev->irq); 379 pdev->irq);
404 pci200_pci_remove_one(pdev); 380 pci200_pci_remove_one(pdev);
@@ -410,9 +386,9 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
410 386
411 for (i = 0; i < 2; i++) { 387 for (i = 0; i < 2; i++) {
412 port_t *port = &card->ports[i]; 388 port_t *port = &card->ports[i];
413 struct net_device *dev = port_to_dev(port); 389 struct net_device *dev = port->netdev;
414 hdlc_device *hdlc = dev_to_hdlc(dev); 390 hdlc_device *hdlc = dev_to_hdlc(dev);
415 port->phy_node = i; 391 port->chan = i;
416 392
417 spin_lock_init(&port->lock); 393 spin_lock_init(&port->lock);
418 dev->irq = card->irq; 394 dev->irq = card->irq;
@@ -426,6 +402,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
426 hdlc->xmit = sca_xmit; 402 hdlc->xmit = sca_xmit;
427 port->settings.clock_type = CLOCK_EXT; 403 port->settings.clock_type = CLOCK_EXT;
428 port->card = card; 404 port->card = card;
405 sca_init_port(port);
429 if (register_hdlc_device(dev)) { 406 if (register_hdlc_device(dev)) {
430 printk(KERN_ERR "pci200syn: unable to register hdlc " 407 printk(KERN_ERR "pci200syn: unable to register hdlc "
431 "device\n"); 408 "device\n");
@@ -433,10 +410,9 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
433 pci200_pci_remove_one(pdev); 410 pci200_pci_remove_one(pdev);
434 return -ENOBUFS; 411 return -ENOBUFS;
435 } 412 }
436 sca_init_sync_port(port); /* Set up SCA memory */
437 413
438 printk(KERN_INFO "%s: PCI200SYN node %d\n", 414 printk(KERN_INFO "%s: PCI200SYN channel %d\n",
439 dev->name, port->phy_node); 415 dev->name, port->chan);
440 } 416 }
441 417
442 sca_flush(card); 418 sca_flush(card);
@@ -464,9 +440,6 @@ static struct pci_driver pci200_pci_driver = {
464 440
465static int __init pci200_init_module(void) 441static int __init pci200_init_module(void)
466{ 442{
467#ifdef MODULE
468 printk(KERN_INFO "%s\n", version);
469#endif
470 if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) { 443 if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
471 printk(KERN_ERR "pci200syn: Invalid PCI clock frequency\n"); 444 printk(KERN_ERR "pci200syn: Invalid PCI clock frequency\n");
472 return -EINVAL; 445 return -EINVAL;
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index ee51b6a5e605..0aa28e1d4366 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -186,6 +186,7 @@ static unsigned int netcard_portlist[ ] __initdata = {
186 0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4, 186 0x2b0, 0x2b4, 0x2c0, 0x2c4, 0x2d0, 0x2d4, 0x2e0, 0x2e4, 0x2f0, 0x2f4,
187 0 }; 187 0 };
188 188
189#define NET_LOCAL_LOCK(dev) (((struct net_local *)netdev_priv(dev))->lock)
189 190
190/* 191/*
191 * Look for SBNI card which addr stored in dev->base_addr, if nonzero. 192 * Look for SBNI card which addr stored in dev->base_addr, if nonzero.
@@ -287,7 +288,7 @@ static int __init sbni_init(struct net_device *dev)
287} 288}
288 289
289 290
290int __init 291static int __init
291sbni_pci_probe( struct net_device *dev ) 292sbni_pci_probe( struct net_device *dev )
292{ 293{
293 struct pci_dev *pdev = NULL; 294 struct pci_dev *pdev = NULL;
@@ -378,22 +379,23 @@ sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
378 dev->irq = irq; 379 dev->irq = irq;
379 dev->base_addr = ioaddr; 380 dev->base_addr = ioaddr;
380 381
381 /* Allocate dev->priv and fill in sbni-specific dev fields. */ 382 /* Fill in sbni-specific dev fields. */
382 nl = dev->priv; 383 nl = netdev_priv(dev);
383 if( !nl ) { 384 if( !nl ) {
384 printk( KERN_ERR "%s: unable to get memory!\n", dev->name ); 385 printk( KERN_ERR "%s: unable to get memory!\n", dev->name );
385 release_region( ioaddr, SBNI_IO_EXTENT ); 386 release_region( ioaddr, SBNI_IO_EXTENT );
386 return NULL; 387 return NULL;
387 } 388 }
388 389
389 dev->priv = nl;
390 memset( nl, 0, sizeof(struct net_local) ); 390 memset( nl, 0, sizeof(struct net_local) );
391 spin_lock_init( &nl->lock ); 391 spin_lock_init( &nl->lock );
392 392
393 /* store MAC address (generate if that isn't known) */ 393 /* store MAC address (generate if that isn't known) */
394 *(__be16 *)dev->dev_addr = htons( 0x00ff ); 394 *(__be16 *)dev->dev_addr = htons( 0x00ff );
395 *(__be32 *)(dev->dev_addr + 2) = htonl( 0x01000000 | 395 *(__be32 *)(dev->dev_addr + 2) = htonl( 0x01000000 |
396 ( (mac[num] ? mac[num] : (u32)((long)dev->priv)) & 0x00ffffff) ); 396 ((mac[num] ?
397 mac[num] :
398 (u32)((long)netdev_priv(dev))) & 0x00ffffff));
397 399
398 /* store link settings (speed, receive level ) */ 400 /* store link settings (speed, receive level ) */
399 nl->maxframe = DEFAULT_FRAME_LEN; 401 nl->maxframe = DEFAULT_FRAME_LEN;
@@ -447,7 +449,7 @@ sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
447 449
448 /* Looking for idle device in the list */ 450 /* Looking for idle device in the list */
449 for( p = dev; p; ) { 451 for( p = dev; p; ) {
450 struct net_local *nl = (struct net_local *) p->priv; 452 struct net_local *nl = netdev_priv(p);
451 spin_lock( &nl->lock ); 453 spin_lock( &nl->lock );
452 if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) { 454 if( nl->tx_buf_p || (nl->state & FL_LINE_DOWN) ) {
453 p = nl->link; 455 p = nl->link;
@@ -469,7 +471,7 @@ sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
469static int 471static int
470sbni_start_xmit( struct sk_buff *skb, struct net_device *dev ) 472sbni_start_xmit( struct sk_buff *skb, struct net_device *dev )
471{ 473{
472 struct net_local *nl = (struct net_local *) dev->priv; 474 struct net_local *nl = netdev_priv(dev);
473 475
474 netif_stop_queue( dev ); 476 netif_stop_queue( dev );
475 spin_lock( &nl->lock ); 477 spin_lock( &nl->lock );
@@ -503,12 +505,12 @@ static irqreturn_t
503sbni_interrupt( int irq, void *dev_id ) 505sbni_interrupt( int irq, void *dev_id )
504{ 506{
505 struct net_device *dev = dev_id; 507 struct net_device *dev = dev_id;
506 struct net_local *nl = dev->priv; 508 struct net_local *nl = netdev_priv(dev);
507 int repeat; 509 int repeat;
508 510
509 spin_lock( &nl->lock ); 511 spin_lock( &nl->lock );
510 if( nl->second ) 512 if( nl->second )
511 spin_lock( &((struct net_local *) nl->second->priv)->lock ); 513 spin_lock(&NET_LOCAL_LOCK(nl->second));
512 514
513 do { 515 do {
514 repeat = 0; 516 repeat = 0;
@@ -522,7 +524,7 @@ sbni_interrupt( int irq, void *dev_id )
522 } while( repeat ); 524 } while( repeat );
523 525
524 if( nl->second ) 526 if( nl->second )
525 spin_unlock( &((struct net_local *)nl->second->priv)->lock ); 527 spin_unlock(&NET_LOCAL_LOCK(nl->second));
526 spin_unlock( &nl->lock ); 528 spin_unlock( &nl->lock );
527 return IRQ_HANDLED; 529 return IRQ_HANDLED;
528} 530}
@@ -531,7 +533,7 @@ sbni_interrupt( int irq, void *dev_id )
531static void 533static void
532handle_channel( struct net_device *dev ) 534handle_channel( struct net_device *dev )
533{ 535{
534 struct net_local *nl = (struct net_local *) dev->priv; 536 struct net_local *nl = netdev_priv(dev);
535 unsigned long ioaddr = dev->base_addr; 537 unsigned long ioaddr = dev->base_addr;
536 538
537 int req_ans; 539 int req_ans;
@@ -540,7 +542,7 @@ handle_channel( struct net_device *dev )
540#ifdef CONFIG_SBNI_MULTILINE 542#ifdef CONFIG_SBNI_MULTILINE
541 /* Lock the master device because we going to change its local data */ 543 /* Lock the master device because we going to change its local data */
542 if( nl->state & FL_SLAVE ) 544 if( nl->state & FL_SLAVE )
543 spin_lock( &((struct net_local *) nl->master->priv)->lock ); 545 spin_lock(&NET_LOCAL_LOCK(nl->master));
544#endif 546#endif
545 547
546 outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 ); 548 outb( (inb( ioaddr + CSR0 ) & ~EN_INT) | TR_REQ, ioaddr + CSR0 );
@@ -576,7 +578,7 @@ handle_channel( struct net_device *dev )
576 578
577#ifdef CONFIG_SBNI_MULTILINE 579#ifdef CONFIG_SBNI_MULTILINE
578 if( nl->state & FL_SLAVE ) 580 if( nl->state & FL_SLAVE )
579 spin_unlock( &((struct net_local *) nl->master->priv)->lock ); 581 spin_unlock(&NET_LOCAL_LOCK(nl->master));
580#endif 582#endif
581} 583}
582 584
@@ -589,7 +591,7 @@ handle_channel( struct net_device *dev )
589static int 591static int
590recv_frame( struct net_device *dev ) 592recv_frame( struct net_device *dev )
591{ 593{
592 struct net_local *nl = (struct net_local *) dev->priv; 594 struct net_local *nl = netdev_priv(dev);
593 unsigned long ioaddr = dev->base_addr; 595 unsigned long ioaddr = dev->base_addr;
594 596
595 u32 crc = CRC32_INITIAL; 597 u32 crc = CRC32_INITIAL;
@@ -623,7 +625,7 @@ recv_frame( struct net_device *dev )
623static void 625static void
624send_frame( struct net_device *dev ) 626send_frame( struct net_device *dev )
625{ 627{
626 struct net_local *nl = (struct net_local *) dev->priv; 628 struct net_local *nl = netdev_priv(dev);
627 629
628 u32 crc = CRC32_INITIAL; 630 u32 crc = CRC32_INITIAL;
629 631
@@ -680,7 +682,7 @@ do_send:
680static void 682static void
681download_data( struct net_device *dev, u32 *crc_p ) 683download_data( struct net_device *dev, u32 *crc_p )
682{ 684{
683 struct net_local *nl = (struct net_local *) dev->priv; 685 struct net_local *nl = netdev_priv(dev);
684 struct sk_buff *skb = nl->tx_buf_p; 686 struct sk_buff *skb = nl->tx_buf_p;
685 687
686 unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen); 688 unsigned len = min_t(unsigned int, skb->len - nl->outpos, nl->framelen);
@@ -699,7 +701,7 @@ static int
699upload_data( struct net_device *dev, unsigned framelen, unsigned frameno, 701upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
700 unsigned is_first, u32 crc ) 702 unsigned is_first, u32 crc )
701{ 703{
702 struct net_local *nl = (struct net_local *) dev->priv; 704 struct net_local *nl = netdev_priv(dev);
703 705
704 int frame_ok; 706 int frame_ok;
705 707
@@ -721,9 +723,9 @@ upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
721 nl->wait_frameno = 0, 723 nl->wait_frameno = 0,
722 nl->inppos = 0, 724 nl->inppos = 0,
723#ifdef CONFIG_SBNI_MULTILINE 725#ifdef CONFIG_SBNI_MULTILINE
724 ((struct net_local *) nl->master->priv) 726 ((struct net_local *)netdev_priv(nl->master))
725 ->stats.rx_errors++, 727 ->stats.rx_errors++,
726 ((struct net_local *) nl->master->priv) 728 ((struct net_local *)netdev_priv(nl->master))
727 ->stats.rx_missed_errors++; 729 ->stats.rx_missed_errors++;
728#else 730#else
729 nl->stats.rx_errors++, 731 nl->stats.rx_errors++,
@@ -740,8 +742,10 @@ upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
740 */ 742 */
741 nl->wait_frameno = 0, 743 nl->wait_frameno = 0,
742#ifdef CONFIG_SBNI_MULTILINE 744#ifdef CONFIG_SBNI_MULTILINE
743 ((struct net_local *) nl->master->priv)->stats.rx_errors++, 745 ((struct net_local *)netdev_priv(nl->master))
744 ((struct net_local *) nl->master->priv)->stats.rx_crc_errors++; 746 ->stats.rx_errors++,
747 ((struct net_local *)netdev_priv(nl->master))
748 ->stats.rx_crc_errors++;
745#else 749#else
746 nl->stats.rx_errors++, 750 nl->stats.rx_errors++,
747 nl->stats.rx_crc_errors++; 751 nl->stats.rx_crc_errors++;
@@ -755,8 +759,8 @@ static inline void
755send_complete( struct net_local *nl ) 759send_complete( struct net_local *nl )
756{ 760{
757#ifdef CONFIG_SBNI_MULTILINE 761#ifdef CONFIG_SBNI_MULTILINE
758 ((struct net_local *) nl->master->priv)->stats.tx_packets++; 762 ((struct net_local *)netdev_priv(nl->master))->stats.tx_packets++;
759 ((struct net_local *) nl->master->priv)->stats.tx_bytes 763 ((struct net_local *)netdev_priv(nl->master))->stats.tx_bytes
760 += nl->tx_buf_p->len; 764 += nl->tx_buf_p->len;
761#else 765#else
762 nl->stats.tx_packets++; 766 nl->stats.tx_packets++;
@@ -775,7 +779,7 @@ send_complete( struct net_local *nl )
775static void 779static void
776interpret_ack( struct net_device *dev, unsigned ack ) 780interpret_ack( struct net_device *dev, unsigned ack )
777{ 781{
778 struct net_local *nl = (struct net_local *) dev->priv; 782 struct net_local *nl = netdev_priv(dev);
779 783
780 if( ack == FRAME_SENT_OK ) { 784 if( ack == FRAME_SENT_OK ) {
781 nl->state &= ~FL_NEED_RESEND; 785 nl->state &= ~FL_NEED_RESEND;
@@ -809,7 +813,7 @@ interpret_ack( struct net_device *dev, unsigned ack )
809static int 813static int
810append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc ) 814append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc )
811{ 815{
812 struct net_local *nl = (struct net_local *) dev->priv; 816 struct net_local *nl = netdev_priv(dev);
813 817
814 u8 *p; 818 u8 *p;
815 819
@@ -840,7 +844,7 @@ append_frame_to_pkt( struct net_device *dev, unsigned framelen, u32 crc )
840static void 844static void
841prepare_to_send( struct sk_buff *skb, struct net_device *dev ) 845prepare_to_send( struct sk_buff *skb, struct net_device *dev )
842{ 846{
843 struct net_local *nl = (struct net_local *) dev->priv; 847 struct net_local *nl = netdev_priv(dev);
844 848
845 unsigned int len; 849 unsigned int len;
846 850
@@ -871,15 +875,15 @@ prepare_to_send( struct sk_buff *skb, struct net_device *dev )
871static void 875static void
872drop_xmit_queue( struct net_device *dev ) 876drop_xmit_queue( struct net_device *dev )
873{ 877{
874 struct net_local *nl = (struct net_local *) dev->priv; 878 struct net_local *nl = netdev_priv(dev);
875 879
876 if( nl->tx_buf_p ) 880 if( nl->tx_buf_p )
877 dev_kfree_skb_any( nl->tx_buf_p ), 881 dev_kfree_skb_any( nl->tx_buf_p ),
878 nl->tx_buf_p = NULL, 882 nl->tx_buf_p = NULL,
879#ifdef CONFIG_SBNI_MULTILINE 883#ifdef CONFIG_SBNI_MULTILINE
880 ((struct net_local *) nl->master->priv) 884 ((struct net_local *)netdev_priv(nl->master))
881 ->stats.tx_errors++, 885 ->stats.tx_errors++,
882 ((struct net_local *) nl->master->priv) 886 ((struct net_local *)netdev_priv(nl->master))
883 ->stats.tx_carrier_errors++; 887 ->stats.tx_carrier_errors++;
884#else 888#else
885 nl->stats.tx_errors++, 889 nl->stats.tx_errors++,
@@ -903,7 +907,7 @@ drop_xmit_queue( struct net_device *dev )
903static void 907static void
904send_frame_header( struct net_device *dev, u32 *crc_p ) 908send_frame_header( struct net_device *dev, u32 *crc_p )
905{ 909{
906 struct net_local *nl = (struct net_local *) dev->priv; 910 struct net_local *nl = netdev_priv(dev);
907 911
908 u32 crc = *crc_p; 912 u32 crc = *crc_p;
909 u32 len_field = nl->framelen + 6; /* CRC + frameno + reserved */ 913 u32 len_field = nl->framelen + 6; /* CRC + frameno + reserved */
@@ -1005,7 +1009,7 @@ get_rx_buf( struct net_device *dev )
1005static void 1009static void
1006indicate_pkt( struct net_device *dev ) 1010indicate_pkt( struct net_device *dev )
1007{ 1011{
1008 struct net_local *nl = (struct net_local *) dev->priv; 1012 struct net_local *nl = netdev_priv(dev);
1009 struct sk_buff *skb = nl->rx_buf_p; 1013 struct sk_buff *skb = nl->rx_buf_p;
1010 1014
1011 skb_put( skb, nl->inppos ); 1015 skb_put( skb, nl->inppos );
@@ -1013,13 +1017,12 @@ indicate_pkt( struct net_device *dev )
1013#ifdef CONFIG_SBNI_MULTILINE 1017#ifdef CONFIG_SBNI_MULTILINE
1014 skb->protocol = eth_type_trans( skb, nl->master ); 1018 skb->protocol = eth_type_trans( skb, nl->master );
1015 netif_rx( skb ); 1019 netif_rx( skb );
1016 dev->last_rx = jiffies; 1020 ++((struct net_local *)netdev_priv(nl->master))->stats.rx_packets;
1017 ++((struct net_local *) nl->master->priv)->stats.rx_packets; 1021 ((struct net_local *)netdev_priv(nl->master))->stats.rx_bytes +=
1018 ((struct net_local *) nl->master->priv)->stats.rx_bytes += nl->inppos; 1022 nl->inppos;
1019#else 1023#else
1020 skb->protocol = eth_type_trans( skb, dev ); 1024 skb->protocol = eth_type_trans( skb, dev );
1021 netif_rx( skb ); 1025 netif_rx( skb );
1022 dev->last_rx = jiffies;
1023 ++nl->stats.rx_packets; 1026 ++nl->stats.rx_packets;
1024 nl->stats.rx_bytes += nl->inppos; 1027 nl->stats.rx_bytes += nl->inppos;
1025#endif 1028#endif
@@ -1038,7 +1041,7 @@ static void
1038sbni_watchdog( unsigned long arg ) 1041sbni_watchdog( unsigned long arg )
1039{ 1042{
1040 struct net_device *dev = (struct net_device *) arg; 1043 struct net_device *dev = (struct net_device *) arg;
1041 struct net_local *nl = (struct net_local *) dev->priv; 1044 struct net_local *nl = netdev_priv(dev);
1042 struct timer_list *w = &nl->watchdog; 1045 struct timer_list *w = &nl->watchdog;
1043 unsigned long flags; 1046 unsigned long flags;
1044 unsigned char csr0; 1047 unsigned char csr0;
@@ -1091,7 +1094,7 @@ static unsigned char timeout_rxl_tab[] = {
1091static void 1094static void
1092card_start( struct net_device *dev ) 1095card_start( struct net_device *dev )
1093{ 1096{
1094 struct net_local *nl = (struct net_local *) dev->priv; 1097 struct net_local *nl = netdev_priv(dev);
1095 1098
1096 nl->timer_ticks = CHANGE_LEVEL_START_TICKS; 1099 nl->timer_ticks = CHANGE_LEVEL_START_TICKS;
1097 nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND); 1100 nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
@@ -1113,7 +1116,7 @@ card_start( struct net_device *dev )
1113static void 1116static void
1114change_level( struct net_device *dev ) 1117change_level( struct net_device *dev )
1115{ 1118{
1116 struct net_local *nl = (struct net_local *) dev->priv; 1119 struct net_local *nl = netdev_priv(dev);
1117 1120
1118 if( nl->delta_rxl == 0 ) /* do not auto-negotiate RxL */ 1121 if( nl->delta_rxl == 0 ) /* do not auto-negotiate RxL */
1119 return; 1122 return;
@@ -1137,7 +1140,7 @@ change_level( struct net_device *dev )
1137static void 1140static void
1138timeout_change_level( struct net_device *dev ) 1141timeout_change_level( struct net_device *dev )
1139{ 1142{
1140 struct net_local *nl = (struct net_local *) dev->priv; 1143 struct net_local *nl = netdev_priv(dev);
1141 1144
1142 nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ]; 1145 nl->cur_rxl_index = timeout_rxl_tab[ nl->timeout_rxl ];
1143 if( ++nl->timeout_rxl >= 4 ) 1146 if( ++nl->timeout_rxl >= 4 )
@@ -1160,7 +1163,7 @@ timeout_change_level( struct net_device *dev )
1160static int 1163static int
1161sbni_open( struct net_device *dev ) 1164sbni_open( struct net_device *dev )
1162{ 1165{
1163 struct net_local *nl = (struct net_local *) dev->priv; 1166 struct net_local *nl = netdev_priv(dev);
1164 struct timer_list *w = &nl->watchdog; 1167 struct timer_list *w = &nl->watchdog;
1165 1168
1166 /* 1169 /*
@@ -1176,7 +1179,7 @@ sbni_open( struct net_device *dev )
1176 || (*p)->base_addr == dev->base_addr - 4) 1179 || (*p)->base_addr == dev->base_addr - 4)
1177 && (*p)->flags & IFF_UP ) { 1180 && (*p)->flags & IFF_UP ) {
1178 1181
1179 ((struct net_local *) ((*p)->priv)) 1182 ((struct net_local *) (netdev_priv(*p)))
1180 ->second = dev; 1183 ->second = dev;
1181 printk( KERN_NOTICE "%s: using shared irq " 1184 printk( KERN_NOTICE "%s: using shared irq "
1182 "with %s\n", dev->name, (*p)->name ); 1185 "with %s\n", dev->name, (*p)->name );
@@ -1216,7 +1219,7 @@ handler_attached:
1216static int 1219static int
1217sbni_close( struct net_device *dev ) 1220sbni_close( struct net_device *dev )
1218{ 1221{
1219 struct net_local *nl = (struct net_local *) dev->priv; 1222 struct net_local *nl = netdev_priv(dev);
1220 1223
1221 if( nl->second && nl->second->flags & IFF_UP ) { 1224 if( nl->second && nl->second->flags & IFF_UP ) {
1222 printk( KERN_NOTICE "Secondary channel (%s) is active!\n", 1225 printk( KERN_NOTICE "Secondary channel (%s) is active!\n",
@@ -1300,7 +1303,7 @@ sbni_card_probe( unsigned long ioaddr )
1300static int 1303static int
1301sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd ) 1304sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
1302{ 1305{
1303 struct net_local *nl = (struct net_local *) dev->priv; 1306 struct net_local *nl = netdev_priv(dev);
1304 struct sbni_flags flags; 1307 struct sbni_flags flags;
1305 int error = 0; 1308 int error = 0;
1306 1309
@@ -1390,8 +1393,8 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
1390static int 1393static int
1391enslave( struct net_device *dev, struct net_device *slave_dev ) 1394enslave( struct net_device *dev, struct net_device *slave_dev )
1392{ 1395{
1393 struct net_local *nl = (struct net_local *) dev->priv; 1396 struct net_local *nl = netdev_priv(dev);
1394 struct net_local *snl = (struct net_local *) slave_dev->priv; 1397 struct net_local *snl = netdev_priv(slave_dev);
1395 1398
1396 if( nl->state & FL_SLAVE ) /* This isn't master or free device */ 1399 if( nl->state & FL_SLAVE ) /* This isn't master or free device */
1397 return -EBUSY; 1400 return -EBUSY;
@@ -1425,9 +1428,9 @@ enslave( struct net_device *dev, struct net_device *slave_dev )
1425static int 1428static int
1426emancipate( struct net_device *dev ) 1429emancipate( struct net_device *dev )
1427{ 1430{
1428 struct net_local *snl = (struct net_local *) dev->priv; 1431 struct net_local *snl = netdev_priv(dev);
1429 struct net_device *p = snl->master; 1432 struct net_device *p = snl->master;
1430 struct net_local *nl = (struct net_local *) p->priv; 1433 struct net_local *nl = netdev_priv(p);
1431 1434
1432 if( !(snl->state & FL_SLAVE) ) 1435 if( !(snl->state & FL_SLAVE) )
1433 return -EINVAL; 1436 return -EINVAL;
@@ -1438,7 +1441,7 @@ emancipate( struct net_device *dev )
1438 1441
1439 /* exclude from list */ 1442 /* exclude from list */
1440 for(;;) { /* must be in list */ 1443 for(;;) { /* must be in list */
1441 struct net_local *t = (struct net_local *) p->priv; 1444 struct net_local *t = netdev_priv(p);
1442 if( t->link == dev ) { 1445 if( t->link == dev ) {
1443 t->link = snl->link; 1446 t->link = snl->link;
1444 break; 1447 break;
@@ -1465,7 +1468,7 @@ emancipate( struct net_device *dev )
1465static struct net_device_stats * 1468static struct net_device_stats *
1466sbni_get_stats( struct net_device *dev ) 1469sbni_get_stats( struct net_device *dev )
1467{ 1470{
1468 return &((struct net_local *) dev->priv)->stats; 1471 return &((struct net_local *)netdev_priv(dev))->stats;
1469} 1472}
1470 1473
1471 1474
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 73e2f2780932..6a07ba9371db 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -185,7 +185,7 @@ static void sdla_stop(struct net_device *dev)
185{ 185{
186 struct frad_local *flp; 186 struct frad_local *flp;
187 187
188 flp = dev->priv; 188 flp = netdev_priv(dev);
189 switch(flp->type) 189 switch(flp->type)
190 { 190 {
191 case SDLA_S502A: 191 case SDLA_S502A:
@@ -212,7 +212,7 @@ static void sdla_start(struct net_device *dev)
212{ 212{
213 struct frad_local *flp; 213 struct frad_local *flp;
214 214
215 flp = dev->priv; 215 flp = netdev_priv(dev);
216 switch(flp->type) 216 switch(flp->type)
217 { 217 {
218 case SDLA_S502A: 218 case SDLA_S502A:
@@ -432,7 +432,7 @@ static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags,
432 int ret, waiting, len; 432 int ret, waiting, len;
433 long window; 433 long window;
434 434
435 flp = dev->priv; 435 flp = netdev_priv(dev);
436 window = flp->type == SDLA_S508 ? SDLA_508_CMD_BUF : SDLA_502_CMD_BUF; 436 window = flp->type == SDLA_S508 ? SDLA_508_CMD_BUF : SDLA_502_CMD_BUF;
437 cmd_buf = (struct sdla_cmd *)(dev->mem_start + (window & SDLA_ADDR_MASK)); 437 cmd_buf = (struct sdla_cmd *)(dev->mem_start + (window & SDLA_ADDR_MASK));
438 ret = 0; 438 ret = 0;
@@ -509,7 +509,7 @@ static int sdla_activate(struct net_device *slave, struct net_device *master)
509 struct frad_local *flp; 509 struct frad_local *flp;
510 int i; 510 int i;
511 511
512 flp = slave->priv; 512 flp = netdev_priv(slave);
513 513
514 for(i=0;i<CONFIG_DLCI_MAX;i++) 514 for(i=0;i<CONFIG_DLCI_MAX;i++)
515 if (flp->master[i] == master) 515 if (flp->master[i] == master)
@@ -531,7 +531,7 @@ static int sdla_deactivate(struct net_device *slave, struct net_device *master)
531 struct frad_local *flp; 531 struct frad_local *flp;
532 int i; 532 int i;
533 533
534 flp = slave->priv; 534 flp = netdev_priv(slave);
535 535
536 for(i=0;i<CONFIG_DLCI_MAX;i++) 536 for(i=0;i<CONFIG_DLCI_MAX;i++)
537 if (flp->master[i] == master) 537 if (flp->master[i] == master)
@@ -556,7 +556,7 @@ static int sdla_assoc(struct net_device *slave, struct net_device *master)
556 if (master->type != ARPHRD_DLCI) 556 if (master->type != ARPHRD_DLCI)
557 return(-EINVAL); 557 return(-EINVAL);
558 558
559 flp = slave->priv; 559 flp = netdev_priv(slave);
560 560
561 for(i=0;i<CONFIG_DLCI_MAX;i++) 561 for(i=0;i<CONFIG_DLCI_MAX;i++)
562 { 562 {
@@ -589,7 +589,7 @@ static int sdla_deassoc(struct net_device *slave, struct net_device *master)
589 struct frad_local *flp; 589 struct frad_local *flp;
590 int i; 590 int i;
591 591
592 flp = slave->priv; 592 flp = netdev_priv(slave);
593 593
594 for(i=0;i<CONFIG_DLCI_MAX;i++) 594 for(i=0;i<CONFIG_DLCI_MAX;i++)
595 if (flp->master[i] == master) 595 if (flp->master[i] == master)
@@ -619,7 +619,7 @@ static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, i
619 int i; 619 int i;
620 short len, ret; 620 short len, ret;
621 621
622 flp = slave->priv; 622 flp = netdev_priv(slave);
623 623
624 for(i=0;i<CONFIG_DLCI_MAX;i++) 624 for(i=0;i<CONFIG_DLCI_MAX;i++)
625 if (flp->master[i] == master) 625 if (flp->master[i] == master)
@@ -628,7 +628,7 @@ static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, i
628 if (i == CONFIG_DLCI_MAX) 628 if (i == CONFIG_DLCI_MAX)
629 return(-ENODEV); 629 return(-ENODEV);
630 630
631 dlp = master->priv; 631 dlp = netdev_priv(master);
632 632
633 ret = SDLA_RET_OK; 633 ret = SDLA_RET_OK;
634 len = sizeof(struct dlci_conf); 634 len = sizeof(struct dlci_conf);
@@ -659,7 +659,7 @@ static int sdla_transmit(struct sk_buff *skb, struct net_device *dev)
659 unsigned long flags; 659 unsigned long flags;
660 struct buf_entry *pbuf; 660 struct buf_entry *pbuf;
661 661
662 flp = dev->priv; 662 flp = netdev_priv(dev);
663 ret = 0; 663 ret = 0;
664 accept = 1; 664 accept = 1;
665 665
@@ -755,7 +755,7 @@ static void sdla_receive(struct net_device *dev)
755 int i=0, received, success, addr, buf_base, buf_top; 755 int i=0, received, success, addr, buf_base, buf_top;
756 short dlci, len, len2, split; 756 short dlci, len, len2, split;
757 757
758 flp = dev->priv; 758 flp = netdev_priv(dev);
759 success = 1; 759 success = 1;
760 received = addr = buf_top = buf_base = 0; 760 received = addr = buf_top = buf_base = 0;
761 len = dlci = 0; 761 len = dlci = 0;
@@ -860,7 +860,7 @@ static void sdla_receive(struct net_device *dev)
860 if (success) 860 if (success)
861 { 861 {
862 flp->stats.rx_packets++; 862 flp->stats.rx_packets++;
863 dlp = master->priv; 863 dlp = netdev_priv(master);
864 (*dlp->receive)(skb, master); 864 (*dlp->receive)(skb, master);
865 } 865 }
866 866
@@ -925,7 +925,7 @@ static void sdla_poll(unsigned long device)
925 struct frad_local *flp; 925 struct frad_local *flp;
926 926
927 dev = (struct net_device *) device; 927 dev = (struct net_device *) device;
928 flp = dev->priv; 928 flp = netdev_priv(dev);
929 929
930 if (sdla_byte(dev, SDLA_502_RCV_BUF)) 930 if (sdla_byte(dev, SDLA_502_RCV_BUF))
931 sdla_receive(dev); 931 sdla_receive(dev);
@@ -941,7 +941,7 @@ static int sdla_close(struct net_device *dev)
941 int len, i; 941 int len, i;
942 short dlcis[CONFIG_DLCI_MAX]; 942 short dlcis[CONFIG_DLCI_MAX];
943 943
944 flp = dev->priv; 944 flp = netdev_priv(dev);
945 945
946 len = 0; 946 len = 0;
947 for(i=0;i<CONFIG_DLCI_MAX;i++) 947 for(i=0;i<CONFIG_DLCI_MAX;i++)
@@ -1002,7 +1002,7 @@ static int sdla_open(struct net_device *dev)
1002 int len, i; 1002 int len, i;
1003 char byte; 1003 char byte;
1004 1004
1005 flp = dev->priv; 1005 flp = netdev_priv(dev);
1006 1006
1007 if (!flp->initialized) 1007 if (!flp->initialized)
1008 return(-EPERM); 1008 return(-EPERM);
@@ -1079,7 +1079,7 @@ static int sdla_open(struct net_device *dev)
1079 for(i=0;i<CONFIG_DLCI_MAX;i++) 1079 for(i=0;i<CONFIG_DLCI_MAX;i++)
1080 if (flp->dlci[i]) 1080 if (flp->dlci[i])
1081 { 1081 {
1082 dlp = flp->master[i]->priv; 1082 dlp = netdev_priv(flp->master[i]);
1083 if (dlp->configured) 1083 if (dlp->configured)
1084 sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, &dlp->config, sizeof(struct dlci_conf), NULL, NULL); 1084 sdla_cmd(dev, SDLA_SET_DLCI_CONFIGURATION, abs(flp->dlci[i]), 0, &dlp->config, sizeof(struct dlci_conf), NULL, NULL);
1085 } 1085 }
@@ -1099,7 +1099,7 @@ static int sdla_config(struct net_device *dev, struct frad_conf __user *conf, in
1099 if (dev->type == 0xFFFF) 1099 if (dev->type == 0xFFFF)
1100 return(-EUNATCH); 1100 return(-EUNATCH);
1101 1101
1102 flp = dev->priv; 1102 flp = netdev_priv(dev);
1103 1103
1104 if (!get) 1104 if (!get)
1105 { 1105 {
@@ -1230,7 +1230,7 @@ static int sdla_reconfig(struct net_device *dev)
1230 struct conf_data data; 1230 struct conf_data data;
1231 int i, len; 1231 int i, len;
1232 1232
1233 flp = dev->priv; 1233 flp = netdev_priv(dev);
1234 1234
1235 len = 0; 1235 len = 0;
1236 for(i=0;i<CONFIG_DLCI_MAX;i++) 1236 for(i=0;i<CONFIG_DLCI_MAX;i++)
@@ -1255,7 +1255,7 @@ static int sdla_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1255 if(!capable(CAP_NET_ADMIN)) 1255 if(!capable(CAP_NET_ADMIN))
1256 return -EPERM; 1256 return -EPERM;
1257 1257
1258 flp = dev->priv; 1258 flp = netdev_priv(dev);
1259 1259
1260 if (!flp->initialized) 1260 if (!flp->initialized)
1261 return(-EINVAL); 1261 return(-EINVAL);
@@ -1321,7 +1321,7 @@ static int sdla_change_mtu(struct net_device *dev, int new_mtu)
1321{ 1321{
1322 struct frad_local *flp; 1322 struct frad_local *flp;
1323 1323
1324 flp = dev->priv; 1324 flp = netdev_priv(dev);
1325 1325
1326 if (netif_running(dev)) 1326 if (netif_running(dev))
1327 return(-EBUSY); 1327 return(-EBUSY);
@@ -1338,7 +1338,7 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
1338 unsigned base; 1338 unsigned base;
1339 int err = -EINVAL; 1339 int err = -EINVAL;
1340 1340
1341 flp = dev->priv; 1341 flp = netdev_priv(dev);
1342 1342
1343 if (flp->initialized) 1343 if (flp->initialized)
1344 return(-EINVAL); 1344 return(-EINVAL);
@@ -1593,14 +1593,14 @@ fail:
1593static struct net_device_stats *sdla_stats(struct net_device *dev) 1593static struct net_device_stats *sdla_stats(struct net_device *dev)
1594{ 1594{
1595 struct frad_local *flp; 1595 struct frad_local *flp;
1596 flp = dev->priv; 1596 flp = netdev_priv(dev);
1597 1597
1598 return(&flp->stats); 1598 return(&flp->stats);
1599} 1599}
1600 1600
1601static void setup_sdla(struct net_device *dev) 1601static void setup_sdla(struct net_device *dev)
1602{ 1602{
1603 struct frad_local *flp = dev->priv; 1603 struct frad_local *flp = netdev_priv(dev);
1604 1604
1605 netdev_boot_setup_check(dev); 1605 netdev_boot_setup_check(dev);
1606 1606
@@ -1651,7 +1651,7 @@ static int __init init_sdla(void)
1651 1651
1652static void __exit exit_sdla(void) 1652static void __exit exit_sdla(void)
1653{ 1653{
1654 struct frad_local *flp = sdla->priv; 1654 struct frad_local *flp = netdev_priv(sdla);
1655 1655
1656 unregister_netdev(sdla); 1656 unregister_netdev(sdla);
1657 if (flp->initialized) { 1657 if (flp->initialized) {
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index c0235844a4d5..0941a26f6e3f 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -68,7 +68,6 @@ static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
68 skb_reset_mac_header(skb); 68 skb_reset_mac_header(skb);
69 skb->dev = c->netdevice; 69 skb->dev = c->netdevice;
70 netif_rx(skb); 70 netif_rx(skb);
71 c->netdevice->last_rx = jiffies;
72} 71}
73 72
74/* 73/*
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
deleted file mode 100644
index 6e92f7b44b1a..000000000000
--- a/drivers/net/wan/syncppp.c
+++ /dev/null
@@ -1,1480 +0,0 @@
1/*
2 * NET3: A (fairly minimal) implementation of synchronous PPP for Linux
3 * as well as a CISCO HDLC implementation. See the copyright
4 * message below for the original source.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the license, or (at your option) any later version.
10 *
11 * Note however. This code is also used in a different form by FreeBSD.
12 * Therefore when making any non OS specific change please consider
13 * contributing it back to the original author under the terms
14 * below in addition.
15 * -- Alan
16 *
17 * Port for Linux-2.1 by Jan "Yenya" Kasprzak <kas@fi.muni.cz>
18 */
19
20/*
21 * Synchronous PPP/Cisco link level subroutines.
22 * Keepalive protocol implemented in both Cisco and PPP modes.
23 *
24 * Copyright (C) 1994 Cronyx Ltd.
25 * Author: Serge Vakulenko, <vak@zebub.msk.su>
26 *
27 * This software is distributed with NO WARRANTIES, not even the implied
28 * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Authors grant any other persons or organisations permission to use
31 * or modify this software as long as this message is kept with the software,
32 * all derivative works or modified versions.
33 *
34 * Version 1.9, Wed Oct 4 18:58:15 MSK 1995
35 *
36 * $Id: syncppp.c,v 1.18 2000/04/11 05:25:31 asj Exp $
37 */
38#undef DEBUG
39
40#include <linux/module.h>
41#include <linux/kernel.h>
42#include <linux/errno.h>
43#include <linux/init.h>
44#include <linux/if_arp.h>
45#include <linux/skbuff.h>
46#include <linux/route.h>
47#include <linux/netdevice.h>
48#include <linux/inetdevice.h>
49#include <linux/random.h>
50#include <linux/pkt_sched.h>
51#include <linux/spinlock.h>
52#include <linux/rcupdate.h>
53
54#include <net/net_namespace.h>
55#include <net/syncppp.h>
56
57#include <asm/byteorder.h>
58#include <asm/uaccess.h>
59
60#define MAXALIVECNT 6 /* max. alive packets */
61
62#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */
63#define PPP_UI 0x03 /* Unnumbered Information */
64#define PPP_IP 0x0021 /* Internet Protocol */
65#define PPP_ISO 0x0023 /* ISO OSI Protocol */
66#define PPP_XNS 0x0025 /* Xerox NS Protocol */
67#define PPP_IPX 0x002b /* Novell IPX Protocol */
68#define PPP_LCP 0xc021 /* Link Control Protocol */
69#define PPP_IPCP 0x8021 /* Internet Protocol Control Protocol */
70
71#define LCP_CONF_REQ 1 /* PPP LCP configure request */
72#define LCP_CONF_ACK 2 /* PPP LCP configure acknowledge */
73#define LCP_CONF_NAK 3 /* PPP LCP configure negative ack */
74#define LCP_CONF_REJ 4 /* PPP LCP configure reject */
75#define LCP_TERM_REQ 5 /* PPP LCP terminate request */
76#define LCP_TERM_ACK 6 /* PPP LCP terminate acknowledge */
77#define LCP_CODE_REJ 7 /* PPP LCP code reject */
78#define LCP_PROTO_REJ 8 /* PPP LCP protocol reject */
79#define LCP_ECHO_REQ 9 /* PPP LCP echo request */
80#define LCP_ECHO_REPLY 10 /* PPP LCP echo reply */
81#define LCP_DISC_REQ 11 /* PPP LCP discard request */
82
83#define LCP_OPT_MRU 1 /* maximum receive unit */
84#define LCP_OPT_ASYNC_MAP 2 /* async control character map */
85#define LCP_OPT_AUTH_PROTO 3 /* authentication protocol */
86#define LCP_OPT_QUAL_PROTO 4 /* quality protocol */
87#define LCP_OPT_MAGIC 5 /* magic number */
88#define LCP_OPT_RESERVED 6 /* reserved */
89#define LCP_OPT_PROTO_COMP 7 /* protocol field compression */
90#define LCP_OPT_ADDR_COMP 8 /* address/control field compression */
91
92#define IPCP_CONF_REQ LCP_CONF_REQ /* PPP IPCP configure request */
93#define IPCP_CONF_ACK LCP_CONF_ACK /* PPP IPCP configure acknowledge */
94#define IPCP_CONF_NAK LCP_CONF_NAK /* PPP IPCP configure negative ack */
95#define IPCP_CONF_REJ LCP_CONF_REJ /* PPP IPCP configure reject */
96#define IPCP_TERM_REQ LCP_TERM_REQ /* PPP IPCP terminate request */
97#define IPCP_TERM_ACK LCP_TERM_ACK /* PPP IPCP terminate acknowledge */
98#define IPCP_CODE_REJ LCP_CODE_REJ /* PPP IPCP code reject */
99
100#define CISCO_MULTICAST 0x8f /* Cisco multicast address */
101#define CISCO_UNICAST 0x0f /* Cisco unicast address */
102#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
103#define CISCO_ADDR_REQ 0 /* Cisco address request */
104#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
105#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
106
107struct ppp_header {
108 u8 address;
109 u8 control;
110 __be16 protocol;
111};
112#define PPP_HEADER_LEN sizeof (struct ppp_header)
113
114struct lcp_header {
115 u8 type;
116 u8 ident;
117 __be16 len;
118};
119#define LCP_HEADER_LEN sizeof (struct lcp_header)
120
121struct cisco_packet {
122 __be32 type;
123 __be32 par1;
124 __be32 par2;
125 __be16 rel;
126 __be16 time0;
127 __be16 time1;
128};
129#define CISCO_PACKET_LEN 18
130#define CISCO_BIG_PACKET_LEN 20
131
132static struct sppp *spppq;
133static struct timer_list sppp_keepalive_timer;
134static DEFINE_SPINLOCK(spppq_lock);
135
136/* global xmit queue for sending packets while spinlock is held */
137static struct sk_buff_head tx_queue;
138
139static void sppp_keepalive (unsigned long dummy);
140static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
141 u8 ident, u16 len, void *data);
142static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2);
143static void sppp_lcp_input (struct sppp *sp, struct sk_buff *m);
144static void sppp_cisco_input (struct sppp *sp, struct sk_buff *m);
145static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *m);
146static void sppp_lcp_open (struct sppp *sp);
147static void sppp_ipcp_open (struct sppp *sp);
148static int sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
149 int len, u32 *magic);
150static void sppp_cp_timeout (unsigned long arg);
151static char *sppp_lcp_type_name (u8 type);
152static char *sppp_ipcp_type_name (u8 type);
153static void sppp_print_bytes (u8 *p, u16 len);
154
155static int debug;
156
157/* Flush global outgoing packet queue to dev_queue_xmit().
158 *
159 * dev_queue_xmit() must be called with interrupts enabled
160 * which means it can't be called with spinlocks held.
161 * If a packet needs to be sent while a spinlock is held,
162 * then put the packet into tx_queue, and call sppp_flush_xmit()
163 * after spinlock is released.
164 */
165static void sppp_flush_xmit(void)
166{
167 struct sk_buff *skb;
168 while ((skb = skb_dequeue(&tx_queue)) != NULL)
169 dev_queue_xmit(skb);
170}
171
172/*
173 * Interface down stub
174 */
175
176static void if_down(struct net_device *dev)
177{
178 struct sppp *sp = (struct sppp *)sppp_of(dev);
179
180 sp->pp_link_state=SPPP_LINK_DOWN;
181}
182
183/*
184 * Timeout routine activations.
185 */
186
187static void sppp_set_timeout(struct sppp *p,int s)
188{
189 if (! (p->pp_flags & PP_TIMO))
190 {
191 init_timer(&p->pp_timer);
192 p->pp_timer.function=sppp_cp_timeout;
193 p->pp_timer.expires=jiffies+s*HZ;
194 p->pp_timer.data=(unsigned long)p;
195 p->pp_flags |= PP_TIMO;
196 add_timer(&p->pp_timer);
197 }
198}
199
200static void sppp_clear_timeout(struct sppp *p)
201{
202 if (p->pp_flags & PP_TIMO)
203 {
204 del_timer(&p->pp_timer);
205 p->pp_flags &= ~PP_TIMO;
206 }
207}
208
209/**
210 * sppp_input - receive and process a WAN PPP frame
211 * @skb: The buffer to process
212 * @dev: The device it arrived on
213 *
214 * This can be called directly by cards that do not have
215 * timing constraints but is normally called from the network layer
216 * after interrupt servicing to process frames queued via netif_rx().
217 *
218 * We process the options in the card. If the frame is destined for
219 * the protocol stacks then it requeues the frame for the upper level
220 * protocol. If it is a control from it is processed and discarded
221 * here.
222 */
223
224static void sppp_input (struct net_device *dev, struct sk_buff *skb)
225{
226 struct ppp_header *h;
227 struct sppp *sp = (struct sppp *)sppp_of(dev);
228 unsigned long flags;
229
230 skb->dev=dev;
231 skb_reset_mac_header(skb);
232
233 if (!pskb_may_pull(skb, PPP_HEADER_LEN)) {
234 /* Too small packet, drop it. */
235 if (sp->pp_flags & PP_DEBUG)
236 printk (KERN_DEBUG "%s: input packet is too small, %d bytes\n",
237 dev->name, skb->len);
238 kfree_skb(skb);
239 return;
240 }
241
242 /* Get PPP header. */
243 h = (struct ppp_header *)skb->data;
244 skb_pull(skb,sizeof(struct ppp_header));
245
246 spin_lock_irqsave(&sp->lock, flags);
247
248 switch (h->address) {
249 default: /* Invalid PPP packet. */
250 goto invalid;
251 case PPP_ALLSTATIONS:
252 if (h->control != PPP_UI)
253 goto invalid;
254 if (sp->pp_flags & PP_CISCO) {
255 if (sp->pp_flags & PP_DEBUG)
256 printk (KERN_WARNING "%s: PPP packet in Cisco mode <0x%x 0x%x 0x%x>\n",
257 dev->name,
258 h->address, h->control, ntohs (h->protocol));
259 goto drop;
260 }
261 switch (ntohs (h->protocol)) {
262 default:
263 if (sp->lcp.state == LCP_STATE_OPENED)
264 sppp_cp_send (sp, PPP_LCP, LCP_PROTO_REJ,
265 ++sp->pp_seq, skb->len + 2,
266 &h->protocol);
267 if (sp->pp_flags & PP_DEBUG)
268 printk (KERN_WARNING "%s: invalid input protocol <0x%x 0x%x 0x%x>\n",
269 dev->name,
270 h->address, h->control, ntohs (h->protocol));
271 goto drop;
272 case PPP_LCP:
273 sppp_lcp_input (sp, skb);
274 goto drop;
275 case PPP_IPCP:
276 if (sp->lcp.state == LCP_STATE_OPENED)
277 sppp_ipcp_input (sp, skb);
278 else
279 printk(KERN_DEBUG "IPCP when still waiting LCP finish.\n");
280 goto drop;
281 case PPP_IP:
282 if (sp->ipcp.state == IPCP_STATE_OPENED) {
283 if(sp->pp_flags&PP_DEBUG)
284 printk(KERN_DEBUG "Yow an IP frame.\n");
285 skb->protocol=htons(ETH_P_IP);
286 netif_rx(skb);
287 dev->last_rx = jiffies;
288 goto done;
289 }
290 break;
291#ifdef IPX
292 case PPP_IPX:
293 /* IPX IPXCP not implemented yet */
294 if (sp->lcp.state == LCP_STATE_OPENED) {
295 skb->protocol=htons(ETH_P_IPX);
296 netif_rx(skb);
297 dev->last_rx = jiffies;
298 goto done;
299 }
300 break;
301#endif
302 }
303 break;
304 case CISCO_MULTICAST:
305 case CISCO_UNICAST:
306 /* Don't check the control field here (RFC 1547). */
307 if (! (sp->pp_flags & PP_CISCO)) {
308 if (sp->pp_flags & PP_DEBUG)
309 printk (KERN_WARNING "%s: Cisco packet in PPP mode <0x%x 0x%x 0x%x>\n",
310 dev->name,
311 h->address, h->control, ntohs (h->protocol));
312 goto drop;
313 }
314 switch (ntohs (h->protocol)) {
315 default:
316 goto invalid;
317 case CISCO_KEEPALIVE:
318 sppp_cisco_input (sp, skb);
319 goto drop;
320#ifdef CONFIG_INET
321 case ETH_P_IP:
322 skb->protocol=htons(ETH_P_IP);
323 netif_rx(skb);
324 dev->last_rx = jiffies;
325 goto done;
326#endif
327#ifdef CONFIG_IPX
328 case ETH_P_IPX:
329 skb->protocol=htons(ETH_P_IPX);
330 netif_rx(skb);
331 dev->last_rx = jiffies;
332 goto done;
333#endif
334 }
335 break;
336 }
337 goto drop;
338
339invalid:
340 if (sp->pp_flags & PP_DEBUG)
341 printk (KERN_WARNING "%s: invalid input packet <0x%x 0x%x 0x%x>\n",
342 dev->name, h->address, h->control, ntohs (h->protocol));
343drop:
344 kfree_skb(skb);
345done:
346 spin_unlock_irqrestore(&sp->lock, flags);
347 sppp_flush_xmit();
348 return;
349}
350
351/*
352 * Handle transmit packets.
353 */
354
355static int sppp_hard_header(struct sk_buff *skb,
356 struct net_device *dev, __u16 type,
357 const void *daddr, const void *saddr,
358 unsigned int len)
359{
360 struct sppp *sp = (struct sppp *)sppp_of(dev);
361 struct ppp_header *h;
362 skb_push(skb,sizeof(struct ppp_header));
363 h=(struct ppp_header *)skb->data;
364 if(sp->pp_flags&PP_CISCO)
365 {
366 h->address = CISCO_UNICAST;
367 h->control = 0;
368 }
369 else
370 {
371 h->address = PPP_ALLSTATIONS;
372 h->control = PPP_UI;
373 }
374 if(sp->pp_flags & PP_CISCO)
375 {
376 h->protocol = htons(type);
377 }
378 else switch(type)
379 {
380 case ETH_P_IP:
381 h->protocol = htons(PPP_IP);
382 break;
383 case ETH_P_IPX:
384 h->protocol = htons(PPP_IPX);
385 break;
386 }
387 return sizeof(struct ppp_header);
388}
389
390static const struct header_ops sppp_header_ops = {
391 .create = sppp_hard_header,
392};
393
394/*
395 * Send keepalive packets, every 10 seconds.
396 */
397
398static void sppp_keepalive (unsigned long dummy)
399{
400 struct sppp *sp;
401 unsigned long flags;
402
403 spin_lock_irqsave(&spppq_lock, flags);
404
405 for (sp=spppq; sp; sp=sp->pp_next)
406 {
407 struct net_device *dev = sp->pp_if;
408
409 /* Keepalive mode disabled or channel down? */
410 if (! (sp->pp_flags & PP_KEEPALIVE) ||
411 ! (dev->flags & IFF_UP))
412 continue;
413
414 spin_lock(&sp->lock);
415
416 /* No keepalive in PPP mode if LCP not opened yet. */
417 if (! (sp->pp_flags & PP_CISCO) &&
418 sp->lcp.state != LCP_STATE_OPENED) {
419 spin_unlock(&sp->lock);
420 continue;
421 }
422
423 if (sp->pp_alivecnt == MAXALIVECNT) {
424 /* No keepalive packets got. Stop the interface. */
425 printk (KERN_WARNING "%s: protocol down\n", dev->name);
426 if_down (dev);
427 if (! (sp->pp_flags & PP_CISCO)) {
428 /* Shut down the PPP link. */
429 sp->lcp.magic = jiffies;
430 sp->lcp.state = LCP_STATE_CLOSED;
431 sp->ipcp.state = IPCP_STATE_CLOSED;
432 sppp_clear_timeout (sp);
433 /* Initiate negotiation. */
434 sppp_lcp_open (sp);
435 }
436 }
437 if (sp->pp_alivecnt <= MAXALIVECNT)
438 ++sp->pp_alivecnt;
439 if (sp->pp_flags & PP_CISCO)
440 sppp_cisco_send (sp, CISCO_KEEPALIVE_REQ, ++sp->pp_seq,
441 sp->pp_rseq);
442 else if (sp->lcp.state == LCP_STATE_OPENED) {
443 __be32 nmagic = htonl (sp->lcp.magic);
444 sp->lcp.echoid = ++sp->pp_seq;
445 sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REQ,
446 sp->lcp.echoid, 4, &nmagic);
447 }
448
449 spin_unlock(&sp->lock);
450 }
451 spin_unlock_irqrestore(&spppq_lock, flags);
452 sppp_flush_xmit();
453 sppp_keepalive_timer.expires=jiffies+10*HZ;
454 add_timer(&sppp_keepalive_timer);
455}
456
457/*
458 * Handle incoming PPP Link Control Protocol packets.
459 */
460
461static void sppp_lcp_input (struct sppp *sp, struct sk_buff *skb)
462{
463 struct lcp_header *h;
464 struct net_device *dev = sp->pp_if;
465 int len = skb->len;
466 u8 *p, opt[6];
467 u32 rmagic = 0;
468
469 if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
470 if (sp->pp_flags & PP_DEBUG)
471 printk (KERN_WARNING "%s: invalid lcp packet length: %d bytes\n",
472 dev->name, len);
473 return;
474 }
475 h = (struct lcp_header *)skb->data;
476 skb_pull(skb,sizeof(struct lcp_header *));
477
478 if (sp->pp_flags & PP_DEBUG)
479 {
480 char state = '?';
481 switch (sp->lcp.state) {
482 case LCP_STATE_CLOSED: state = 'C'; break;
483 case LCP_STATE_ACK_RCVD: state = 'R'; break;
484 case LCP_STATE_ACK_SENT: state = 'S'; break;
485 case LCP_STATE_OPENED: state = 'O'; break;
486 }
487 printk (KERN_WARNING "%s: lcp input(%c): %d bytes <%s id=%xh len=%xh",
488 dev->name, state, len,
489 sppp_lcp_type_name (h->type), h->ident, ntohs (h->len));
490 if (len > 4)
491 sppp_print_bytes ((u8*) (h+1), len-4);
492 printk (">\n");
493 }
494 if (len > ntohs (h->len))
495 len = ntohs (h->len);
496 switch (h->type) {
497 default:
498 /* Unknown packet type -- send Code-Reject packet. */
499 sppp_cp_send (sp, PPP_LCP, LCP_CODE_REJ, ++sp->pp_seq,
500 skb->len, h);
501 break;
502 case LCP_CONF_REQ:
503 if (len < 4) {
504 if (sp->pp_flags & PP_DEBUG)
505 printk (KERN_DEBUG"%s: invalid lcp configure request packet length: %d bytes\n",
506 dev->name, len);
507 break;
508 }
509 if (len>4 && !sppp_lcp_conf_parse_options (sp, h, len, &rmagic))
510 goto badreq;
511 if (rmagic == sp->lcp.magic) {
512 /* Local and remote magics equal -- loopback? */
513 if (sp->pp_loopcnt >= MAXALIVECNT*5) {
514 printk (KERN_WARNING "%s: loopback\n",
515 dev->name);
516 sp->pp_loopcnt = 0;
517 if (dev->flags & IFF_UP) {
518 if_down (dev);
519 }
520 } else if (sp->pp_flags & PP_DEBUG)
521 printk (KERN_DEBUG "%s: conf req: magic glitch\n",
522 dev->name);
523 ++sp->pp_loopcnt;
524
525 /* MUST send Conf-Nack packet. */
526 rmagic = ~sp->lcp.magic;
527 opt[0] = LCP_OPT_MAGIC;
528 opt[1] = sizeof (opt);
529 opt[2] = rmagic >> 24;
530 opt[3] = rmagic >> 16;
531 opt[4] = rmagic >> 8;
532 opt[5] = rmagic;
533 sppp_cp_send (sp, PPP_LCP, LCP_CONF_NAK,
534 h->ident, sizeof (opt), &opt);
535badreq:
536 switch (sp->lcp.state) {
537 case LCP_STATE_OPENED:
538 /* Initiate renegotiation. */
539 sppp_lcp_open (sp);
540 /* fall through... */
541 case LCP_STATE_ACK_SENT:
542 /* Go to closed state. */
543 sp->lcp.state = LCP_STATE_CLOSED;
544 sp->ipcp.state = IPCP_STATE_CLOSED;
545 }
546 break;
547 }
548 /* Send Configure-Ack packet. */
549 sp->pp_loopcnt = 0;
550 if (sp->lcp.state != LCP_STATE_OPENED) {
551 sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
552 h->ident, len-4, h+1);
553 }
554 /* Change the state. */
555 switch (sp->lcp.state) {
556 case LCP_STATE_CLOSED:
557 sp->lcp.state = LCP_STATE_ACK_SENT;
558 break;
559 case LCP_STATE_ACK_RCVD:
560 sp->lcp.state = LCP_STATE_OPENED;
561 sppp_ipcp_open (sp);
562 break;
563 case LCP_STATE_OPENED:
564 /* Remote magic changed -- close session. */
565 sp->lcp.state = LCP_STATE_CLOSED;
566 sp->ipcp.state = IPCP_STATE_CLOSED;
567 /* Initiate renegotiation. */
568 sppp_lcp_open (sp);
569 /* Send ACK after our REQ in attempt to break loop */
570 sppp_cp_send (sp, PPP_LCP, LCP_CONF_ACK,
571 h->ident, len-4, h+1);
572 sp->lcp.state = LCP_STATE_ACK_SENT;
573 break;
574 }
575 break;
576 case LCP_CONF_ACK:
577 if (h->ident != sp->lcp.confid)
578 break;
579 sppp_clear_timeout (sp);
580 if ((sp->pp_link_state != SPPP_LINK_UP) &&
581 (dev->flags & IFF_UP)) {
582 /* Coming out of loopback mode. */
583 sp->pp_link_state=SPPP_LINK_UP;
584 printk (KERN_INFO "%s: protocol up\n", dev->name);
585 }
586 switch (sp->lcp.state) {
587 case LCP_STATE_CLOSED:
588 sp->lcp.state = LCP_STATE_ACK_RCVD;
589 sppp_set_timeout (sp, 5);
590 break;
591 case LCP_STATE_ACK_SENT:
592 sp->lcp.state = LCP_STATE_OPENED;
593 sppp_ipcp_open (sp);
594 break;
595 }
596 break;
597 case LCP_CONF_NAK:
598 if (h->ident != sp->lcp.confid)
599 break;
600 p = (u8*) (h+1);
601 if (len>=10 && p[0] == LCP_OPT_MAGIC && p[1] >= 4) {
602 rmagic = (u32)p[2] << 24 |
603 (u32)p[3] << 16 | p[4] << 8 | p[5];
604 if (rmagic == ~sp->lcp.magic) {
605 int newmagic;
606 if (sp->pp_flags & PP_DEBUG)
607 printk (KERN_DEBUG "%s: conf nak: magic glitch\n",
608 dev->name);
609 get_random_bytes(&newmagic, sizeof(newmagic));
610 sp->lcp.magic += newmagic;
611 } else
612 sp->lcp.magic = rmagic;
613 }
614 if (sp->lcp.state != LCP_STATE_ACK_SENT) {
615 /* Go to closed state. */
616 sp->lcp.state = LCP_STATE_CLOSED;
617 sp->ipcp.state = IPCP_STATE_CLOSED;
618 }
619 /* The link will be renegotiated after timeout,
620 * to avoid endless req-nack loop. */
621 sppp_clear_timeout (sp);
622 sppp_set_timeout (sp, 2);
623 break;
624 case LCP_CONF_REJ:
625 if (h->ident != sp->lcp.confid)
626 break;
627 sppp_clear_timeout (sp);
628 /* Initiate renegotiation. */
629 sppp_lcp_open (sp);
630 if (sp->lcp.state != LCP_STATE_ACK_SENT) {
631 /* Go to closed state. */
632 sp->lcp.state = LCP_STATE_CLOSED;
633 sp->ipcp.state = IPCP_STATE_CLOSED;
634 }
635 break;
636 case LCP_TERM_REQ:
637 sppp_clear_timeout (sp);
638 /* Send Terminate-Ack packet. */
639 sppp_cp_send (sp, PPP_LCP, LCP_TERM_ACK, h->ident, 0, NULL);
640 /* Go to closed state. */
641 sp->lcp.state = LCP_STATE_CLOSED;
642 sp->ipcp.state = IPCP_STATE_CLOSED;
643 /* Initiate renegotiation. */
644 sppp_lcp_open (sp);
645 break;
646 case LCP_TERM_ACK:
647 case LCP_CODE_REJ:
648 case LCP_PROTO_REJ:
649 /* Ignore for now. */
650 break;
651 case LCP_DISC_REQ:
652 /* Discard the packet. */
653 break;
654 case LCP_ECHO_REQ:
655 if (sp->lcp.state != LCP_STATE_OPENED)
656 break;
657 if (len < 8) {
658 if (sp->pp_flags & PP_DEBUG)
659 printk (KERN_WARNING "%s: invalid lcp echo request packet length: %d bytes\n",
660 dev->name, len);
661 break;
662 }
663 if (ntohl (*(__be32*)(h+1)) == sp->lcp.magic) {
664 /* Line loopback mode detected. */
665 printk (KERN_WARNING "%s: loopback\n", dev->name);
666 if_down (dev);
667
668 /* Shut down the PPP link. */
669 sp->lcp.state = LCP_STATE_CLOSED;
670 sp->ipcp.state = IPCP_STATE_CLOSED;
671 sppp_clear_timeout (sp);
672 /* Initiate negotiation. */
673 sppp_lcp_open (sp);
674 break;
675 }
676 *(__be32 *)(h+1) = htonl (sp->lcp.magic);
677 sppp_cp_send (sp, PPP_LCP, LCP_ECHO_REPLY, h->ident, len-4, h+1);
678 break;
679 case LCP_ECHO_REPLY:
680 if (h->ident != sp->lcp.echoid)
681 break;
682 if (len < 8) {
683 if (sp->pp_flags & PP_DEBUG)
684 printk (KERN_WARNING "%s: invalid lcp echo reply packet length: %d bytes\n",
685 dev->name, len);
686 break;
687 }
688 if (ntohl(*(__be32 *)(h+1)) != sp->lcp.magic)
689 sp->pp_alivecnt = 0;
690 break;
691 }
692}
693
694/*
695 * Handle incoming Cisco keepalive protocol packets.
696 */
697
698static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
699{
700 struct cisco_packet *h;
701 struct net_device *dev = sp->pp_if;
702
703 if (!pskb_may_pull(skb, sizeof(struct cisco_packet))
704 || (skb->len != CISCO_PACKET_LEN
705 && skb->len != CISCO_BIG_PACKET_LEN)) {
706 if (sp->pp_flags & PP_DEBUG)
707 printk (KERN_WARNING "%s: invalid cisco packet length: %d bytes\n",
708 dev->name, skb->len);
709 return;
710 }
711 h = (struct cisco_packet *)skb->data;
712 skb_pull(skb, sizeof(struct cisco_packet*));
713 if (sp->pp_flags & PP_DEBUG)
714 printk (KERN_WARNING "%s: cisco input: %d bytes <%xh %xh %xh %xh %xh-%xh>\n",
715 dev->name, skb->len,
716 ntohl (h->type), h->par1, h->par2, h->rel,
717 h->time0, h->time1);
718 switch (ntohl (h->type)) {
719 default:
720 if (sp->pp_flags & PP_DEBUG)
721 printk (KERN_WARNING "%s: unknown cisco packet type: 0x%x\n",
722 dev->name, ntohl (h->type));
723 break;
724 case CISCO_ADDR_REPLY:
725 /* Reply on address request, ignore */
726 break;
727 case CISCO_KEEPALIVE_REQ:
728 sp->pp_alivecnt = 0;
729 sp->pp_rseq = ntohl (h->par1);
730 if (sp->pp_seq == sp->pp_rseq) {
731 /* Local and remote sequence numbers are equal.
732 * Probably, the line is in loopback mode. */
733 int newseq;
734 if (sp->pp_loopcnt >= MAXALIVECNT) {
735 printk (KERN_WARNING "%s: loopback\n",
736 dev->name);
737 sp->pp_loopcnt = 0;
738 if (dev->flags & IFF_UP) {
739 if_down (dev);
740 }
741 }
742 ++sp->pp_loopcnt;
743
744 /* Generate new local sequence number */
745 get_random_bytes(&newseq, sizeof(newseq));
746 sp->pp_seq ^= newseq;
747 break;
748 }
749 sp->pp_loopcnt = 0;
750 if (sp->pp_link_state==SPPP_LINK_DOWN &&
751 (dev->flags & IFF_UP)) {
752 sp->pp_link_state=SPPP_LINK_UP;
753 printk (KERN_INFO "%s: protocol up\n", dev->name);
754 }
755 break;
756 case CISCO_ADDR_REQ:
757 /* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */
758 {
759 __be32 addr = 0, mask = htonl(~0U); /* FIXME: is the mask correct? */
760#ifdef CONFIG_INET
761 struct in_device *in_dev;
762 struct in_ifaddr *ifa;
763
764 rcu_read_lock();
765 if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
766 {
767 for (ifa=in_dev->ifa_list; ifa != NULL;
768 ifa=ifa->ifa_next) {
769 if (strcmp(dev->name, ifa->ifa_label) == 0)
770 {
771 addr = ifa->ifa_local;
772 mask = ifa->ifa_mask;
773 break;
774 }
775 }
776 }
777 rcu_read_unlock();
778#endif
779 sppp_cisco_send (sp, CISCO_ADDR_REPLY, ntohl(addr), ntohl(mask));
780 break;
781 }
782 }
783}
784
785
786/*
787 * Send PPP LCP packet.
788 */
789
790static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type,
791 u8 ident, u16 len, void *data)
792{
793 struct ppp_header *h;
794 struct lcp_header *lh;
795 struct sk_buff *skb;
796 struct net_device *dev = sp->pp_if;
797
798 skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+LCP_HEADER_LEN+len,
799 GFP_ATOMIC);
800 if (skb==NULL)
801 return;
802
803 skb_reserve(skb,dev->hard_header_len);
804
805 h = (struct ppp_header *)skb_put(skb, sizeof(struct ppp_header));
806 h->address = PPP_ALLSTATIONS; /* broadcast address */
807 h->control = PPP_UI; /* Unnumbered Info */
808 h->protocol = htons (proto); /* Link Control Protocol */
809
810 lh = (struct lcp_header *)skb_put(skb, sizeof(struct lcp_header));
811 lh->type = type;
812 lh->ident = ident;
813 lh->len = htons (LCP_HEADER_LEN + len);
814
815 if (len)
816 memcpy(skb_put(skb,len),data, len);
817
818 if (sp->pp_flags & PP_DEBUG) {
819 printk (KERN_WARNING "%s: %s output <%s id=%xh len=%xh",
820 dev->name,
821 proto==PPP_LCP ? "lcp" : "ipcp",
822 proto==PPP_LCP ? sppp_lcp_type_name (lh->type) :
823 sppp_ipcp_type_name (lh->type), lh->ident,
824 ntohs (lh->len));
825 if (len)
826 sppp_print_bytes ((u8*) (lh+1), len);
827 printk (">\n");
828 }
829 /* Control is high priority so it doesn't get queued behind data */
830 skb->priority=TC_PRIO_CONTROL;
831 skb->dev = dev;
832 skb_queue_tail(&tx_queue, skb);
833}
834
835/*
836 * Send Cisco keepalive packet.
837 */
838
839static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2)
840{
841 struct ppp_header *h;
842 struct cisco_packet *ch;
843 struct sk_buff *skb;
844 struct net_device *dev = sp->pp_if;
845 u32 t = jiffies * 1000/HZ;
846
847 skb=alloc_skb(dev->hard_header_len+PPP_HEADER_LEN+CISCO_PACKET_LEN,
848 GFP_ATOMIC);
849
850 if(skb==NULL)
851 return;
852
853 skb_reserve(skb, dev->hard_header_len);
854 h = (struct ppp_header *)skb_put (skb, sizeof(struct ppp_header));
855 h->address = CISCO_MULTICAST;
856 h->control = 0;
857 h->protocol = htons (CISCO_KEEPALIVE);
858
859 ch = (struct cisco_packet*)skb_put(skb, CISCO_PACKET_LEN);
860 ch->type = htonl (type);
861 ch->par1 = htonl (par1);
862 ch->par2 = htonl (par2);
863 ch->rel = htons(0xffff);
864 ch->time0 = htons ((u16) (t >> 16));
865 ch->time1 = htons ((u16) t);
866
867 if (sp->pp_flags & PP_DEBUG)
868 printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n",
869 dev->name, ntohl (ch->type), ch->par1,
870 ch->par2, ch->rel, ch->time0, ch->time1);
871 skb->priority=TC_PRIO_CONTROL;
872 skb->dev = dev;
873 skb_queue_tail(&tx_queue, skb);
874}
875
876/**
877 * sppp_close - close down a synchronous PPP or Cisco HDLC link
878 * @dev: The network device to drop the link of
879 *
880 * This drops the logical interface to the channel. It is not
881 * done politely as we assume we will also be dropping DTR. Any
882 * timeouts are killed.
883 */
884
885int sppp_close (struct net_device *dev)
886{
887 struct sppp *sp = (struct sppp *)sppp_of(dev);
888 unsigned long flags;
889
890 spin_lock_irqsave(&sp->lock, flags);
891 sp->pp_link_state = SPPP_LINK_DOWN;
892 sp->lcp.state = LCP_STATE_CLOSED;
893 sp->ipcp.state = IPCP_STATE_CLOSED;
894 sppp_clear_timeout (sp);
895 spin_unlock_irqrestore(&sp->lock, flags);
896
897 return 0;
898}
899
900EXPORT_SYMBOL(sppp_close);
901
902/**
903 * sppp_open - open a synchronous PPP or Cisco HDLC link
904 * @dev: Network device to activate
905 *
906 * Close down any existing synchronous session and commence
907 * from scratch. In the PPP case this means negotiating LCP/IPCP
908 * and friends, while for Cisco HDLC we simply need to start sending
909 * keepalives
910 */
911
912int sppp_open (struct net_device *dev)
913{
914 struct sppp *sp = (struct sppp *)sppp_of(dev);
915 unsigned long flags;
916
917 sppp_close(dev);
918
919 spin_lock_irqsave(&sp->lock, flags);
920 if (!(sp->pp_flags & PP_CISCO)) {
921 sppp_lcp_open (sp);
922 }
923 sp->pp_link_state = SPPP_LINK_DOWN;
924 spin_unlock_irqrestore(&sp->lock, flags);
925 sppp_flush_xmit();
926
927 return 0;
928}
929
930EXPORT_SYMBOL(sppp_open);
931
932/**
933 * sppp_reopen - notify of physical link loss
934 * @dev: Device that lost the link
935 *
936 * This function informs the synchronous protocol code that
937 * the underlying link died (for example a carrier drop on X.21)
938 *
939 * We increment the magic numbers to ensure that if the other end
940 * failed to notice we will correctly start a new session. It happens
941 * do to the nature of telco circuits is that you can lose carrier on
942 * one endonly.
943 *
944 * Having done this we go back to negotiating. This function may
945 * be called from an interrupt context.
946 */
947
948int sppp_reopen (struct net_device *dev)
949{
950 struct sppp *sp = (struct sppp *)sppp_of(dev);
951 unsigned long flags;
952
953 sppp_close(dev);
954
955 spin_lock_irqsave(&sp->lock, flags);
956 if (!(sp->pp_flags & PP_CISCO))
957 {
958 sp->lcp.magic = jiffies;
959 ++sp->pp_seq;
960 sp->lcp.state = LCP_STATE_CLOSED;
961 sp->ipcp.state = IPCP_STATE_CLOSED;
962 /* Give it a moment for the line to settle then go */
963 sppp_set_timeout (sp, 1);
964 }
965 sp->pp_link_state=SPPP_LINK_DOWN;
966 spin_unlock_irqrestore(&sp->lock, flags);
967
968 return 0;
969}
970
971EXPORT_SYMBOL(sppp_reopen);
972
973/**
974 * sppp_change_mtu - Change the link MTU
975 * @dev: Device to change MTU on
976 * @new_mtu: New MTU
977 *
978 * Change the MTU on the link. This can only be called with
979 * the link down. It returns an error if the link is up or
980 * the mtu is out of range.
981 */
982
983static int sppp_change_mtu(struct net_device *dev, int new_mtu)
984{
985 if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP))
986 return -EINVAL;
987 dev->mtu=new_mtu;
988 return 0;
989}
990
991/**
992 * sppp_do_ioctl - Ioctl handler for ppp/hdlc
993 * @dev: Device subject to ioctl
994 * @ifr: Interface request block from the user
995 * @cmd: Command that is being issued
996 *
997 * This function handles the ioctls that may be issued by the user
998 * to control the settings of a PPP/HDLC link. It does both busy
999 * and security checks. This function is intended to be wrapped by
1000 * callers who wish to add additional ioctl calls of their own.
1001 */
1002
1003int sppp_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1004{
1005 struct sppp *sp = (struct sppp *)sppp_of(dev);
1006
1007 if(dev->flags&IFF_UP)
1008 return -EBUSY;
1009
1010 if(!capable(CAP_NET_ADMIN))
1011 return -EPERM;
1012
1013 switch(cmd)
1014 {
1015 case SPPPIOCCISCO:
1016 sp->pp_flags|=PP_CISCO;
1017 dev->type = ARPHRD_HDLC;
1018 break;
1019 case SPPPIOCPPP:
1020 sp->pp_flags&=~PP_CISCO;
1021 dev->type = ARPHRD_PPP;
1022 break;
1023 case SPPPIOCDEBUG:
1024 sp->pp_flags&=~PP_DEBUG;
1025 if(ifr->ifr_flags)
1026 sp->pp_flags|=PP_DEBUG;
1027 break;
1028 case SPPPIOCGFLAGS:
1029 if(copy_to_user(ifr->ifr_data, &sp->pp_flags, sizeof(sp->pp_flags)))
1030 return -EFAULT;
1031 break;
1032 case SPPPIOCSFLAGS:
1033 if(copy_from_user(&sp->pp_flags, ifr->ifr_data, sizeof(sp->pp_flags)))
1034 return -EFAULT;
1035 break;
1036 default:
1037 return -EINVAL;
1038 }
1039 return 0;
1040}
1041
1042EXPORT_SYMBOL(sppp_do_ioctl);
1043
1044/**
1045 * sppp_attach - attach synchronous PPP/HDLC to a device
1046 * @pd: PPP device to initialise
1047 *
1048 * This initialises the PPP/HDLC support on an interface. At the
1049 * time of calling the dev element must point to the network device
1050 * that this interface is attached to. The interface should not yet
1051 * be registered.
1052 */
1053
void sppp_attach(struct ppp_device *pd)
{
	struct net_device *dev = pd->dev;
	struct sppp *sp = &pd->sppp;
	unsigned long flags;

	/* Make sure embedding is safe for sppp_of */
	BUG_ON(sppp_of(dev) != sp);

	spin_lock_irqsave(&spppq_lock, flags);
	/* Initialize keepalive handler. */
	/* The first attached interface arms the single global keepalive
	 * timer; sppp_detach() deletes it when the list empties again. */
	if (! spppq)
	{
		init_timer(&sppp_keepalive_timer);
		sppp_keepalive_timer.expires=jiffies+10*HZ;
		sppp_keepalive_timer.function=sppp_keepalive;
		add_timer(&sppp_keepalive_timer);
	}
	/* Insert new entry into the keepalive list. */
	sp->pp_next = spppq;
	spppq = sp;
	spin_unlock_irqrestore(&spppq_lock, flags);

	/* Reset the per-interface PPP/Cisco-HDLC protocol state.  CISCO
	 * keepalive mode is the default; "debug" is 0 or PP_DEBUG (it is
	 * normalised in sync_ppp_init()). */
	sp->pp_loopcnt = 0;
	sp->pp_alivecnt = 0;
	sp->pp_seq = 0;
	sp->pp_rseq = 0;
	sp->pp_flags = PP_KEEPALIVE|PP_CISCO|debug;/*PP_DEBUG;*/
	sp->lcp.magic = 0;
	sp->lcp.state = LCP_STATE_CLOSED;
	sp->ipcp.state = IPCP_STATE_CLOSED;
	sp->pp_if = dev;
	spin_lock_init(&sp->lock);

	/*
	 * Device specific setup. All but interrupt handler and
	 * hard_start_xmit.
	 */

	dev->header_ops = &sppp_header_ops;

	dev->tx_queue_len = 10;
	dev->type = ARPHRD_HDLC;
	dev->addr_len = 0;
	dev->hard_header_len = sizeof(struct ppp_header);
	dev->mtu = PPP_MTU;
	/*
	 * These 4 are callers but MUST also call sppp_ functions
	 */
	dev->do_ioctl = sppp_do_ioctl;
#if 0
	dev->get_stats = NULL;		/* Let the driver override these */
	dev->open = sppp_open;
	dev->stop = sppp_close;
#endif
	dev->change_mtu = sppp_change_mtu;
	dev->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
}
1112
1113EXPORT_SYMBOL(sppp_attach);
1114
1115/**
1116 * sppp_detach - release PPP resources from a device
1117 * @dev: Network device to release
1118 *
1119 * Stop and free up any PPP/HDLC resources used by this
1120 * interface. This must be called before the device is
1121 * freed.
1122 */
1123
1124void sppp_detach (struct net_device *dev)
1125{
1126 struct sppp **q, *p, *sp = (struct sppp *)sppp_of(dev);
1127 unsigned long flags;
1128
1129 spin_lock_irqsave(&spppq_lock, flags);
1130 /* Remove the entry from the keepalive list. */
1131 for (q = &spppq; (p = *q); q = &p->pp_next)
1132 if (p == sp) {
1133 *q = p->pp_next;
1134 break;
1135 }
1136
1137 /* Stop keepalive handler. */
1138 if (! spppq)
1139 del_timer(&sppp_keepalive_timer);
1140 sppp_clear_timeout (sp);
1141 spin_unlock_irqrestore(&spppq_lock, flags);
1142}
1143
1144EXPORT_SYMBOL(sppp_detach);
1145
1146/*
1147 * Analyze the LCP Configure-Request options list
1148 * for the presence of unknown options.
1149 * If the request contains unknown options, build and
1150 * send Configure-reject packet, containing only unknown options.
1151 */
1152static int
1153sppp_lcp_conf_parse_options (struct sppp *sp, struct lcp_header *h,
1154 int len, u32 *magic)
1155{
1156 u8 *buf, *r, *p;
1157 int rlen;
1158
1159 len -= 4;
1160 buf = r = kmalloc (len, GFP_ATOMIC);
1161 if (! buf)
1162 return (0);
1163
1164 p = (void*) (h+1);
1165 for (rlen=0; len>1 && p[1]; len-=p[1], p+=p[1]) {
1166 switch (*p) {
1167 case LCP_OPT_MAGIC:
1168 /* Magic number -- extract. */
1169 if (len >= 6 && p[1] == 6) {
1170 *magic = (u32)p[2] << 24 |
1171 (u32)p[3] << 16 | p[4] << 8 | p[5];
1172 continue;
1173 }
1174 break;
1175 case LCP_OPT_ASYNC_MAP:
1176 /* Async control character map -- check to be zero. */
1177 if (len >= 6 && p[1] == 6 && ! p[2] && ! p[3] &&
1178 ! p[4] && ! p[5])
1179 continue;
1180 break;
1181 case LCP_OPT_MRU:
1182 /* Maximum receive unit -- always OK. */
1183 continue;
1184 default:
1185 /* Others not supported. */
1186 break;
1187 }
1188 /* Add the option to rejected list. */
1189 memcpy(r, p, p[1]);
1190 r += p[1];
1191 rlen += p[1];
1192 }
1193 if (rlen)
1194 sppp_cp_send (sp, PPP_LCP, LCP_CONF_REJ, h->ident, rlen, buf);
1195 kfree(buf);
1196 return (rlen == 0);
1197}
1198
/*
 * Handle one received IPCP packet: parse the CP header, log it when
 * PP_DEBUG is set, and drive the IPCP state machine accordingly.
 * NOTE(review): appears to run with sp->lock held by the caller
 * (not visible in this file chunk) -- confirm before relying on it.
 */
static void sppp_ipcp_input (struct sppp *sp, struct sk_buff *skb)
{
	struct lcp_header *h;
	struct net_device *dev = sp->pp_if;
	int len = skb->len;

	/* Packet must at least hold a CP header. */
	if (!pskb_may_pull(skb, sizeof(struct lcp_header))) {
		if (sp->pp_flags & PP_DEBUG)
			printk (KERN_WARNING "%s: invalid ipcp packet length: %d bytes\n",
				dev->name, len);
		return;
	}
	h = (struct lcp_header *)skb->data;
	skb_pull(skb,sizeof(struct lcp_header));
	if (sp->pp_flags & PP_DEBUG) {
		printk (KERN_WARNING "%s: ipcp input: %d bytes <%s id=%xh len=%xh",
			dev->name, len,
			sppp_ipcp_type_name (h->type), h->ident, ntohs (h->len));
		if (len > 4)
			sppp_print_bytes ((u8*) (h+1), len-4);
		printk (">\n");
	}
	/* Clamp to the length the peer claims, never beyond skb data. */
	if (len > ntohs (h->len))
		len = ntohs (h->len);
	switch (h->type) {
	default:
		/* Unknown packet type -- send Code-Reject packet. */
		sppp_cp_send (sp, PPP_IPCP, IPCP_CODE_REJ, ++sp->pp_seq, len, h);
		break;
	case IPCP_CONF_REQ:
		if (len < 4) {
			if (sp->pp_flags & PP_DEBUG)
				printk (KERN_WARNING "%s: invalid ipcp configure request packet length: %d bytes\n",
					dev->name, len);
			return;
		}
		if (len > 4) {
			/* We support no IPCP options: reject them all.
			 * NOTE(review): LCP_CONF_REJ is used inside an IPCP
			 * packet; the CP code values are presumably equal,
			 * but IPCP_CONF_REJ would be clearer -- confirm
			 * macro values before changing. */
			sppp_cp_send (sp, PPP_IPCP, LCP_CONF_REJ, h->ident,
				len-4, h+1);

			switch (sp->ipcp.state) {
			case IPCP_STATE_OPENED:
				/* Initiate renegotiation. */
				sppp_ipcp_open (sp);
				/* fall through... */
			case IPCP_STATE_ACK_SENT:
				/* Go to closed state. */
				sp->ipcp.state = IPCP_STATE_CLOSED;
			}
		} else {
			/* Request carried no options: acceptable as-is. */
			/* Send Configure-Ack packet. */
			sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_ACK, h->ident,
				0, NULL);
			/* Change the state. */
			if (sp->ipcp.state == IPCP_STATE_ACK_RCVD)
				sp->ipcp.state = IPCP_STATE_OPENED;
			else
				sp->ipcp.state = IPCP_STATE_ACK_SENT;
		}
		break;
	case IPCP_CONF_ACK:
		/* Ignore ACKs that do not match our outstanding request id. */
		if (h->ident != sp->ipcp.confid)
			break;
		sppp_clear_timeout (sp);
		switch (sp->ipcp.state) {
		case IPCP_STATE_CLOSED:
			/* Our request was ACKed; wait (5s) for the peer's. */
			sp->ipcp.state = IPCP_STATE_ACK_RCVD;
			sppp_set_timeout (sp, 5);
			break;
		case IPCP_STATE_ACK_SENT:
			/* Both sides ACKed: negotiation complete. */
			sp->ipcp.state = IPCP_STATE_OPENED;
			break;
		}
		break;
	case IPCP_CONF_NAK:
	case IPCP_CONF_REJ:
		if (h->ident != sp->ipcp.confid)
			break;
		sppp_clear_timeout (sp);
		/* Initiate renegotiation. */
		sppp_ipcp_open (sp);
		if (sp->ipcp.state != IPCP_STATE_ACK_SENT)
			/* Go to closed state. */
			sp->ipcp.state = IPCP_STATE_CLOSED;
		break;
	case IPCP_TERM_REQ:
		/* Send Terminate-Ack packet. */
		sppp_cp_send (sp, PPP_IPCP, IPCP_TERM_ACK, h->ident, 0, NULL);
		/* Go to closed state. */
		sp->ipcp.state = IPCP_STATE_CLOSED;
		/* Initiate renegotiation. */
		sppp_ipcp_open (sp);
		break;
	case IPCP_TERM_ACK:
		/* Ignore for now. */
	case IPCP_CODE_REJ:
		/* Ignore for now. */
		break;
	}
}
1299
1300static void sppp_lcp_open (struct sppp *sp)
1301{
1302 char opt[6];
1303
1304 if (! sp->lcp.magic)
1305 sp->lcp.magic = jiffies;
1306 opt[0] = LCP_OPT_MAGIC;
1307 opt[1] = sizeof (opt);
1308 opt[2] = sp->lcp.magic >> 24;
1309 opt[3] = sp->lcp.magic >> 16;
1310 opt[4] = sp->lcp.magic >> 8;
1311 opt[5] = sp->lcp.magic;
1312 sp->lcp.confid = ++sp->pp_seq;
1313 sppp_cp_send (sp, PPP_LCP, LCP_CONF_REQ, sp->lcp.confid,
1314 sizeof (opt), &opt);
1315 sppp_set_timeout (sp, 2);
1316}
1317
/*
 * Start (or restart) IPCP negotiation: send an empty Configure-Request
 * and arm the retransmit timeout.
 */
static void sppp_ipcp_open (struct sppp *sp)
{
	/* Fresh id so stale replies are ignored. */
	sp->ipcp.confid = ++sp->pp_seq;
	sppp_cp_send (sp, PPP_IPCP, IPCP_CONF_REQ, sp->ipcp.confid, 0, NULL);
	sppp_set_timeout (sp, 2);
}
1324
1325/*
1326 * Process PPP control protocol timeouts.
1327 */
1328
/*
 * Timer callback: retransmit the appropriate Configure-Request for
 * whichever negotiation (LCP first, then IPCP) is still incomplete.
 * Does nothing when the interface is down or running Cisco HDLC
 * keepalives instead of PPP.
 */
static void sppp_cp_timeout (unsigned long arg)
{
	struct sppp *sp = (struct sppp*) arg;
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);

	/* The pending-timeout flag is consumed by this invocation. */
	sp->pp_flags &= ~PP_TIMO;
	if (! (sp->pp_if->flags & IFF_UP) || (sp->pp_flags & PP_CISCO)) {
		spin_unlock_irqrestore(&sp->lock, flags);
		return;
	}
	switch (sp->lcp.state) {
	case LCP_STATE_CLOSED:
		/* No ACK for Configure-Request, retry. */
		sppp_lcp_open (sp);
		break;
	case LCP_STATE_ACK_RCVD:
		/* ACK got, but no Configure-Request for peer, retry. */
		sppp_lcp_open (sp);
		sp->lcp.state = LCP_STATE_CLOSED;
		break;
	case LCP_STATE_ACK_SENT:
		/* ACK sent but no ACK for Configure-Request, retry. */
		sppp_lcp_open (sp);
		break;
	case LCP_STATE_OPENED:
		/* LCP is already OK, try IPCP. */
		switch (sp->ipcp.state) {
		case IPCP_STATE_CLOSED:
			/* No ACK for Configure-Request, retry. */
			sppp_ipcp_open (sp);
			break;
		case IPCP_STATE_ACK_RCVD:
			/* ACK got, but no Configure-Request for peer, retry. */
			sppp_ipcp_open (sp);
			sp->ipcp.state = IPCP_STATE_CLOSED;
			break;
		case IPCP_STATE_ACK_SENT:
			/* ACK sent but no ACK for Configure-Request, retry. */
			sppp_ipcp_open (sp);
			break;
		case IPCP_STATE_OPENED:
			/* IPCP is OK. */
			break;
		}
		break;
	}
	spin_unlock_irqrestore(&sp->lock, flags);
	/* Push out anything the retransmissions queued, outside the lock. */
	sppp_flush_xmit();
}
1380
1381static char *sppp_lcp_type_name (u8 type)
1382{
1383 static char buf [8];
1384 switch (type) {
1385 case LCP_CONF_REQ: return ("conf-req");
1386 case LCP_CONF_ACK: return ("conf-ack");
1387 case LCP_CONF_NAK: return ("conf-nack");
1388 case LCP_CONF_REJ: return ("conf-rej");
1389 case LCP_TERM_REQ: return ("term-req");
1390 case LCP_TERM_ACK: return ("term-ack");
1391 case LCP_CODE_REJ: return ("code-rej");
1392 case LCP_PROTO_REJ: return ("proto-rej");
1393 case LCP_ECHO_REQ: return ("echo-req");
1394 case LCP_ECHO_REPLY: return ("echo-reply");
1395 case LCP_DISC_REQ: return ("discard-req");
1396 }
1397 sprintf (buf, "%xh", type);
1398 return (buf);
1399}
1400
1401static char *sppp_ipcp_type_name (u8 type)
1402{
1403 static char buf [8];
1404 switch (type) {
1405 case IPCP_CONF_REQ: return ("conf-req");
1406 case IPCP_CONF_ACK: return ("conf-ack");
1407 case IPCP_CONF_NAK: return ("conf-nack");
1408 case IPCP_CONF_REJ: return ("conf-rej");
1409 case IPCP_TERM_REQ: return ("term-req");
1410 case IPCP_TERM_ACK: return ("term-ack");
1411 case IPCP_CODE_REJ: return ("code-rej");
1412 }
1413 sprintf (buf, "%xh", type);
1414 return (buf);
1415}
1416
1417static void sppp_print_bytes (u_char *p, u16 len)
1418{
1419 printk (" %x", *p++);
1420 while (--len > 0)
1421 printk ("-%x", *p++);
1422}
1423
1424/**
1425 * sppp_rcv - receive and process a WAN PPP frame
1426 * @skb: The buffer to process
1427 * @dev: The device it arrived on
1428 * @p: Unused
1429 * @orig_dev: Unused
1430 *
1431 * Protocol glue. This drives the deferred processing mode the poorer
1432 * cards use. This can be called directly by cards that do not have
1433 * timing constraints but is normally called from the network layer
1434 * after interrupt servicing to process frames queued via netif_rx.
1435 */
1436
1437static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev)
1438{
1439 if (dev_net(dev) != &init_net) {
1440 kfree_skb(skb);
1441 return 0;
1442 }
1443
1444 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
1445 return NET_RX_DROP;
1446 sppp_input(dev,skb);
1447 return 0;
1448}
1449
/* Receive hook for ETH_P_WAN_PPP frames; registered in sync_ppp_init()
 * and removed in sync_ppp_cleanup(). */
static struct packet_type sppp_packet_type = {
	.type =	__constant_htons(ETH_P_WAN_PPP),
	.func =	sppp_rcv,
};
1454
/* Load-time banner printed by sync_ppp_init(); __initdata, so the
 * string is discarded once module initialisation finishes. */
static char banner[] __initdata =
	KERN_INFO "Cronyx Ltd, Synchronous PPP and CISCO HDLC (c) 1994\n"
	KERN_INFO "Linux port (c) 1998 Building Number Three Ltd & "
	"Jan \"Yenya\" Kasprzak.\n";
1459
/* Module init: normalise the debug flag, announce ourselves, and
 * register the ETH_P_WAN_PPP packet handler. */
static int __init sync_ppp_init(void)
{
	/* Any non-zero "debug" module parameter enables full debugging. */
	if(debug)
		debug=PP_DEBUG;
	printk(banner);
	skb_queue_head_init(&tx_queue);
	dev_add_pack(&sppp_packet_type);
	return 0;
}
1469
1470
/* Module exit: stop receiving ETH_P_WAN_PPP frames. */
static void __exit sync_ppp_cleanup(void)
{
	dev_remove_pack(&sppp_packet_type);
}
1475
module_init(sync_ppp_init);
module_exit(sync_ppp_cleanup);
/* "debug" is load-time only (perm 0 keeps it out of sysfs). */
module_param(debug, int, 0);
MODULE_LICENSE("GPL");
1480
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index a8a5ca0ee6c2..4bffb67ebcae 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -220,7 +220,6 @@ static inline void wanxl_rx_intr(card_t *card)
220#endif 220#endif
221 dev->stats.rx_packets++; 221 dev->stats.rx_packets++;
222 dev->stats.rx_bytes += skb->len; 222 dev->stats.rx_bytes += skb->len;
223 dev->last_rx = jiffies;
224 skb->protocol = hdlc_type_trans(skb, dev); 223 skb->protocol = hdlc_type_trans(skb, dev);
225 netif_rx(skb); 224 netif_rx(skb);
226 skb = NULL; 225 skb = NULL;
@@ -411,12 +410,12 @@ static int wanxl_open(struct net_device *dev)
411 writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr); 410 writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);
412 411
413 timeout = jiffies + HZ; 412 timeout = jiffies + HZ;
414 do 413 do {
415 if (get_status(port)->open) { 414 if (get_status(port)->open) {
416 netif_start_queue(dev); 415 netif_start_queue(dev);
417 return 0; 416 return 0;
418 } 417 }
419 while (time_after(timeout, jiffies)); 418 } while (time_after(timeout, jiffies));
420 419
421 printk(KERN_ERR "%s: unable to open port\n", dev->name); 420 printk(KERN_ERR "%s: unable to open port\n", dev->name);
422 /* ask the card to close the port, should it be still alive */ 421 /* ask the card to close the port, should it be still alive */
@@ -438,10 +437,10 @@ static int wanxl_close(struct net_device *dev)
438 port->card->plx + PLX_DOORBELL_TO_CARD); 437 port->card->plx + PLX_DOORBELL_TO_CARD);
439 438
440 timeout = jiffies + HZ; 439 timeout = jiffies + HZ;
441 do 440 do {
442 if (!get_status(port)->open) 441 if (!get_status(port)->open)
443 break; 442 break;
444 while (time_after(timeout, jiffies)); 443 } while (time_after(timeout, jiffies));
445 444
446 if (get_status(port)->open) 445 if (get_status(port)->open)
447 printk(KERN_ERR "%s: unable to close port\n", dev->name); 446 printk(KERN_ERR "%s: unable to close port\n", dev->name);
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 2a6c7a60756f..e6e2ce3e7bcf 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -64,7 +64,7 @@ static struct x25_asy *x25_asy_alloc(void)
64 if (dev == NULL) 64 if (dev == NULL)
65 break; 65 break;
66 66
67 sl = dev->priv; 67 sl = netdev_priv(dev);
68 /* Not in use ? */ 68 /* Not in use ? */
69 if (!test_and_set_bit(SLF_INUSE, &sl->flags)) 69 if (!test_and_set_bit(SLF_INUSE, &sl->flags))
70 return sl; 70 return sl;
@@ -86,7 +86,7 @@ static struct x25_asy *x25_asy_alloc(void)
86 return NULL; 86 return NULL;
87 87
88 /* Initialize channel control data */ 88 /* Initialize channel control data */
89 sl = dev->priv; 89 sl = netdev_priv(dev);
90 dev->base_addr = i; 90 dev->base_addr = i;
91 91
92 /* register device so that it can be ifconfig'ed */ 92 /* register device so that it can be ifconfig'ed */
@@ -120,7 +120,7 @@ static void x25_asy_free(struct x25_asy *sl)
120 120
121static int x25_asy_change_mtu(struct net_device *dev, int newmtu) 121static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
122{ 122{
123 struct x25_asy *sl = dev->priv; 123 struct x25_asy *sl = netdev_priv(dev);
124 unsigned char *xbuff, *rbuff; 124 unsigned char *xbuff, *rbuff;
125 int len = 2 * newmtu; 125 int len = 2 * newmtu;
126 126
@@ -211,7 +211,6 @@ static void x25_asy_bump(struct x25_asy *sl)
211 printk(KERN_DEBUG "x25_asy: data received err - %d\n", err); 211 printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
212 } else { 212 } else {
213 netif_rx(skb); 213 netif_rx(skb);
214 sl->dev->last_rx = jiffies;
215 sl->stats.rx_packets++; 214 sl->stats.rx_packets++;
216 } 215 }
217} 216}
@@ -243,7 +242,7 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
243 * if we did not request it before write operation. 242 * if we did not request it before write operation.
244 * 14 Oct 1994 Dmitry Gorodchanin. 243 * 14 Oct 1994 Dmitry Gorodchanin.
245 */ 244 */
246 sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); 245 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
247 actual = sl->tty->ops->write(sl->tty, sl->xbuff, count); 246 actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
248 sl->xleft = count - actual; 247 sl->xleft = count - actual;
249 sl->xhead = sl->xbuff + actual; 248 sl->xhead = sl->xbuff + actual;
@@ -258,7 +257,7 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
258static void x25_asy_write_wakeup(struct tty_struct *tty) 257static void x25_asy_write_wakeup(struct tty_struct *tty)
259{ 258{
260 int actual; 259 int actual;
261 struct x25_asy *sl = (struct x25_asy *) tty->disc_data; 260 struct x25_asy *sl = tty->disc_data;
262 261
263 /* First make sure we're connected. */ 262 /* First make sure we're connected. */
264 if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev)) 263 if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
@@ -268,7 +267,7 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
268 /* Now serial buffer is almost free & we can start 267 /* Now serial buffer is almost free & we can start
269 * transmission of another packet */ 268 * transmission of another packet */
270 sl->stats.tx_packets++; 269 sl->stats.tx_packets++;
271 tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 270 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
272 x25_asy_unlock(sl); 271 x25_asy_unlock(sl);
273 return; 272 return;
274 } 273 }
@@ -280,7 +279,7 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
280 279
281static void x25_asy_timeout(struct net_device *dev) 280static void x25_asy_timeout(struct net_device *dev)
282{ 281{
283 struct x25_asy *sl = dev->priv; 282 struct x25_asy *sl = netdev_priv(dev);
284 283
285 spin_lock(&sl->lock); 284 spin_lock(&sl->lock);
286 if (netif_queue_stopped(dev)) { 285 if (netif_queue_stopped(dev)) {
@@ -291,7 +290,7 @@ static void x25_asy_timeout(struct net_device *dev)
291 (tty_chars_in_buffer(sl->tty) || sl->xleft) ? 290 (tty_chars_in_buffer(sl->tty) || sl->xleft) ?
292 "bad line quality" : "driver error"); 291 "bad line quality" : "driver error");
293 sl->xleft = 0; 292 sl->xleft = 0;
294 sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 293 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
295 x25_asy_unlock(sl); 294 x25_asy_unlock(sl);
296 } 295 }
297 spin_unlock(&sl->lock); 296 spin_unlock(&sl->lock);
@@ -301,7 +300,7 @@ static void x25_asy_timeout(struct net_device *dev)
301 300
302static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev) 301static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
303{ 302{
304 struct x25_asy *sl = dev->priv; 303 struct x25_asy *sl = netdev_priv(dev);
305 int err; 304 int err;
306 305
307 if (!netif_running(sl->dev)) { 306 if (!netif_running(sl->dev)) {
@@ -361,7 +360,6 @@ static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
361 360
362static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) 361static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
363{ 362{
364 skb->dev->last_rx = jiffies;
365 return netif_rx(skb); 363 return netif_rx(skb);
366} 364}
367 365
@@ -373,7 +371,7 @@ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
373 371
374static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb) 372static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
375{ 373{
376 struct x25_asy *sl = dev->priv; 374 struct x25_asy *sl = netdev_priv(dev);
377 375
378 spin_lock(&sl->lock); 376 spin_lock(&sl->lock);
379 if (netif_queue_stopped(sl->dev) || sl->tty == NULL) { 377 if (netif_queue_stopped(sl->dev) || sl->tty == NULL) {
@@ -398,7 +396,7 @@ static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
398 396
399static void x25_asy_connected(struct net_device *dev, int reason) 397static void x25_asy_connected(struct net_device *dev, int reason)
400{ 398{
401 struct x25_asy *sl = dev->priv; 399 struct x25_asy *sl = netdev_priv(dev);
402 struct sk_buff *skb; 400 struct sk_buff *skb;
403 unsigned char *ptr; 401 unsigned char *ptr;
404 402
@@ -413,12 +411,11 @@ static void x25_asy_connected(struct net_device *dev, int reason)
413 411
414 skb->protocol = x25_type_trans(skb, sl->dev); 412 skb->protocol = x25_type_trans(skb, sl->dev);
415 netif_rx(skb); 413 netif_rx(skb);
416 sl->dev->last_rx = jiffies;
417} 414}
418 415
419static void x25_asy_disconnected(struct net_device *dev, int reason) 416static void x25_asy_disconnected(struct net_device *dev, int reason)
420{ 417{
421 struct x25_asy *sl = dev->priv; 418 struct x25_asy *sl = netdev_priv(dev);
422 struct sk_buff *skb; 419 struct sk_buff *skb;
423 unsigned char *ptr; 420 unsigned char *ptr;
424 421
@@ -433,7 +430,6 @@ static void x25_asy_disconnected(struct net_device *dev, int reason)
433 430
434 skb->protocol = x25_type_trans(skb, sl->dev); 431 skb->protocol = x25_type_trans(skb, sl->dev);
435 netif_rx(skb); 432 netif_rx(skb);
436 sl->dev->last_rx = jiffies;
437} 433}
438 434
439static struct lapb_register_struct x25_asy_callbacks = { 435static struct lapb_register_struct x25_asy_callbacks = {
@@ -450,7 +446,7 @@ static struct lapb_register_struct x25_asy_callbacks = {
450/* Open the low-level part of the X.25 channel. Easy! */ 446/* Open the low-level part of the X.25 channel. Easy! */
451static int x25_asy_open(struct net_device *dev) 447static int x25_asy_open(struct net_device *dev)
452{ 448{
453 struct x25_asy *sl = dev->priv; 449 struct x25_asy *sl = netdev_priv(dev);
454 unsigned long len; 450 unsigned long len;
455 int err; 451 int err;
456 452
@@ -499,12 +495,12 @@ norbuff:
499/* Close the low-level part of the X.25 channel. Easy! */ 495/* Close the low-level part of the X.25 channel. Easy! */
500static int x25_asy_close(struct net_device *dev) 496static int x25_asy_close(struct net_device *dev)
501{ 497{
502 struct x25_asy *sl = dev->priv; 498 struct x25_asy *sl = netdev_priv(dev);
503 int err; 499 int err;
504 500
505 spin_lock(&sl->lock); 501 spin_lock(&sl->lock);
506 if (sl->tty) 502 if (sl->tty)
507 sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 503 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
508 504
509 netif_stop_queue(dev); 505 netif_stop_queue(dev);
510 sl->rcount = 0; 506 sl->rcount = 0;
@@ -527,7 +523,7 @@ static int x25_asy_close(struct net_device *dev)
527static void x25_asy_receive_buf(struct tty_struct *tty, 523static void x25_asy_receive_buf(struct tty_struct *tty,
528 const unsigned char *cp, char *fp, int count) 524 const unsigned char *cp, char *fp, int count)
529{ 525{
530 struct x25_asy *sl = (struct x25_asy *) tty->disc_data; 526 struct x25_asy *sl = tty->disc_data;
531 527
532 if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev)) 528 if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
533 return; 529 return;
@@ -555,7 +551,7 @@ static void x25_asy_receive_buf(struct tty_struct *tty,
555 551
556static int x25_asy_open_tty(struct tty_struct *tty) 552static int x25_asy_open_tty(struct tty_struct *tty)
557{ 553{
558 struct x25_asy *sl = (struct x25_asy *) tty->disc_data; 554 struct x25_asy *sl = tty->disc_data;
559 int err; 555 int err;
560 556
561 if (tty->ops->write == NULL) 557 if (tty->ops->write == NULL)
@@ -596,7 +592,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
596 */ 592 */
597static void x25_asy_close_tty(struct tty_struct *tty) 593static void x25_asy_close_tty(struct tty_struct *tty)
598{ 594{
599 struct x25_asy *sl = (struct x25_asy *) tty->disc_data; 595 struct x25_asy *sl = tty->disc_data;
600 596
601 /* First make sure we're connected. */ 597 /* First make sure we're connected. */
602 if (!sl || sl->magic != X25_ASY_MAGIC) 598 if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -615,7 +611,7 @@ static void x25_asy_close_tty(struct tty_struct *tty)
615 611
616static struct net_device_stats *x25_asy_get_stats(struct net_device *dev) 612static struct net_device_stats *x25_asy_get_stats(struct net_device *dev)
617{ 613{
618 struct x25_asy *sl = dev->priv; 614 struct x25_asy *sl = netdev_priv(dev);
619 return &sl->stats; 615 return &sl->stats;
620} 616}
621 617
@@ -624,7 +620,7 @@ static struct net_device_stats *x25_asy_get_stats(struct net_device *dev)
624 * STANDARD X.25 ENCAPSULATION * 620 * STANDARD X.25 ENCAPSULATION *
625 ************************************************************************/ 621 ************************************************************************/
626 622
627int x25_asy_esc(unsigned char *s, unsigned char *d, int len) 623static int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
628{ 624{
629 unsigned char *ptr = d; 625 unsigned char *ptr = d;
630 unsigned char c; 626 unsigned char c;
@@ -696,7 +692,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
696static int x25_asy_ioctl(struct tty_struct *tty, struct file *file, 692static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
697 unsigned int cmd, unsigned long arg) 693 unsigned int cmd, unsigned long arg)
698{ 694{
699 struct x25_asy *sl = (struct x25_asy *) tty->disc_data; 695 struct x25_asy *sl = tty->disc_data;
700 696
701 /* First make sure we're connected. */ 697 /* First make sure we're connected. */
702 if (!sl || sl->magic != X25_ASY_MAGIC) 698 if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -717,7 +713,7 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
717 713
718static int x25_asy_open_dev(struct net_device *dev) 714static int x25_asy_open_dev(struct net_device *dev)
719{ 715{
720 struct x25_asy *sl = dev->priv; 716 struct x25_asy *sl = netdev_priv(dev);
721 if (sl->tty == NULL) 717 if (sl->tty == NULL)
722 return -ENODEV; 718 return -ENODEV;
723 return 0; 719 return 0;
@@ -726,7 +722,7 @@ static int x25_asy_open_dev(struct net_device *dev)
726/* Initialise the X.25 driver. Called by the device init code */ 722/* Initialise the X.25 driver. Called by the device init code */
727static void x25_asy_setup(struct net_device *dev) 723static void x25_asy_setup(struct net_device *dev)
728{ 724{
729 struct x25_asy *sl = dev->priv; 725 struct x25_asy *sl = netdev_priv(dev);
730 726
731 sl->magic = X25_ASY_MAGIC; 727 sl->magic = X25_ASY_MAGIC;
732 sl->dev = dev; 728 sl->dev = dev;
@@ -793,7 +789,7 @@ static void __exit exit_x25_asy(void)
793 for (i = 0; i < x25_asy_maxdev; i++) { 789 for (i = 0; i < x25_asy_maxdev; i++) {
794 dev = x25_asy_devs[i]; 790 dev = x25_asy_devs[i];
795 if (dev) { 791 if (dev) {
796 struct x25_asy *sl = dev->priv; 792 struct x25_asy *sl = netdev_priv(dev);
797 793
798 spin_lock_bh(&sl->lock); 794 spin_lock_bh(&sl->lock);
799 if (sl->tty) 795 if (sl->tty)
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 5bf7e01ef0e9..3d00971fe5ee 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -601,24 +601,18 @@ static void z8530_dma_status(struct z8530_channel *chan)
601 write_zsctrl(chan, RES_H_IUS); 601 write_zsctrl(chan, RES_H_IUS);
602} 602}
603 603
604struct z8530_irqhandler z8530_dma_sync= 604static struct z8530_irqhandler z8530_dma_sync = {
605{
606 z8530_dma_rx, 605 z8530_dma_rx,
607 z8530_dma_tx, 606 z8530_dma_tx,
608 z8530_dma_status 607 z8530_dma_status
609}; 608};
610 609
611EXPORT_SYMBOL(z8530_dma_sync); 610static struct z8530_irqhandler z8530_txdma_sync = {
612
613struct z8530_irqhandler z8530_txdma_sync=
614{
615 z8530_rx, 611 z8530_rx,
616 z8530_dma_tx, 612 z8530_dma_tx,
617 z8530_dma_status 613 z8530_dma_status
618}; 614};
619 615
620EXPORT_SYMBOL(z8530_txdma_sync);
621
622/** 616/**
623 * z8530_rx_clear - Handle RX events from a stopped chip 617 * z8530_rx_clear - Handle RX events from a stopped chip
624 * @c: Z8530 channel to shut up 618 * @c: Z8530 channel to shut up
@@ -710,7 +704,7 @@ EXPORT_SYMBOL(z8530_nop);
710irqreturn_t z8530_interrupt(int irq, void *dev_id) 704irqreturn_t z8530_interrupt(int irq, void *dev_id)
711{ 705{
712 struct z8530_dev *dev=dev_id; 706 struct z8530_dev *dev=dev_id;
713 u8 intr; 707 u8 uninitialized_var(intr);
714 static volatile int locker=0; 708 static volatile int locker=0;
715 int work=0; 709 int work=0;
716 struct z8530_irqhandler *irqs; 710 struct z8530_irqhandler *irqs;
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index fa14255282af..3c1edda08d3d 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -147,6 +147,20 @@ out:
147} 147}
148#endif 148#endif
149 149
150static const struct net_device_ops wd_netdev_ops = {
151 .ndo_open = wd_open,
152 .ndo_stop = wd_close,
153 .ndo_start_xmit = ei_start_xmit,
154 .ndo_tx_timeout = ei_tx_timeout,
155 .ndo_get_stats = ei_get_stats,
156 .ndo_set_multicast_list = ei_set_multicast_list,
157 .ndo_validate_addr = eth_validate_addr,
158 .ndo_change_mtu = eth_change_mtu,
159#ifdef CONFIG_NET_POLL_CONTROLLER
160 .ndo_poll_controller = ei_poll,
161#endif
162};
163
150static int __init wd_probe1(struct net_device *dev, int ioaddr) 164static int __init wd_probe1(struct net_device *dev, int ioaddr)
151{ 165{
152 int i; 166 int i;
@@ -156,7 +170,6 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
156 int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */ 170 int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
157 const char *model_name; 171 const char *model_name;
158 static unsigned version_printed; 172 static unsigned version_printed;
159 DECLARE_MAC_BUF(mac);
160 173
161 for (i = 0; i < 8; i++) 174 for (i = 0; i < 8; i++)
162 checksum += inb(ioaddr + 8 + i); 175 checksum += inb(ioaddr + 8 + i);
@@ -178,8 +191,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
178 for (i = 0; i < 6; i++) 191 for (i = 0; i < 6; i++)
179 dev->dev_addr[i] = inb(ioaddr + 8 + i); 192 dev->dev_addr[i] = inb(ioaddr + 8 + i);
180 193
181 printk("%s: WD80x3 at %#3x, %s", 194 printk("%s: WD80x3 at %#3x, %pM",
182 dev->name, ioaddr, print_mac(mac, dev->dev_addr)); 195 dev->name, ioaddr, dev->dev_addr);
183 196
184 /* The following PureData probe code was contributed by 197 /* The following PureData probe code was contributed by
185 Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software 198 Mike Jagdis <jaggy@purplet.demon.co.uk>. Puredata does software
@@ -332,11 +345,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
332 ei_status.block_input = &wd_block_input; 345 ei_status.block_input = &wd_block_input;
333 ei_status.block_output = &wd_block_output; 346 ei_status.block_output = &wd_block_output;
334 ei_status.get_8390_hdr = &wd_get_8390_hdr; 347 ei_status.get_8390_hdr = &wd_get_8390_hdr;
335 dev->open = &wd_open; 348
336 dev->stop = &wd_close; 349 dev->netdev_ops = &wd_netdev_ops;
337#ifdef CONFIG_NET_POLL_CONTROLLER
338 dev->poll_controller = ei_poll;
339#endif
340 NS8390_init(dev, 0); 350 NS8390_init(dev, 0);
341 351
342#if 1 352#if 1
@@ -366,8 +376,7 @@ wd_open(struct net_device *dev)
366 outb(ei_status.reg5, ioaddr+WD_CMDREG5); 376 outb(ei_status.reg5, ioaddr+WD_CMDREG5);
367 outb(ei_status.reg0, ioaddr); /* WD_CMDREG */ 377 outb(ei_status.reg0, ioaddr); /* WD_CMDREG */
368 378
369 ei_open(dev); 379 return ei_open(dev);
370 return 0;
371} 380}
372 381
373static void 382static void
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 45bdf0b339bb..ea543fcf2687 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -123,154 +123,11 @@ config PCMCIA_RAYCS
123 To compile this driver as a module, choose M here: the module will be 123 To compile this driver as a module, choose M here: the module will be
124 called ray_cs. If unsure, say N. 124 called ray_cs. If unsure, say N.
125 125
126config IPW2100
127 tristate "Intel PRO/Wireless 2100 Network Connection"
128 depends on PCI && WLAN_80211
129 select WIRELESS_EXT
130 select FW_LOADER
131 select IEEE80211
132 ---help---
133 A driver for the Intel PRO/Wireless 2100 Network
134 Connection 802.11b wireless network adapter.
135
136 See <file:Documentation/networking/README.ipw2100> for information on
137 the capabilities currently enabled in this driver and for tips
138 for debugging issues and problems.
139
140 In order to use this driver, you will need a firmware image for it.
141 You can obtain the firmware from
142 <http://ipw2100.sf.net/>. Once you have the firmware image, you
143 will need to place it in /lib/firmware.
144
145 You will also very likely need the Wireless Tools in order to
146 configure your card:
147
148 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
149
150 It is recommended that you compile this driver as a module (M)
151 rather than built-in (Y). This driver requires firmware at device
152 initialization time, and when built-in this typically happens
153 before the filesystem is accessible (hence firmware will be
154 unavailable and initialization will fail). If you do choose to build
155 this driver into your kernel image, you can avoid this problem by
156 including the firmware and a firmware loader in an initramfs.
157
158config IPW2100_MONITOR
159 bool "Enable promiscuous mode"
160 depends on IPW2100
161 ---help---
162 Enables promiscuous/monitor mode support for the ipw2100 driver.
163 With this feature compiled into the driver, you can switch to
164 promiscuous mode via the Wireless Tool's Monitor mode. While in this
165 mode, no packets can be sent.
166
167config IPW2100_DEBUG
168 bool "Enable full debugging output in IPW2100 module."
169 depends on IPW2100
170 ---help---
171 This option will enable debug tracing output for the IPW2100.
172
173 This will result in the kernel module being ~60k larger. You can
174 control which debug output is sent to the kernel log by setting the
175 value in
176
177 /sys/bus/pci/drivers/ipw2100/debug_level
178
179 This entry will only exist if this option is enabled.
180
181 If you are not trying to debug or develop the IPW2100 driver, you
182 most likely want to say N here.
183
184config IPW2200
185 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
186 depends on PCI && WLAN_80211
187 select WIRELESS_EXT
188 select FW_LOADER
189 select IEEE80211
190 ---help---
191 A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
192 Connection adapters.
193
194 See <file:Documentation/networking/README.ipw2200> for
195 information on the capabilities currently enabled in this
196 driver and for tips for debugging issues and problems.
197
198 In order to use this driver, you will need a firmware image for it.
199 You can obtain the firmware from
200 <http://ipw2200.sf.net/>. See the above referenced README.ipw2200
201 for information on where to install the firmware images.
202
203 You will also very likely need the Wireless Tools in order to
204 configure your card:
205
206 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
207
208 It is recommended that you compile this driver as a module (M)
209 rather than built-in (Y). This driver requires firmware at device
210 initialization time, and when built-in this typically happens
211 before the filesystem is accessible (hence firmware will be
212 unavailable and initialization will fail). If you do choose to build
213 this driver into your kernel image, you can avoid this problem by
214 including the firmware and a firmware loader in an initramfs.
215
216config IPW2200_MONITOR
217 bool "Enable promiscuous mode"
218 depends on IPW2200
219 ---help---
220 Enables promiscuous/monitor mode support for the ipw2200 driver.
221 With this feature compiled into the driver, you can switch to
222 promiscuous mode via the Wireless Tool's Monitor mode. While in this
223 mode, no packets can be sent.
224
225config IPW2200_RADIOTAP
226 bool "Enable radiotap format 802.11 raw packet support"
227 depends on IPW2200_MONITOR
228
229config IPW2200_PROMISCUOUS
230 bool "Enable creation of a RF radiotap promiscuous interface"
231 depends on IPW2200_MONITOR
232 select IPW2200_RADIOTAP
233 ---help---
234 Enables the creation of a second interface prefixed 'rtap'.
235 This second interface will provide every received in radiotap
236 format.
237
238 This is useful for performing wireless network analysis while
239 maintaining an active association.
240
241 Example usage:
242
243 % modprobe ipw2200 rtap_iface=1
244 % ifconfig rtap0 up
245 % tethereal -i rtap0
246
247 If you do not specify 'rtap_iface=1' as a module parameter then
248 the rtap interface will not be created and you will need to turn
249 it on via sysfs:
250
251 % echo 1 > /sys/bus/pci/drivers/ipw2200/*/rtap_iface
252
253config IPW2200_QOS
254 bool "Enable QoS support"
255 depends on IPW2200 && EXPERIMENTAL
256
257config IPW2200_DEBUG
258 bool "Enable full debugging output in IPW2200 module."
259 depends on IPW2200
260 ---help---
261 This option will enable low level debug tracing output for IPW2200.
262
263 Note, normal debug code is already compiled in. This low level
264 debug option enables debug on hot paths (e.g Tx, Rx, ISR) and
265 will result in the kernel module being ~70 larger. Most users
266 will typically not need this high verbosity debug information.
267
268 If you are not sure, say N here.
269
270config LIBERTAS 126config LIBERTAS
271 tristate "Marvell 8xxx Libertas WLAN driver support" 127 tristate "Marvell 8xxx Libertas WLAN driver support"
272 depends on WLAN_80211 128 depends on WLAN_80211
273 select WIRELESS_EXT 129 select WIRELESS_EXT
130 select LIB80211
274 select FW_LOADER 131 select FW_LOADER
275 ---help--- 132 ---help---
276 A library for Marvell Libertas 8xxx devices. 133 A library for Marvell Libertas 8xxx devices.
@@ -357,6 +214,21 @@ config HERMES
357 configure your card and that /etc/pcmcia/wireless.opts works : 214 configure your card and that /etc/pcmcia/wireless.opts works :
358 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html> 215 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
359 216
217config HERMES_CACHE_FW_ON_INIT
218 bool "Cache Hermes firmware on driver initialisation"
219 depends on HERMES
220 default y
221 ---help---
222 Say Y to cache any firmware required by the Hermes drivers
223 on startup. The firmware will remain cached until the
224 driver is unloaded. The cache uses 64K of RAM.
225
226 Otherwise load the firmware from userspace as required. In
227 this case the driver should be unloaded and restarted
228 whenever the firmware is changed.
229
230 If you are not sure, say Y.
231
360config APPLE_AIRPORT 232config APPLE_AIRPORT
361 tristate "Apple Airport support (built-in)" 233 tristate "Apple Airport support (built-in)"
362 depends on PPC_PMAC && HERMES 234 depends on PPC_PMAC && HERMES
@@ -651,7 +523,7 @@ config RTL8180
651 523
652config RTL8187 524config RTL8187
653 tristate "Realtek 8187 and 8187B USB support" 525 tristate "Realtek 8187 and 8187B USB support"
654 depends on MAC80211 && USB && WLAN_80211 && EXPERIMENTAL 526 depends on MAC80211 && USB && WLAN_80211
655 select EEPROM_93CX6 527 select EEPROM_93CX6
656 ---help--- 528 ---help---
657 This is a driver for RTL8187 and RTL8187B based cards. 529 This is a driver for RTL8187 and RTL8187B based cards.
@@ -711,6 +583,7 @@ config MAC80211_HWSIM
711source "drivers/net/wireless/p54/Kconfig" 583source "drivers/net/wireless/p54/Kconfig"
712source "drivers/net/wireless/ath5k/Kconfig" 584source "drivers/net/wireless/ath5k/Kconfig"
713source "drivers/net/wireless/ath9k/Kconfig" 585source "drivers/net/wireless/ath9k/Kconfig"
586source "drivers/net/wireless/ipw2x00/Kconfig"
714source "drivers/net/wireless/iwlwifi/Kconfig" 587source "drivers/net/wireless/iwlwifi/Kconfig"
715source "drivers/net/wireless/hostap/Kconfig" 588source "drivers/net/wireless/hostap/Kconfig"
716source "drivers/net/wireless/b43/Kconfig" 589source "drivers/net/wireless/b43/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 59d2d805f60b..ac590e1ca8be 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -2,9 +2,8 @@
2# Makefile for the Linux Wireless network device drivers. 2# Makefile for the Linux Wireless network device drivers.
3# 3#
4 4
5obj-$(CONFIG_IPW2100) += ipw2100.o 5obj-$(CONFIG_IPW2100) += ipw2x00/
6 6obj-$(CONFIG_IPW2200) += ipw2x00/
7obj-$(CONFIG_IPW2200) += ipw2200.o
8 7
9obj-$(CONFIG_STRIP) += strip.o 8obj-$(CONFIG_STRIP) += strip.o
10obj-$(CONFIG_ARLAN) += arlan.o 9obj-$(CONFIG_ARLAN) += arlan.o
@@ -16,14 +15,7 @@ obj-$(CONFIG_WAVELAN) += wavelan.o
16obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o 15obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o
17obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o 16obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o
18 17
19obj-$(CONFIG_HERMES) += orinoco.o hermes.o hermes_dld.o 18obj-$(CONFIG_HERMES) += orinoco/
20obj-$(CONFIG_PCMCIA_HERMES) += orinoco_cs.o
21obj-$(CONFIG_APPLE_AIRPORT) += airport.o
22obj-$(CONFIG_PLX_HERMES) += orinoco_plx.o
23obj-$(CONFIG_PCI_HERMES) += orinoco_pci.o
24obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
25obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o
26obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o
27 19
28obj-$(CONFIG_AIRO) += airo.o 20obj-$(CONFIG_AIRO) += airo.o
29obj-$(CONFIG_AIRO_CS) += airo_cs.o airo.o 21obj-$(CONFIG_AIRO_CS) += airo_cs.o airo.o
@@ -38,6 +30,8 @@ obj-$(CONFIG_HOSTAP) += hostap/
38obj-$(CONFIG_B43) += b43/ 30obj-$(CONFIG_B43) += b43/
39obj-$(CONFIG_B43LEGACY) += b43legacy/ 31obj-$(CONFIG_B43LEGACY) += b43legacy/
40obj-$(CONFIG_ZD1211RW) += zd1211rw/ 32obj-$(CONFIG_ZD1211RW) += zd1211rw/
33obj-$(CONFIG_RTL8180) += rtl818x/
34obj-$(CONFIG_RTL8187) += rtl818x/
41 35
42# 16-bit wireless PCMCIA client drivers 36# 16-bit wireless PCMCIA client drivers
43obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o 37obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
@@ -50,12 +44,6 @@ obj-$(CONFIG_LIBERTAS) += libertas/
50 44
51obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf/ 45obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf/
52 46
53rtl8180-objs := rtl8180_dev.o rtl8180_rtl8225.o rtl8180_sa2400.o rtl8180_max2820.o rtl8180_grf5101.o
54rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o
55
56obj-$(CONFIG_RTL8180) += rtl8180.o
57obj-$(CONFIG_RTL8187) += rtl8187.o
58
59obj-$(CONFIG_ADM8211) += adm8211.o 47obj-$(CONFIG_ADM8211) += adm8211.o
60 48
61obj-$(CONFIG_IWLWIFI) += iwlwifi/ 49obj-$(CONFIG_IWLWIFI) += iwlwifi/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index b2c050b68890..fc0897fb2239 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -341,15 +341,14 @@ static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
341 pci_unmap_single(priv->pdev, info->mapping, 341 pci_unmap_single(priv->pdev, info->mapping,
342 info->skb->len, PCI_DMA_TODEVICE); 342 info->skb->len, PCI_DMA_TODEVICE);
343 343
344 memset(&txi->status, 0, sizeof(txi->status)); 344 ieee80211_tx_info_clear_status(txi);
345
345 skb_pull(skb, sizeof(struct adm8211_tx_hdr)); 346 skb_pull(skb, sizeof(struct adm8211_tx_hdr));
346 memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen); 347 memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen);
347 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK)) { 348 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) &&
348 if (status & TDES0_STATUS_ES) 349 !(status & TDES0_STATUS_ES))
349 txi->status.excessive_retries = 1; 350 txi->flags |= IEEE80211_TX_STAT_ACK;
350 else 351
351 txi->flags |= IEEE80211_TX_STAT_ACK;
352 }
353 ieee80211_tx_status_irqsafe(dev, skb); 352 ieee80211_tx_status_irqsafe(dev, skb);
354 353
355 info->skb = NULL; 354 info->skb = NULL;
@@ -1298,25 +1297,10 @@ static void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid)
1298 ADM8211_CSR_WRITE(ABDA1, reg); 1297 ADM8211_CSR_WRITE(ABDA1, reg);
1299} 1298}
1300 1299
1301static int adm8211_set_ssid(struct ieee80211_hw *dev, u8 *ssid, size_t ssid_len) 1300static int adm8211_config(struct ieee80211_hw *dev, u32 changed)
1302{
1303 struct adm8211_priv *priv = dev->priv;
1304 u8 buf[36];
1305
1306 if (ssid_len > 32)
1307 return -EINVAL;
1308
1309 memset(buf, 0, sizeof(buf));
1310 buf[0] = ssid_len;
1311 memcpy(buf + 1, ssid, ssid_len);
1312 adm8211_write_sram_bytes(dev, ADM8211_SRAM_SSID, buf, 33);
1313 /* TODO: configure beacon for adhoc? */
1314 return 0;
1315}
1316
1317static int adm8211_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
1318{ 1301{
1319 struct adm8211_priv *priv = dev->priv; 1302 struct adm8211_priv *priv = dev->priv;
1303 struct ieee80211_conf *conf = &dev->conf;
1320 int channel = ieee80211_frequency_to_channel(conf->channel->center_freq); 1304 int channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
1321 1305
1322 if (channel != priv->channel) { 1306 if (channel != priv->channel) {
@@ -1338,13 +1322,6 @@ static int adm8211_config_interface(struct ieee80211_hw *dev,
1338 memcpy(priv->bssid, conf->bssid, ETH_ALEN); 1322 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
1339 } 1323 }
1340 1324
1341 if (conf->ssid_len != priv->ssid_len ||
1342 memcmp(conf->ssid, priv->ssid, conf->ssid_len)) {
1343 adm8211_set_ssid(dev, conf->ssid, conf->ssid_len);
1344 priv->ssid_len = conf->ssid_len;
1345 memcpy(priv->ssid, conf->ssid, conf->ssid_len);
1346 }
1347
1348 return 0; 1325 return 0;
1349} 1326}
1350 1327
@@ -1690,8 +1667,10 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
1690 struct ieee80211_hdr *hdr; 1667 struct ieee80211_hdr *hdr;
1691 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1668 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1692 struct ieee80211_rate *txrate = ieee80211_get_tx_rate(dev, info); 1669 struct ieee80211_rate *txrate = ieee80211_get_tx_rate(dev, info);
1670 u8 rc_flags;
1693 1671
1694 short_preamble = !!(txrate->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE); 1672 rc_flags = info->control.rates[0].flags;
1673 short_preamble = !!(rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
1695 plcp_signal = txrate->bitrate; 1674 plcp_signal = txrate->bitrate;
1696 1675
1697 hdr = (struct ieee80211_hdr *)skb->data; 1676 hdr = (struct ieee80211_hdr *)skb->data;
@@ -1723,10 +1702,10 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
1723 if (short_preamble) 1702 if (short_preamble)
1724 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE); 1703 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE);
1725 1704
1726 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) 1705 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
1727 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS); 1706 txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS);
1728 1707
1729 txhdr->retry_limit = info->control.retry_limit; 1708 txhdr->retry_limit = info->control.rates[0].count;
1730 1709
1731 adm8211_tx_raw(dev, skb, plcp_signal, hdrlen); 1710 adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
1732 1711
@@ -1791,7 +1770,6 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1791 int err; 1770 int err;
1792 u32 reg; 1771 u32 reg;
1793 u8 perm_addr[ETH_ALEN]; 1772 u8 perm_addr[ETH_ALEN];
1794 DECLARE_MAC_BUF(mac);
1795 1773
1796 err = pci_enable_device(pdev); 1774 err = pci_enable_device(pdev);
1797 if (err) { 1775 if (err) {
@@ -1925,8 +1903,8 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1925 goto err_free_desc; 1903 goto err_free_desc;
1926 } 1904 }
1927 1905
1928 printk(KERN_INFO "%s: hwaddr %s, Rev 0x%02x\n", 1906 printk(KERN_INFO "%s: hwaddr %pM, Rev 0x%02x\n",
1929 wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr), 1907 wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
1930 pdev->revision); 1908 pdev->revision);
1931 1909
1932 return 0; 1910 return 0;
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h
index 9b190ee26e90..4f6ab1322189 100644
--- a/drivers/net/wireless/adm8211.h
+++ b/drivers/net/wireless/adm8211.h
@@ -553,8 +553,6 @@ struct adm8211_priv {
553 553
554 int channel; 554 int channel;
555 u8 bssid[ETH_ALEN]; 555 u8 bssid[ETH_ALEN];
556 u8 ssid[32];
557 size_t ssid_len;
558 556
559 u8 soft_rx_crc; 557 u8 soft_rx_crc;
560 u8 retry_limit; 558 u8 retry_limit;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 370133e492d2..fc4322ca669f 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -47,10 +47,11 @@
47#include <linux/ioport.h> 47#include <linux/ioport.h>
48#include <linux/pci.h> 48#include <linux/pci.h>
49#include <asm/uaccess.h> 49#include <asm/uaccess.h>
50#include <net/ieee80211.h>
51#include <linux/kthread.h> 50#include <linux/kthread.h>
52#include <linux/freezer.h> 51#include <linux/freezer.h>
53 52
53#include <linux/ieee80211.h>
54
54#include "airo.h" 55#include "airo.h"
55 56
56#define DRV_NAME "airo" 57#define DRV_NAME "airo"
@@ -1270,6 +1271,7 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev);
1270#define airo_print_err(name, fmt, args...) \ 1271#define airo_print_err(name, fmt, args...) \
1271 airo_print(KERN_ERR, name, fmt, ##args) 1272 airo_print(KERN_ERR, name, fmt, ##args)
1272 1273
1274#define AIRO_FLASH(dev) (((struct airo_info *)dev->ml_priv)->flash)
1273 1275
1274/*********************************************************************** 1276/***********************************************************************
1275 * MIC ROUTINES * 1277 * MIC ROUTINES *
@@ -1865,7 +1867,7 @@ static void try_auto_wep(struct airo_info *ai)
1865} 1867}
1866 1868
1867static int airo_open(struct net_device *dev) { 1869static int airo_open(struct net_device *dev) {
1868 struct airo_info *ai = dev->priv; 1870 struct airo_info *ai = dev->ml_priv;
1869 int rc = 0; 1871 int rc = 0;
1870 1872
1871 if (test_bit(FLAG_FLASHING, &ai->flags)) 1873 if (test_bit(FLAG_FLASHING, &ai->flags))
@@ -1912,7 +1914,7 @@ static int airo_open(struct net_device *dev) {
1912static int mpi_start_xmit(struct sk_buff *skb, struct net_device *dev) { 1914static int mpi_start_xmit(struct sk_buff *skb, struct net_device *dev) {
1913 int npacks, pending; 1915 int npacks, pending;
1914 unsigned long flags; 1916 unsigned long flags;
1915 struct airo_info *ai = dev->priv; 1917 struct airo_info *ai = dev->ml_priv;
1916 1918
1917 if (!skb) { 1919 if (!skb) {
1918 airo_print_err(dev->name, "%s: skb == NULL!",__func__); 1920 airo_print_err(dev->name, "%s: skb == NULL!",__func__);
@@ -1956,7 +1958,7 @@ static int mpi_send_packet (struct net_device *dev)
1956 unsigned char *buffer; 1958 unsigned char *buffer;
1957 s16 len; 1959 s16 len;
1958 __le16 *payloadLen; 1960 __le16 *payloadLen;
1959 struct airo_info *ai = dev->priv; 1961 struct airo_info *ai = dev->ml_priv;
1960 u8 *sendbuf; 1962 u8 *sendbuf;
1961 1963
1962 /* get a packet to send */ 1964 /* get a packet to send */
@@ -2085,7 +2087,7 @@ static void get_tx_error(struct airo_info *ai, s32 fid)
2085static void airo_end_xmit(struct net_device *dev) { 2087static void airo_end_xmit(struct net_device *dev) {
2086 u16 status; 2088 u16 status;
2087 int i; 2089 int i;
2088 struct airo_info *priv = dev->priv; 2090 struct airo_info *priv = dev->ml_priv;
2089 struct sk_buff *skb = priv->xmit.skb; 2091 struct sk_buff *skb = priv->xmit.skb;
2090 int fid = priv->xmit.fid; 2092 int fid = priv->xmit.fid;
2091 u32 *fids = priv->fids; 2093 u32 *fids = priv->fids;
@@ -2111,7 +2113,7 @@ static void airo_end_xmit(struct net_device *dev) {
2111static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) { 2113static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
2112 s16 len; 2114 s16 len;
2113 int i, j; 2115 int i, j;
2114 struct airo_info *priv = dev->priv; 2116 struct airo_info *priv = dev->ml_priv;
2115 u32 *fids = priv->fids; 2117 u32 *fids = priv->fids;
2116 2118
2117 if ( skb == NULL ) { 2119 if ( skb == NULL ) {
@@ -2150,7 +2152,7 @@ static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
2150static void airo_end_xmit11(struct net_device *dev) { 2152static void airo_end_xmit11(struct net_device *dev) {
2151 u16 status; 2153 u16 status;
2152 int i; 2154 int i;
2153 struct airo_info *priv = dev->priv; 2155 struct airo_info *priv = dev->ml_priv;
2154 struct sk_buff *skb = priv->xmit11.skb; 2156 struct sk_buff *skb = priv->xmit11.skb;
2155 int fid = priv->xmit11.fid; 2157 int fid = priv->xmit11.fid;
2156 u32 *fids = priv->fids; 2158 u32 *fids = priv->fids;
@@ -2176,7 +2178,7 @@ static void airo_end_xmit11(struct net_device *dev) {
2176static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) { 2178static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
2177 s16 len; 2179 s16 len;
2178 int i, j; 2180 int i, j;
2179 struct airo_info *priv = dev->priv; 2181 struct airo_info *priv = dev->ml_priv;
2180 u32 *fids = priv->fids; 2182 u32 *fids = priv->fids;
2181 2183
2182 if (test_bit(FLAG_MPI, &priv->flags)) { 2184 if (test_bit(FLAG_MPI, &priv->flags)) {
@@ -2220,7 +2222,7 @@ static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
2220 2222
2221static void airo_read_stats(struct net_device *dev) 2223static void airo_read_stats(struct net_device *dev)
2222{ 2224{
2223 struct airo_info *ai = dev->priv; 2225 struct airo_info *ai = dev->ml_priv;
2224 StatsRid stats_rid; 2226 StatsRid stats_rid;
2225 __le32 *vals = stats_rid.vals; 2227 __le32 *vals = stats_rid.vals;
2226 2228
@@ -2254,7 +2256,7 @@ static void airo_read_stats(struct net_device *dev)
2254 2256
2255static struct net_device_stats *airo_get_stats(struct net_device *dev) 2257static struct net_device_stats *airo_get_stats(struct net_device *dev)
2256{ 2258{
2257 struct airo_info *local = dev->priv; 2259 struct airo_info *local = dev->ml_priv;
2258 2260
2259 if (!test_bit(JOB_STATS, &local->jobs)) { 2261 if (!test_bit(JOB_STATS, &local->jobs)) {
2260 /* Get stats out of the card if available */ 2262 /* Get stats out of the card if available */
@@ -2281,7 +2283,7 @@ static void airo_set_promisc(struct airo_info *ai) {
2281} 2283}
2282 2284
2283static void airo_set_multicast_list(struct net_device *dev) { 2285static void airo_set_multicast_list(struct net_device *dev) {
2284 struct airo_info *ai = dev->priv; 2286 struct airo_info *ai = dev->ml_priv;
2285 2287
2286 if ((dev->flags ^ ai->flags) & IFF_PROMISC) { 2288 if ((dev->flags ^ ai->flags) & IFF_PROMISC) {
2287 change_bit(FLAG_PROMISC, &ai->flags); 2289 change_bit(FLAG_PROMISC, &ai->flags);
@@ -2299,7 +2301,7 @@ static void airo_set_multicast_list(struct net_device *dev) {
2299 2301
2300static int airo_set_mac_address(struct net_device *dev, void *p) 2302static int airo_set_mac_address(struct net_device *dev, void *p)
2301{ 2303{
2302 struct airo_info *ai = dev->priv; 2304 struct airo_info *ai = dev->ml_priv;
2303 struct sockaddr *addr = p; 2305 struct sockaddr *addr = p;
2304 2306
2305 readConfigRid(ai, 1); 2307 readConfigRid(ai, 1);
@@ -2339,7 +2341,7 @@ static void del_airo_dev(struct airo_info *ai)
2339} 2341}
2340 2342
2341static int airo_close(struct net_device *dev) { 2343static int airo_close(struct net_device *dev) {
2342 struct airo_info *ai = dev->priv; 2344 struct airo_info *ai = dev->ml_priv;
2343 2345
2344 netif_stop_queue(dev); 2346 netif_stop_queue(dev);
2345 2347
@@ -2365,7 +2367,7 @@ static int airo_close(struct net_device *dev) {
2365 2367
2366void stop_airo_card( struct net_device *dev, int freeres ) 2368void stop_airo_card( struct net_device *dev, int freeres )
2367{ 2369{
2368 struct airo_info *ai = dev->priv; 2370 struct airo_info *ai = dev->ml_priv;
2369 2371
2370 set_bit(FLAG_RADIO_DOWN, &ai->flags); 2372 set_bit(FLAG_RADIO_DOWN, &ai->flags);
2371 disable_MAC(ai, 1); 2373 disable_MAC(ai, 1);
@@ -2665,7 +2667,7 @@ static struct net_device *init_wifidev(struct airo_info *ai,
2665 struct net_device *dev = alloc_netdev(0, "wifi%d", wifi_setup); 2667 struct net_device *dev = alloc_netdev(0, "wifi%d", wifi_setup);
2666 if (!dev) 2668 if (!dev)
2667 return NULL; 2669 return NULL;
2668 dev->priv = ethdev->priv; 2670 dev->ml_priv = ethdev->ml_priv;
2669 dev->irq = ethdev->irq; 2671 dev->irq = ethdev->irq;
2670 dev->base_addr = ethdev->base_addr; 2672 dev->base_addr = ethdev->base_addr;
2671 dev->wireless_data = ethdev->wireless_data; 2673 dev->wireless_data = ethdev->wireless_data;
@@ -2680,7 +2682,7 @@ static struct net_device *init_wifidev(struct airo_info *ai,
2680} 2682}
2681 2683
2682static int reset_card( struct net_device *dev , int lock) { 2684static int reset_card( struct net_device *dev , int lock) {
2683 struct airo_info *ai = dev->priv; 2685 struct airo_info *ai = dev->ml_priv;
2684 2686
2685 if (lock && down_interruptible(&ai->sem)) 2687 if (lock && down_interruptible(&ai->sem))
2686 return -1; 2688 return -1;
@@ -2757,7 +2759,6 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2757 struct net_device *dev; 2759 struct net_device *dev;
2758 struct airo_info *ai; 2760 struct airo_info *ai;
2759 int i, rc; 2761 int i, rc;
2760 DECLARE_MAC_BUF(mac);
2761 2762
2762 /* Create the network device object. */ 2763 /* Create the network device object. */
2763 dev = alloc_netdev(sizeof(*ai), "", ether_setup); 2764 dev = alloc_netdev(sizeof(*ai), "", ether_setup);
@@ -2766,7 +2767,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2766 return NULL; 2767 return NULL;
2767 } 2768 }
2768 2769
2769 ai = dev->priv; 2770 ai = dev->ml_priv = netdev_priv(dev);
2770 ai->wifidev = NULL; 2771 ai->wifidev = NULL;
2771 ai->flags = 1 << FLAG_RADIO_DOWN; 2772 ai->flags = 1 << FLAG_RADIO_DOWN;
2772 ai->jobs = 0; 2773 ai->jobs = 0;
@@ -2860,15 +2861,14 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2860 goto err_out_reg; 2861 goto err_out_reg;
2861 2862
2862 set_bit(FLAG_REGISTERED,&ai->flags); 2863 set_bit(FLAG_REGISTERED,&ai->flags);
2863 airo_print_info(dev->name, "MAC enabled %s", 2864 airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr);
2864 print_mac(mac, dev->dev_addr));
2865 2865
2866 /* Allocate the transmit buffers */ 2866 /* Allocate the transmit buffers */
2867 if (probe && !test_bit(FLAG_MPI,&ai->flags)) 2867 if (probe && !test_bit(FLAG_MPI,&ai->flags))
2868 for( i = 0; i < MAX_FIDS; i++ ) 2868 for( i = 0; i < MAX_FIDS; i++ )
2869 ai->fids[i] = transmit_allocate(ai,AIRO_DEF_MTU,i>=MAX_FIDS/2); 2869 ai->fids[i] = transmit_allocate(ai,AIRO_DEF_MTU,i>=MAX_FIDS/2);
2870 2870
2871 if (setup_proc_entry(dev, dev->priv) < 0) 2871 if (setup_proc_entry(dev, dev->ml_priv) < 0)
2872 goto err_out_wifi; 2872 goto err_out_wifi;
2873 2873
2874 return dev; 2874 return dev;
@@ -2917,8 +2917,7 @@ static int waitbusy (struct airo_info *ai) {
2917int reset_airo_card( struct net_device *dev ) 2917int reset_airo_card( struct net_device *dev )
2918{ 2918{
2919 int i; 2919 int i;
2920 struct airo_info *ai = dev->priv; 2920 struct airo_info *ai = dev->ml_priv;
2921 DECLARE_MAC_BUF(mac);
2922 2921
2923 if (reset_card (dev, 1)) 2922 if (reset_card (dev, 1))
2924 return -1; 2923 return -1;
@@ -2927,8 +2926,7 @@ int reset_airo_card( struct net_device *dev )
2927 airo_print_err(dev->name, "MAC could not be enabled"); 2926 airo_print_err(dev->name, "MAC could not be enabled");
2928 return -1; 2927 return -1;
2929 } 2928 }
2930 airo_print_info(dev->name, "MAC enabled %s", 2929 airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr);
2931 print_mac(mac, dev->dev_addr));
2932 /* Allocate the transmit buffers if needed */ 2930 /* Allocate the transmit buffers if needed */
2933 if (!test_bit(FLAG_MPI,&ai->flags)) 2931 if (!test_bit(FLAG_MPI,&ai->flags))
2934 for( i = 0; i < MAX_FIDS; i++ ) 2932 for( i = 0; i < MAX_FIDS; i++ )
@@ -2942,7 +2940,7 @@ int reset_airo_card( struct net_device *dev )
2942EXPORT_SYMBOL(reset_airo_card); 2940EXPORT_SYMBOL(reset_airo_card);
2943 2941
2944static void airo_send_event(struct net_device *dev) { 2942static void airo_send_event(struct net_device *dev) {
2945 struct airo_info *ai = dev->priv; 2943 struct airo_info *ai = dev->ml_priv;
2946 union iwreq_data wrqu; 2944 union iwreq_data wrqu;
2947 StatusRid status_rid; 2945 StatusRid status_rid;
2948 2946
@@ -3019,7 +3017,7 @@ out:
3019 3017
3020static int airo_thread(void *data) { 3018static int airo_thread(void *data) {
3021 struct net_device *dev = data; 3019 struct net_device *dev = data;
3022 struct airo_info *ai = dev->priv; 3020 struct airo_info *ai = dev->ml_priv;
3023 int locked; 3021 int locked;
3024 3022
3025 set_freezable(); 3023 set_freezable();
@@ -3134,7 +3132,7 @@ static irqreturn_t airo_interrupt(int irq, void *dev_id)
3134 struct net_device *dev = dev_id; 3132 struct net_device *dev = dev_id;
3135 u16 status; 3133 u16 status;
3136 u16 fid; 3134 u16 fid;
3137 struct airo_info *apriv = dev->priv; 3135 struct airo_info *apriv = dev->ml_priv;
3138 u16 savedInterrupts = 0; 3136 u16 savedInterrupts = 0;
3139 int handled = 0; 3137 int handled = 0;
3140 3138
@@ -3369,7 +3367,6 @@ badrx:
3369 skb->protocol = htons(ETH_P_802_2); 3367 skb->protocol = htons(ETH_P_802_2);
3370 } else 3368 } else
3371 skb->protocol = eth_type_trans(skb,dev); 3369 skb->protocol = eth_type_trans(skb,dev);
3372 skb->dev->last_rx = jiffies;
3373 skb->ip_summed = CHECKSUM_NONE; 3370 skb->ip_summed = CHECKSUM_NONE;
3374 3371
3375 netif_rx( skb ); 3372 netif_rx( skb );
@@ -3599,7 +3596,6 @@ badmic:
3599 3596
3600 skb->ip_summed = CHECKSUM_NONE; 3597 skb->ip_summed = CHECKSUM_NONE;
3601 skb->protocol = eth_type_trans(skb, ai->dev); 3598 skb->protocol = eth_type_trans(skb, ai->dev);
3602 skb->dev->last_rx = jiffies;
3603 netif_rx(skb); 3599 netif_rx(skb);
3604 } 3600 }
3605badrx: 3601badrx:
@@ -3611,7 +3607,7 @@ badrx:
3611 } 3607 }
3612} 3608}
3613 3609
3614void mpi_receive_802_11 (struct airo_info *ai) 3610static void mpi_receive_802_11(struct airo_info *ai)
3615{ 3611{
3616 RxFid rxd; 3612 RxFid rxd;
3617 struct sk_buff *skb = NULL; 3613 struct sk_buff *skb = NULL;
@@ -3693,7 +3689,6 @@ void mpi_receive_802_11 (struct airo_info *ai)
3693 skb->pkt_type = PACKET_OTHERHOST; 3689 skb->pkt_type = PACKET_OTHERHOST;
3694 skb->dev = ai->wifidev; 3690 skb->dev = ai->wifidev;
3695 skb->protocol = htons(ETH_P_802_2); 3691 skb->protocol = htons(ETH_P_802_2);
3696 skb->dev->last_rx = jiffies;
3697 skb->ip_summed = CHECKSUM_NONE; 3692 skb->ip_summed = CHECKSUM_NONE;
3698 netif_rx( skb ); 3693 netif_rx( skb );
3699badrx: 3694badrx:
@@ -4604,7 +4599,7 @@ static int proc_status_open(struct inode *inode, struct file *file)
4604 struct proc_data *data; 4599 struct proc_data *data;
4605 struct proc_dir_entry *dp = PDE(inode); 4600 struct proc_dir_entry *dp = PDE(inode);
4606 struct net_device *dev = dp->data; 4601 struct net_device *dev = dp->data;
4607 struct airo_info *apriv = dev->priv; 4602 struct airo_info *apriv = dev->ml_priv;
4608 CapabilityRid cap_rid; 4603 CapabilityRid cap_rid;
4609 StatusRid status_rid; 4604 StatusRid status_rid;
4610 u16 mode; 4605 u16 mode;
@@ -4687,7 +4682,7 @@ static int proc_stats_rid_open( struct inode *inode,
4687 struct proc_data *data; 4682 struct proc_data *data;
4688 struct proc_dir_entry *dp = PDE(inode); 4683 struct proc_dir_entry *dp = PDE(inode);
4689 struct net_device *dev = dp->data; 4684 struct net_device *dev = dp->data;
4690 struct airo_info *apriv = dev->priv; 4685 struct airo_info *apriv = dev->ml_priv;
4691 StatsRid stats; 4686 StatsRid stats;
4692 int i, j; 4687 int i, j;
4693 __le32 *vals = stats.vals; 4688 __le32 *vals = stats.vals;
@@ -4750,7 +4745,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
4750 struct proc_data *data = file->private_data; 4745 struct proc_data *data = file->private_data;
4751 struct proc_dir_entry *dp = PDE(inode); 4746 struct proc_dir_entry *dp = PDE(inode);
4752 struct net_device *dev = dp->data; 4747 struct net_device *dev = dp->data;
4753 struct airo_info *ai = dev->priv; 4748 struct airo_info *ai = dev->ml_priv;
4754 char *line; 4749 char *line;
4755 4750
4756 if ( !data->writelen ) return; 4751 if ( !data->writelen ) return;
@@ -4962,7 +4957,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
4962 struct proc_data *data; 4957 struct proc_data *data;
4963 struct proc_dir_entry *dp = PDE(inode); 4958 struct proc_dir_entry *dp = PDE(inode);
4964 struct net_device *dev = dp->data; 4959 struct net_device *dev = dp->data;
4965 struct airo_info *ai = dev->priv; 4960 struct airo_info *ai = dev->ml_priv;
4966 int i; 4961 int i;
4967 __le16 mode; 4962 __le16 mode;
4968 4963
@@ -5053,7 +5048,7 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
5053 struct proc_data *data = (struct proc_data *)file->private_data; 5048 struct proc_data *data = (struct proc_data *)file->private_data;
5054 struct proc_dir_entry *dp = PDE(inode); 5049 struct proc_dir_entry *dp = PDE(inode);
5055 struct net_device *dev = dp->data; 5050 struct net_device *dev = dp->data;
5056 struct airo_info *ai = dev->priv; 5051 struct airo_info *ai = dev->ml_priv;
5057 SsidRid SSID_rid; 5052 SsidRid SSID_rid;
5058 int i; 5053 int i;
5059 char *p = data->wbuffer; 5054 char *p = data->wbuffer;
@@ -5096,7 +5091,7 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) {
5096 struct proc_data *data = (struct proc_data *)file->private_data; 5091 struct proc_data *data = (struct proc_data *)file->private_data;
5097 struct proc_dir_entry *dp = PDE(inode); 5092 struct proc_dir_entry *dp = PDE(inode);
5098 struct net_device *dev = dp->data; 5093 struct net_device *dev = dp->data;
5099 struct airo_info *ai = dev->priv; 5094 struct airo_info *ai = dev->ml_priv;
5100 APListRid APList_rid; 5095 APListRid APList_rid;
5101 int i; 5096 int i;
5102 5097
@@ -5191,7 +5186,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
5191 struct proc_data *data; 5186 struct proc_data *data;
5192 struct proc_dir_entry *dp = PDE(inode); 5187 struct proc_dir_entry *dp = PDE(inode);
5193 struct net_device *dev = dp->data; 5188 struct net_device *dev = dp->data;
5194 struct airo_info *ai = dev->priv; 5189 struct airo_info *ai = dev->ml_priv;
5195 int i; 5190 int i;
5196 char key[16]; 5191 char key[16];
5197 u16 index = 0; 5192 u16 index = 0;
@@ -5233,7 +5228,7 @@ static int proc_wepkey_open( struct inode *inode, struct file *file )
5233 struct proc_data *data; 5228 struct proc_data *data;
5234 struct proc_dir_entry *dp = PDE(inode); 5229 struct proc_dir_entry *dp = PDE(inode);
5235 struct net_device *dev = dp->data; 5230 struct net_device *dev = dp->data;
5236 struct airo_info *ai = dev->priv; 5231 struct airo_info *ai = dev->ml_priv;
5237 char *ptr; 5232 char *ptr;
5238 WepKeyRid wkr; 5233 WepKeyRid wkr;
5239 __le16 lastindex; 5234 __le16 lastindex;
@@ -5282,7 +5277,7 @@ static int proc_SSID_open(struct inode *inode, struct file *file)
5282 struct proc_data *data; 5277 struct proc_data *data;
5283 struct proc_dir_entry *dp = PDE(inode); 5278 struct proc_dir_entry *dp = PDE(inode);
5284 struct net_device *dev = dp->data; 5279 struct net_device *dev = dp->data;
5285 struct airo_info *ai = dev->priv; 5280 struct airo_info *ai = dev->ml_priv;
5286 int i; 5281 int i;
5287 char *ptr; 5282 char *ptr;
5288 SsidRid SSID_rid; 5283 SsidRid SSID_rid;
@@ -5326,11 +5321,10 @@ static int proc_APList_open( struct inode *inode, struct file *file ) {
5326 struct proc_data *data; 5321 struct proc_data *data;
5327 struct proc_dir_entry *dp = PDE(inode); 5322 struct proc_dir_entry *dp = PDE(inode);
5328 struct net_device *dev = dp->data; 5323 struct net_device *dev = dp->data;
5329 struct airo_info *ai = dev->priv; 5324 struct airo_info *ai = dev->ml_priv;
5330 int i; 5325 int i;
5331 char *ptr; 5326 char *ptr;
5332 APListRid APList_rid; 5327 APListRid APList_rid;
5333 DECLARE_MAC_BUF(mac);
5334 5328
5335 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) 5329 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
5336 return -ENOMEM; 5330 return -ENOMEM;
@@ -5354,8 +5348,7 @@ static int proc_APList_open( struct inode *inode, struct file *file ) {
5354// We end when we find a zero MAC 5348// We end when we find a zero MAC
5355 if ( !*(int*)APList_rid.ap[i] && 5349 if ( !*(int*)APList_rid.ap[i] &&
5356 !*(int*)&APList_rid.ap[i][2]) break; 5350 !*(int*)&APList_rid.ap[i][2]) break;
5357 ptr += sprintf(ptr, "%s\n", 5351 ptr += sprintf(ptr, "%pM\n", APList_rid.ap[i]);
5358 print_mac(mac, APList_rid.ap[i]));
5359 } 5352 }
5360 if (i==0) ptr += sprintf(ptr, "Not using specific APs\n"); 5353 if (i==0) ptr += sprintf(ptr, "Not using specific APs\n");
5361 5354
@@ -5368,13 +5361,12 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
5368 struct proc_data *data; 5361 struct proc_data *data;
5369 struct proc_dir_entry *dp = PDE(inode); 5362 struct proc_dir_entry *dp = PDE(inode);
5370 struct net_device *dev = dp->data; 5363 struct net_device *dev = dp->data;
5371 struct airo_info *ai = dev->priv; 5364 struct airo_info *ai = dev->ml_priv;
5372 char *ptr; 5365 char *ptr;
5373 BSSListRid BSSList_rid; 5366 BSSListRid BSSList_rid;
5374 int rc; 5367 int rc;
5375 /* If doLoseSync is not 1, we won't do a Lose Sync */ 5368 /* If doLoseSync is not 1, we won't do a Lose Sync */
5376 int doLoseSync = -1; 5369 int doLoseSync = -1;
5377 DECLARE_MAC_BUF(mac);
5378 5370
5379 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL) 5371 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
5380 return -ENOMEM; 5372 return -ENOMEM;
@@ -5411,8 +5403,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
5411 we have to add a spin lock... */ 5403 we have to add a spin lock... */
5412 rc = readBSSListRid(ai, doLoseSync, &BSSList_rid); 5404 rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
5413 while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) { 5405 while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
5414 ptr += sprintf(ptr, "%s %*s rssi = %d", 5406 ptr += sprintf(ptr, "%pM %*s rssi = %d",
5415 print_mac(mac, BSSList_rid.bssid), 5407 BSSList_rid.bssid,
5416 (int)BSSList_rid.ssidLen, 5408 (int)BSSList_rid.ssidLen,
5417 BSSList_rid.ssid, 5409 BSSList_rid.ssid,
5418 le16_to_cpu(BSSList_rid.dBm)); 5410 le16_to_cpu(BSSList_rid.dBm));
@@ -5447,7 +5439,7 @@ static int proc_close( struct inode *inode, struct file *file )
5447 associated we will check every minute to see if anything has 5439 associated we will check every minute to see if anything has
5448 changed. */ 5440 changed. */
5449static void timer_func( struct net_device *dev ) { 5441static void timer_func( struct net_device *dev ) {
5450 struct airo_info *apriv = dev->priv; 5442 struct airo_info *apriv = dev->ml_priv;
5451 5443
5452/* We don't have a link so try changing the authtype */ 5444/* We don't have a link so try changing the authtype */
5453 readConfigRid(apriv, 0); 5445 readConfigRid(apriv, 0);
@@ -5518,7 +5510,7 @@ static void __devexit airo_pci_remove(struct pci_dev *pdev)
5518static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state) 5510static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
5519{ 5511{
5520 struct net_device *dev = pci_get_drvdata(pdev); 5512 struct net_device *dev = pci_get_drvdata(pdev);
5521 struct airo_info *ai = dev->priv; 5513 struct airo_info *ai = dev->ml_priv;
5522 Cmd cmd; 5514 Cmd cmd;
5523 Resp rsp; 5515 Resp rsp;
5524 5516
@@ -5550,7 +5542,7 @@ static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
5550static int airo_pci_resume(struct pci_dev *pdev) 5542static int airo_pci_resume(struct pci_dev *pdev)
5551{ 5543{
5552 struct net_device *dev = pci_get_drvdata(pdev); 5544 struct net_device *dev = pci_get_drvdata(pdev);
5553 struct airo_info *ai = dev->priv; 5545 struct airo_info *ai = dev->ml_priv;
5554 pci_power_t prev_state = pdev->current_state; 5546 pci_power_t prev_state = pdev->current_state;
5555 5547
5556 pci_set_power_state(pdev, PCI_D0); 5548 pci_set_power_state(pdev, PCI_D0);
@@ -5729,7 +5721,7 @@ static int airo_set_freq(struct net_device *dev,
5729 struct iw_freq *fwrq, 5721 struct iw_freq *fwrq,
5730 char *extra) 5722 char *extra)
5731{ 5723{
5732 struct airo_info *local = dev->priv; 5724 struct airo_info *local = dev->ml_priv;
5733 int rc = -EINPROGRESS; /* Call commit handler */ 5725 int rc = -EINPROGRESS; /* Call commit handler */
5734 5726
5735 /* If setting by frequency, convert to a channel */ 5727 /* If setting by frequency, convert to a channel */
@@ -5774,7 +5766,7 @@ static int airo_get_freq(struct net_device *dev,
5774 struct iw_freq *fwrq, 5766 struct iw_freq *fwrq,
5775 char *extra) 5767 char *extra)
5776{ 5768{
5777 struct airo_info *local = dev->priv; 5769 struct airo_info *local = dev->ml_priv;
5778 StatusRid status_rid; /* Card status info */ 5770 StatusRid status_rid; /* Card status info */
5779 int ch; 5771 int ch;
5780 5772
@@ -5805,7 +5797,7 @@ static int airo_set_essid(struct net_device *dev,
5805 struct iw_point *dwrq, 5797 struct iw_point *dwrq,
5806 char *extra) 5798 char *extra)
5807{ 5799{
5808 struct airo_info *local = dev->priv; 5800 struct airo_info *local = dev->ml_priv;
5809 SsidRid SSID_rid; /* SSIDs */ 5801 SsidRid SSID_rid; /* SSIDs */
5810 5802
5811 /* Reload the list of current SSID */ 5803 /* Reload the list of current SSID */
@@ -5851,7 +5843,7 @@ static int airo_get_essid(struct net_device *dev,
5851 struct iw_point *dwrq, 5843 struct iw_point *dwrq,
5852 char *extra) 5844 char *extra)
5853{ 5845{
5854 struct airo_info *local = dev->priv; 5846 struct airo_info *local = dev->ml_priv;
5855 StatusRid status_rid; /* Card status info */ 5847 StatusRid status_rid; /* Card status info */
5856 5848
5857 readStatusRid(local, &status_rid, 1); 5849 readStatusRid(local, &status_rid, 1);
@@ -5879,7 +5871,7 @@ static int airo_set_wap(struct net_device *dev,
5879 struct sockaddr *awrq, 5871 struct sockaddr *awrq,
5880 char *extra) 5872 char *extra)
5881{ 5873{
5882 struct airo_info *local = dev->priv; 5874 struct airo_info *local = dev->ml_priv;
5883 Cmd cmd; 5875 Cmd cmd;
5884 Resp rsp; 5876 Resp rsp;
5885 APListRid APList_rid; 5877 APListRid APList_rid;
@@ -5916,7 +5908,7 @@ static int airo_get_wap(struct net_device *dev,
5916 struct sockaddr *awrq, 5908 struct sockaddr *awrq,
5917 char *extra) 5909 char *extra)
5918{ 5910{
5919 struct airo_info *local = dev->priv; 5911 struct airo_info *local = dev->ml_priv;
5920 StatusRid status_rid; /* Card status info */ 5912 StatusRid status_rid; /* Card status info */
5921 5913
5922 readStatusRid(local, &status_rid, 1); 5914 readStatusRid(local, &status_rid, 1);
@@ -5937,7 +5929,7 @@ static int airo_set_nick(struct net_device *dev,
5937 struct iw_point *dwrq, 5929 struct iw_point *dwrq,
5938 char *extra) 5930 char *extra)
5939{ 5931{
5940 struct airo_info *local = dev->priv; 5932 struct airo_info *local = dev->ml_priv;
5941 5933
5942 /* Check the size of the string */ 5934 /* Check the size of the string */
5943 if(dwrq->length > 16) { 5935 if(dwrq->length > 16) {
@@ -5960,7 +5952,7 @@ static int airo_get_nick(struct net_device *dev,
5960 struct iw_point *dwrq, 5952 struct iw_point *dwrq,
5961 char *extra) 5953 char *extra)
5962{ 5954{
5963 struct airo_info *local = dev->priv; 5955 struct airo_info *local = dev->ml_priv;
5964 5956
5965 readConfigRid(local, 1); 5957 readConfigRid(local, 1);
5966 strncpy(extra, local->config.nodeName, 16); 5958 strncpy(extra, local->config.nodeName, 16);
@@ -5979,7 +5971,7 @@ static int airo_set_rate(struct net_device *dev,
5979 struct iw_param *vwrq, 5971 struct iw_param *vwrq,
5980 char *extra) 5972 char *extra)
5981{ 5973{
5982 struct airo_info *local = dev->priv; 5974 struct airo_info *local = dev->ml_priv;
5983 CapabilityRid cap_rid; /* Card capability info */ 5975 CapabilityRid cap_rid; /* Card capability info */
5984 u8 brate = 0; 5976 u8 brate = 0;
5985 int i; 5977 int i;
@@ -6049,7 +6041,7 @@ static int airo_get_rate(struct net_device *dev,
6049 struct iw_param *vwrq, 6041 struct iw_param *vwrq,
6050 char *extra) 6042 char *extra)
6051{ 6043{
6052 struct airo_info *local = dev->priv; 6044 struct airo_info *local = dev->ml_priv;
6053 StatusRid status_rid; /* Card status info */ 6045 StatusRid status_rid; /* Card status info */
6054 6046
6055 readStatusRid(local, &status_rid, 1); 6047 readStatusRid(local, &status_rid, 1);
@@ -6071,7 +6063,7 @@ static int airo_set_rts(struct net_device *dev,
6071 struct iw_param *vwrq, 6063 struct iw_param *vwrq,
6072 char *extra) 6064 char *extra)
6073{ 6065{
6074 struct airo_info *local = dev->priv; 6066 struct airo_info *local = dev->ml_priv;
6075 int rthr = vwrq->value; 6067 int rthr = vwrq->value;
6076 6068
6077 if(vwrq->disabled) 6069 if(vwrq->disabled)
@@ -6095,7 +6087,7 @@ static int airo_get_rts(struct net_device *dev,
6095 struct iw_param *vwrq, 6087 struct iw_param *vwrq,
6096 char *extra) 6088 char *extra)
6097{ 6089{
6098 struct airo_info *local = dev->priv; 6090 struct airo_info *local = dev->ml_priv;
6099 6091
6100 readConfigRid(local, 1); 6092 readConfigRid(local, 1);
6101 vwrq->value = le16_to_cpu(local->config.rtsThres); 6093 vwrq->value = le16_to_cpu(local->config.rtsThres);
@@ -6114,7 +6106,7 @@ static int airo_set_frag(struct net_device *dev,
6114 struct iw_param *vwrq, 6106 struct iw_param *vwrq,
6115 char *extra) 6107 char *extra)
6116{ 6108{
6117 struct airo_info *local = dev->priv; 6109 struct airo_info *local = dev->ml_priv;
6118 int fthr = vwrq->value; 6110 int fthr = vwrq->value;
6119 6111
6120 if(vwrq->disabled) 6112 if(vwrq->disabled)
@@ -6139,7 +6131,7 @@ static int airo_get_frag(struct net_device *dev,
6139 struct iw_param *vwrq, 6131 struct iw_param *vwrq,
6140 char *extra) 6132 char *extra)
6141{ 6133{
6142 struct airo_info *local = dev->priv; 6134 struct airo_info *local = dev->ml_priv;
6143 6135
6144 readConfigRid(local, 1); 6136 readConfigRid(local, 1);
6145 vwrq->value = le16_to_cpu(local->config.fragThresh); 6137 vwrq->value = le16_to_cpu(local->config.fragThresh);
@@ -6158,7 +6150,7 @@ static int airo_set_mode(struct net_device *dev,
6158 __u32 *uwrq, 6150 __u32 *uwrq,
6159 char *extra) 6151 char *extra)
6160{ 6152{
6161 struct airo_info *local = dev->priv; 6153 struct airo_info *local = dev->ml_priv;
6162 int reset = 0; 6154 int reset = 0;
6163 6155
6164 readConfigRid(local, 1); 6156 readConfigRid(local, 1);
@@ -6221,7 +6213,7 @@ static int airo_get_mode(struct net_device *dev,
6221 __u32 *uwrq, 6213 __u32 *uwrq,
6222 char *extra) 6214 char *extra)
6223{ 6215{
6224 struct airo_info *local = dev->priv; 6216 struct airo_info *local = dev->ml_priv;
6225 6217
6226 readConfigRid(local, 1); 6218 readConfigRid(local, 1);
6227 /* If not managed, assume it's ad-hoc */ 6219 /* If not managed, assume it's ad-hoc */
@@ -6258,7 +6250,7 @@ static int airo_set_encode(struct net_device *dev,
6258 struct iw_point *dwrq, 6250 struct iw_point *dwrq,
6259 char *extra) 6251 char *extra)
6260{ 6252{
6261 struct airo_info *local = dev->priv; 6253 struct airo_info *local = dev->ml_priv;
6262 CapabilityRid cap_rid; /* Card capability info */ 6254 CapabilityRid cap_rid; /* Card capability info */
6263 int perm = ( dwrq->flags & IW_ENCODE_TEMP ? 0 : 1 ); 6255 int perm = ( dwrq->flags & IW_ENCODE_TEMP ? 0 : 1 );
6264 __le16 currentAuthType = local->config.authType; 6256 __le16 currentAuthType = local->config.authType;
@@ -6345,7 +6337,7 @@ static int airo_get_encode(struct net_device *dev,
6345 struct iw_point *dwrq, 6337 struct iw_point *dwrq,
6346 char *extra) 6338 char *extra)
6347{ 6339{
6348 struct airo_info *local = dev->priv; 6340 struct airo_info *local = dev->ml_priv;
6349 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 6341 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
6350 CapabilityRid cap_rid; /* Card capability info */ 6342 CapabilityRid cap_rid; /* Card capability info */
6351 6343
@@ -6393,7 +6385,7 @@ static int airo_set_encodeext(struct net_device *dev,
6393 union iwreq_data *wrqu, 6385 union iwreq_data *wrqu,
6394 char *extra) 6386 char *extra)
6395{ 6387{
6396 struct airo_info *local = dev->priv; 6388 struct airo_info *local = dev->ml_priv;
6397 struct iw_point *encoding = &wrqu->encoding; 6389 struct iw_point *encoding = &wrqu->encoding;
6398 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 6390 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6399 CapabilityRid cap_rid; /* Card capability info */ 6391 CapabilityRid cap_rid; /* Card capability info */
@@ -6479,7 +6471,7 @@ static int airo_get_encodeext(struct net_device *dev,
6479 union iwreq_data *wrqu, 6471 union iwreq_data *wrqu,
6480 char *extra) 6472 char *extra)
6481{ 6473{
6482 struct airo_info *local = dev->priv; 6474 struct airo_info *local = dev->ml_priv;
6483 struct iw_point *encoding = &wrqu->encoding; 6475 struct iw_point *encoding = &wrqu->encoding;
6484 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 6476 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6485 CapabilityRid cap_rid; /* Card capability info */ 6477 CapabilityRid cap_rid; /* Card capability info */
@@ -6542,7 +6534,7 @@ static int airo_set_auth(struct net_device *dev,
6542 struct iw_request_info *info, 6534 struct iw_request_info *info,
6543 union iwreq_data *wrqu, char *extra) 6535 union iwreq_data *wrqu, char *extra)
6544{ 6536{
6545 struct airo_info *local = dev->priv; 6537 struct airo_info *local = dev->ml_priv;
6546 struct iw_param *param = &wrqu->param; 6538 struct iw_param *param = &wrqu->param;
6547 __le16 currentAuthType = local->config.authType; 6539 __le16 currentAuthType = local->config.authType;
6548 6540
@@ -6610,7 +6602,7 @@ static int airo_get_auth(struct net_device *dev,
6610 struct iw_request_info *info, 6602 struct iw_request_info *info,
6611 union iwreq_data *wrqu, char *extra) 6603 union iwreq_data *wrqu, char *extra)
6612{ 6604{
6613 struct airo_info *local = dev->priv; 6605 struct airo_info *local = dev->ml_priv;
6614 struct iw_param *param = &wrqu->param; 6606 struct iw_param *param = &wrqu->param;
6615 __le16 currentAuthType = local->config.authType; 6607 __le16 currentAuthType = local->config.authType;
6616 6608
@@ -6659,7 +6651,7 @@ static int airo_set_txpow(struct net_device *dev,
6659 struct iw_param *vwrq, 6651 struct iw_param *vwrq,
6660 char *extra) 6652 char *extra)
6661{ 6653{
6662 struct airo_info *local = dev->priv; 6654 struct airo_info *local = dev->ml_priv;
6663 CapabilityRid cap_rid; /* Card capability info */ 6655 CapabilityRid cap_rid; /* Card capability info */
6664 int i; 6656 int i;
6665 int rc = -EINVAL; 6657 int rc = -EINVAL;
@@ -6696,7 +6688,7 @@ static int airo_get_txpow(struct net_device *dev,
6696 struct iw_param *vwrq, 6688 struct iw_param *vwrq,
6697 char *extra) 6689 char *extra)
6698{ 6690{
6699 struct airo_info *local = dev->priv; 6691 struct airo_info *local = dev->ml_priv;
6700 6692
6701 readConfigRid(local, 1); 6693 readConfigRid(local, 1);
6702 vwrq->value = le16_to_cpu(local->config.txPower); 6694 vwrq->value = le16_to_cpu(local->config.txPower);
@@ -6716,7 +6708,7 @@ static int airo_set_retry(struct net_device *dev,
6716 struct iw_param *vwrq, 6708 struct iw_param *vwrq,
6717 char *extra) 6709 char *extra)
6718{ 6710{
6719 struct airo_info *local = dev->priv; 6711 struct airo_info *local = dev->ml_priv;
6720 int rc = -EINVAL; 6712 int rc = -EINVAL;
6721 6713
6722 if(vwrq->disabled) { 6714 if(vwrq->disabled) {
@@ -6754,7 +6746,7 @@ static int airo_get_retry(struct net_device *dev,
6754 struct iw_param *vwrq, 6746 struct iw_param *vwrq,
6755 char *extra) 6747 char *extra)
6756{ 6748{
6757 struct airo_info *local = dev->priv; 6749 struct airo_info *local = dev->ml_priv;
6758 6750
6759 vwrq->disabled = 0; /* Can't be disabled */ 6751 vwrq->disabled = 0; /* Can't be disabled */
6760 6752
@@ -6785,7 +6777,7 @@ static int airo_get_range(struct net_device *dev,
6785 struct iw_point *dwrq, 6777 struct iw_point *dwrq,
6786 char *extra) 6778 char *extra)
6787{ 6779{
6788 struct airo_info *local = dev->priv; 6780 struct airo_info *local = dev->ml_priv;
6789 struct iw_range *range = (struct iw_range *) extra; 6781 struct iw_range *range = (struct iw_range *) extra;
6790 CapabilityRid cap_rid; /* Card capability info */ 6782 CapabilityRid cap_rid; /* Card capability info */
6791 int i; 6783 int i;
@@ -6910,7 +6902,7 @@ static int airo_set_power(struct net_device *dev,
6910 struct iw_param *vwrq, 6902 struct iw_param *vwrq,
6911 char *extra) 6903 char *extra)
6912{ 6904{
6913 struct airo_info *local = dev->priv; 6905 struct airo_info *local = dev->ml_priv;
6914 6906
6915 readConfigRid(local, 1); 6907 readConfigRid(local, 1);
6916 if (vwrq->disabled) { 6908 if (vwrq->disabled) {
@@ -6967,7 +6959,7 @@ static int airo_get_power(struct net_device *dev,
6967 struct iw_param *vwrq, 6959 struct iw_param *vwrq,
6968 char *extra) 6960 char *extra)
6969{ 6961{
6970 struct airo_info *local = dev->priv; 6962 struct airo_info *local = dev->ml_priv;
6971 __le16 mode; 6963 __le16 mode;
6972 6964
6973 readConfigRid(local, 1); 6965 readConfigRid(local, 1);
@@ -6998,7 +6990,7 @@ static int airo_set_sens(struct net_device *dev,
6998 struct iw_param *vwrq, 6990 struct iw_param *vwrq,
6999 char *extra) 6991 char *extra)
7000{ 6992{
7001 struct airo_info *local = dev->priv; 6993 struct airo_info *local = dev->ml_priv;
7002 6994
7003 readConfigRid(local, 1); 6995 readConfigRid(local, 1);
7004 local->config.rssiThreshold = 6996 local->config.rssiThreshold =
@@ -7017,7 +7009,7 @@ static int airo_get_sens(struct net_device *dev,
7017 struct iw_param *vwrq, 7009 struct iw_param *vwrq,
7018 char *extra) 7010 char *extra)
7019{ 7011{
7020 struct airo_info *local = dev->priv; 7012 struct airo_info *local = dev->ml_priv;
7021 7013
7022 readConfigRid(local, 1); 7014 readConfigRid(local, 1);
7023 vwrq->value = le16_to_cpu(local->config.rssiThreshold); 7015 vwrq->value = le16_to_cpu(local->config.rssiThreshold);
@@ -7037,7 +7029,7 @@ static int airo_get_aplist(struct net_device *dev,
7037 struct iw_point *dwrq, 7029 struct iw_point *dwrq,
7038 char *extra) 7030 char *extra)
7039{ 7031{
7040 struct airo_info *local = dev->priv; 7032 struct airo_info *local = dev->ml_priv;
7041 struct sockaddr *address = (struct sockaddr *) extra; 7033 struct sockaddr *address = (struct sockaddr *) extra;
7042 struct iw_quality qual[IW_MAX_AP]; 7034 struct iw_quality qual[IW_MAX_AP];
7043 BSSListRid BSSList; 7035 BSSListRid BSSList;
@@ -7110,7 +7102,7 @@ static int airo_set_scan(struct net_device *dev,
7110 struct iw_point *dwrq, 7102 struct iw_point *dwrq,
7111 char *extra) 7103 char *extra)
7112{ 7104{
7113 struct airo_info *ai = dev->priv; 7105 struct airo_info *ai = dev->ml_priv;
7114 Cmd cmd; 7106 Cmd cmd;
7115 Resp rsp; 7107 Resp rsp;
7116 int wake = 0; 7108 int wake = 0;
@@ -7156,7 +7148,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
7156 char *end_buf, 7148 char *end_buf,
7157 BSSListRid *bss) 7149 BSSListRid *bss)
7158{ 7150{
7159 struct airo_info *ai = dev->priv; 7151 struct airo_info *ai = dev->ml_priv;
7160 struct iw_event iwe; /* Temporary buffer */ 7152 struct iw_event iwe; /* Temporary buffer */
7161 __le16 capabilities; 7153 __le16 capabilities;
7162 char * current_val; /* For rates */ 7154 char * current_val; /* For rates */
@@ -7274,56 +7266,53 @@ static inline char *airo_translate_scan(struct net_device *dev,
7274 if (test_bit(FLAG_WPA_CAPABLE, &ai->flags)) { 7266 if (test_bit(FLAG_WPA_CAPABLE, &ai->flags)) {
7275 unsigned int num_null_ies = 0; 7267 unsigned int num_null_ies = 0;
7276 u16 length = sizeof (bss->extra.iep); 7268 u16 length = sizeof (bss->extra.iep);
7277 struct ieee80211_info_element *info_element = 7269 u8 *ie = (void *)&bss->extra.iep;
7278 (struct ieee80211_info_element *) &bss->extra.iep;
7279 7270
7280 while ((length >= sizeof(*info_element)) && (num_null_ies < 2)) { 7271 while ((length >= 2) && (num_null_ies < 2)) {
7281 if (sizeof(*info_element) + info_element->len > length) { 7272 if (2 + ie[1] > length) {
7282 /* Invalid element, don't continue parsing IE */ 7273 /* Invalid element, don't continue parsing IE */
7283 break; 7274 break;
7284 } 7275 }
7285 7276
7286 switch (info_element->id) { 7277 switch (ie[0]) {
7287 case MFIE_TYPE_SSID: 7278 case WLAN_EID_SSID:
7288 /* Two zero-length SSID elements 7279 /* Two zero-length SSID elements
7289 * mean we're done parsing elements */ 7280 * mean we're done parsing elements */
7290 if (!info_element->len) 7281 if (!ie[1])
7291 num_null_ies++; 7282 num_null_ies++;
7292 break; 7283 break;
7293 7284
7294 case MFIE_TYPE_GENERIC: 7285 case WLAN_EID_GENERIC:
7295 if (info_element->len >= 4 && 7286 if (ie[1] >= 4 &&
7296 info_element->data[0] == 0x00 && 7287 ie[2] == 0x00 &&
7297 info_element->data[1] == 0x50 && 7288 ie[3] == 0x50 &&
7298 info_element->data[2] == 0xf2 && 7289 ie[4] == 0xf2 &&
7299 info_element->data[3] == 0x01) { 7290 ie[5] == 0x01) {
7300 iwe.cmd = IWEVGENIE; 7291 iwe.cmd = IWEVGENIE;
7301 iwe.u.data.length = min(info_element->len + 2, 7292 /* 64 is an arbitrary cut-off */
7302 MAX_WPA_IE_LEN); 7293 iwe.u.data.length = min(ie[1] + 2,
7294 64);
7303 current_ev = iwe_stream_add_point( 7295 current_ev = iwe_stream_add_point(
7304 info, current_ev, 7296 info, current_ev,
7305 end_buf, &iwe, 7297 end_buf, &iwe, ie);
7306 (char *) info_element);
7307 } 7298 }
7308 break; 7299 break;
7309 7300
7310 case MFIE_TYPE_RSN: 7301 case WLAN_EID_RSN:
7311 iwe.cmd = IWEVGENIE; 7302 iwe.cmd = IWEVGENIE;
7312 iwe.u.data.length = min(info_element->len + 2, 7303 /* 64 is an arbitrary cut-off */
7313 MAX_WPA_IE_LEN); 7304 iwe.u.data.length = min(ie[1] + 2, 64);
7314 current_ev = iwe_stream_add_point( 7305 current_ev = iwe_stream_add_point(
7315 info, current_ev, end_buf, 7306 info, current_ev, end_buf,
7316 &iwe, (char *) info_element); 7307 &iwe, ie);
7317 break; 7308 break;
7318 7309
7319 default: 7310 default:
7320 break; 7311 break;
7321 } 7312 }
7322 7313
7323 length -= sizeof(*info_element) + info_element->len; 7314 length -= 2 + ie[1];
7324 info_element = 7315 ie += 2 + ie[1];
7325 (struct ieee80211_info_element *)&info_element->
7326 data[info_element->len];
7327 } 7316 }
7328 } 7317 }
7329 return current_ev; 7318 return current_ev;
@@ -7338,7 +7327,7 @@ static int airo_get_scan(struct net_device *dev,
7338 struct iw_point *dwrq, 7327 struct iw_point *dwrq,
7339 char *extra) 7328 char *extra)
7340{ 7329{
7341 struct airo_info *ai = dev->priv; 7330 struct airo_info *ai = dev->ml_priv;
7342 BSSListElement *net; 7331 BSSListElement *net;
7343 int err = 0; 7332 int err = 0;
7344 char *current_ev = extra; 7333 char *current_ev = extra;
@@ -7382,7 +7371,7 @@ static int airo_config_commit(struct net_device *dev,
7382 void *zwrq, /* NULL */ 7371 void *zwrq, /* NULL */
7383 char *extra) /* NULL */ 7372 char *extra) /* NULL */
7384{ 7373{
7385 struct airo_info *local = dev->priv; 7374 struct airo_info *local = dev->ml_priv;
7386 7375
7387 if (!test_bit (FLAG_COMMIT, &local->flags)) 7376 if (!test_bit (FLAG_COMMIT, &local->flags))
7388 return 0; 7377 return 0;
@@ -7527,7 +7516,7 @@ static const struct iw_handler_def airo_handler_def =
7527static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 7516static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
7528{ 7517{
7529 int rc = 0; 7518 int rc = 0;
7530 struct airo_info *ai = (struct airo_info *)dev->priv; 7519 struct airo_info *ai = dev->ml_priv;
7531 7520
7532 if (ai->power.event) 7521 if (ai->power.event)
7533 return 0; 7522 return 0;
@@ -7655,7 +7644,7 @@ static void airo_read_wireless_stats(struct airo_info *local)
7655 7644
7656static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev) 7645static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
7657{ 7646{
7658 struct airo_info *local = dev->priv; 7647 struct airo_info *local = dev->ml_priv;
7659 7648
7660 if (!test_bit(JOB_WSTATS, &local->jobs)) { 7649 if (!test_bit(JOB_WSTATS, &local->jobs)) {
7661 /* Get stats out of the card if available */ 7650 /* Get stats out of the card if available */
@@ -7680,7 +7669,7 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
7680 unsigned short ridcode; 7669 unsigned short ridcode;
7681 unsigned char *iobuf; 7670 unsigned char *iobuf;
7682 int len; 7671 int len;
7683 struct airo_info *ai = dev->priv; 7672 struct airo_info *ai = dev->ml_priv;
7684 7673
7685 if (test_bit(FLAG_FLASHING, &ai->flags)) 7674 if (test_bit(FLAG_FLASHING, &ai->flags))
7686 return -EIO; 7675 return -EIO;
@@ -7746,7 +7735,7 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
7746 */ 7735 */
7747 7736
7748static int writerids(struct net_device *dev, aironet_ioctl *comp) { 7737static int writerids(struct net_device *dev, aironet_ioctl *comp) {
7749 struct airo_info *ai = dev->priv; 7738 struct airo_info *ai = dev->ml_priv;
7750 int ridcode; 7739 int ridcode;
7751 int enabled; 7740 int enabled;
7752 static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); 7741 static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
@@ -7869,41 +7858,41 @@ static int flashcard(struct net_device *dev, aironet_ioctl *comp) {
7869 switch(comp->command) 7858 switch(comp->command)
7870 { 7859 {
7871 case AIROFLSHRST: 7860 case AIROFLSHRST:
7872 return cmdreset((struct airo_info *)dev->priv); 7861 return cmdreset((struct airo_info *)dev->ml_priv);
7873 7862
7874 case AIROFLSHSTFL: 7863 case AIROFLSHSTFL:
7875 if (!((struct airo_info *)dev->priv)->flash && 7864 if (!AIRO_FLASH(dev) &&
7876 (((struct airo_info *)dev->priv)->flash = kmalloc (FLASHSIZE, GFP_KERNEL)) == NULL) 7865 (AIRO_FLASH(dev) = kmalloc(FLASHSIZE, GFP_KERNEL)) == NULL)
7877 return -ENOMEM; 7866 return -ENOMEM;
7878 return setflashmode((struct airo_info *)dev->priv); 7867 return setflashmode((struct airo_info *)dev->ml_priv);
7879 7868
7880 case AIROFLSHGCHR: /* Get char from aux */ 7869 case AIROFLSHGCHR: /* Get char from aux */
7881 if(comp->len != sizeof(int)) 7870 if(comp->len != sizeof(int))
7882 return -EINVAL; 7871 return -EINVAL;
7883 if (copy_from_user(&z,comp->data,comp->len)) 7872 if (copy_from_user(&z,comp->data,comp->len))
7884 return -EFAULT; 7873 return -EFAULT;
7885 return flashgchar((struct airo_info *)dev->priv,z,8000); 7874 return flashgchar((struct airo_info *)dev->ml_priv, z, 8000);
7886 7875
7887 case AIROFLSHPCHR: /* Send char to card. */ 7876 case AIROFLSHPCHR: /* Send char to card. */
7888 if(comp->len != sizeof(int)) 7877 if(comp->len != sizeof(int))
7889 return -EINVAL; 7878 return -EINVAL;
7890 if (copy_from_user(&z,comp->data,comp->len)) 7879 if (copy_from_user(&z,comp->data,comp->len))
7891 return -EFAULT; 7880 return -EFAULT;
7892 return flashpchar((struct airo_info *)dev->priv,z,8000); 7881 return flashpchar((struct airo_info *)dev->ml_priv, z, 8000);
7893 7882
7894 case AIROFLPUTBUF: /* Send 32k to card */ 7883 case AIROFLPUTBUF: /* Send 32k to card */
7895 if (!((struct airo_info *)dev->priv)->flash) 7884 if (!AIRO_FLASH(dev))
7896 return -ENOMEM; 7885 return -ENOMEM;
7897 if(comp->len > FLASHSIZE) 7886 if(comp->len > FLASHSIZE)
7898 return -EINVAL; 7887 return -EINVAL;
7899 if(copy_from_user(((struct airo_info *)dev->priv)->flash,comp->data,comp->len)) 7888 if (copy_from_user(AIRO_FLASH(dev), comp->data, comp->len))
7900 return -EFAULT; 7889 return -EFAULT;
7901 7890
7902 flashputbuf((struct airo_info *)dev->priv); 7891 flashputbuf((struct airo_info *)dev->ml_priv);
7903 return 0; 7892 return 0;
7904 7893
7905 case AIRORESTART: 7894 case AIRORESTART:
7906 if(flashrestart((struct airo_info *)dev->priv,dev)) 7895 if (flashrestart((struct airo_info *)dev->ml_priv, dev))
7907 return -EIO; 7896 return -EIO;
7908 return 0; 7897 return 0;
7909 } 7898 }
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
index dec5e874a54d..bfca15da6f0f 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/net/wireless/arlan-main.c
@@ -1467,19 +1467,17 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
1467 else if (hw_dst_addr[1] == 0x40) 1467 else if (hw_dst_addr[1] == 0x40)
1468 printk(KERN_ERR "%s m/bcast 0x0140 \n", dev->name); 1468 printk(KERN_ERR "%s m/bcast 0x0140 \n", dev->name);
1469 while (dmi) 1469 while (dmi)
1470 { if (dmi->dmi_addrlen == 6) 1470 {
1471 { 1471 if (dmi->dmi_addrlen == 6) {
1472 DECLARE_MAC_BUF(mac);
1473 if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP) 1472 if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP)
1474 printk(KERN_ERR "%s mcl %s\n", 1473 printk(KERN_ERR "%s mcl %pM\n",
1475 dev->name, print_mac(mac, dmi->dmi_addr)); 1474 dev->name, dmi->dmi_addr);
1476 for (i = 0; i < 6; i++) 1475 for (i = 0; i < 6; i++)
1477 if (dmi->dmi_addr[i] != hw_dst_addr[i]) 1476 if (dmi->dmi_addr[i] != hw_dst_addr[i])
1478 break; 1477 break;
1479 if (i == 6) 1478 if (i == 6)
1480 break; 1479 break;
1481 } 1480 } else
1482 else
1483 printk(KERN_ERR "%s: invalid multicast address length given.\n", dev->name); 1481 printk(KERN_ERR "%s: invalid multicast address length given.\n", dev->name);
1484 dmi = dmi->next; 1482 dmi = dmi->next;
1485 } 1483 }
@@ -1512,18 +1510,14 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
1512 { 1510 {
1513 char immedDestAddress[6]; 1511 char immedDestAddress[6];
1514 char immedSrcAddress[6]; 1512 char immedSrcAddress[6];
1515 DECLARE_MAC_BUF(mac);
1516 DECLARE_MAC_BUF(mac2);
1517 DECLARE_MAC_BUF(mac3);
1518 DECLARE_MAC_BUF(mac4);
1519 memcpy_fromio(immedDestAddress, arlan->immedDestAddress, 6); 1513 memcpy_fromio(immedDestAddress, arlan->immedDestAddress, 6);
1520 memcpy_fromio(immedSrcAddress, arlan->immedSrcAddress, 6); 1514 memcpy_fromio(immedSrcAddress, arlan->immedSrcAddress, 6);
1521 1515
1522 printk(KERN_WARNING "%s t %s f %s imd %s ims %s\n", 1516 printk(KERN_WARNING "%s t %pM f %pM imd %pM ims %pM\n",
1523 dev->name, print_mac(mac, skbtmp), 1517 dev->name, skbtmp,
1524 print_mac(mac2, &skbtmp[6]), 1518 &skbtmp[6],
1525 print_mac(mac3, immedDestAddress), 1519 immedDestAddress,
1526 print_mac(mac4, immedSrcAddress)); 1520 immedSrcAddress);
1527 } 1521 }
1528 skb->protocol = eth_type_trans(skb, dev); 1522 skb->protocol = eth_type_trans(skb, dev);
1529 IFDEBUG(ARLAN_DEBUG_HEADER_DUMP) 1523 IFDEBUG(ARLAN_DEBUG_HEADER_DUMP)
@@ -1535,7 +1529,6 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
1535 printk(KERN_WARNING "arlan kernel pkt type trans %x \n", skb->protocol); 1529 printk(KERN_WARNING "arlan kernel pkt type trans %x \n", skb->protocol);
1536 } 1530 }
1537 netif_rx(skb); 1531 netif_rx(skb);
1538 dev->last_rx = jiffies;
1539 dev->stats.rx_packets++; 1532 dev->stats.rx_packets++;
1540 dev->stats.rx_bytes += pkt_len; 1533 dev->stats.rx_bytes += pkt_len;
1541 } 1534 }
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index 53ea439aff48..183ffc8e62ca 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -507,11 +507,15 @@ enum ath5k_tx_queue_id {
507#define AR5K_TXQ_FLAG_TXEOLINT_ENABLE 0x0004 /* Enable TXEOL interrupt -not used- */ 507#define AR5K_TXQ_FLAG_TXEOLINT_ENABLE 0x0004 /* Enable TXEOL interrupt -not used- */
508#define AR5K_TXQ_FLAG_TXDESCINT_ENABLE 0x0008 /* Enable TXDESC interrupt -not used- */ 508#define AR5K_TXQ_FLAG_TXDESCINT_ENABLE 0x0008 /* Enable TXDESC interrupt -not used- */
509#define AR5K_TXQ_FLAG_TXURNINT_ENABLE 0x0010 /* Enable TXURN interrupt */ 509#define AR5K_TXQ_FLAG_TXURNINT_ENABLE 0x0010 /* Enable TXURN interrupt */
510#define AR5K_TXQ_FLAG_BACKOFF_DISABLE 0x0020 /* Disable random post-backoff */ 510#define AR5K_TXQ_FLAG_CBRORNINT_ENABLE 0x0020 /* Enable CBRORN interrupt */
511#define AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE 0x0040 /* Enable ready time expiry policy (?)*/ 511#define AR5K_TXQ_FLAG_CBRURNINT_ENABLE 0x0040 /* Enable CBRURN interrupt */
512#define AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE 0x0080 /* Enable backoff while bursting */ 512#define AR5K_TXQ_FLAG_QTRIGINT_ENABLE 0x0080 /* Enable QTRIG interrupt */
513#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x0100 /* Disable backoff while bursting */ 513#define AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE 0x0100 /* Enable TXNOFRM interrupt */
514#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x0200 /* Enable hw compression -not implemented-*/ 514#define AR5K_TXQ_FLAG_BACKOFF_DISABLE 0x0200 /* Disable random post-backoff */
515#define AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE 0x0300 /* Enable ready time expiry policy (?)*/
516#define AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE 0x0800 /* Enable backoff while bursting */
517#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */
518#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/
515 519
516/* 520/*
517 * A struct to hold tx queue's parameters 521 * A struct to hold tx queue's parameters
@@ -817,13 +821,6 @@ struct ath5k_athchan_2ghz {
817 return (false); \ 821 return (false); \
818} while (0) 822} while (0)
819 823
820enum ath5k_ant_setting {
821 AR5K_ANT_VARIABLE = 0, /* variable by programming */
822 AR5K_ANT_FIXED_A = 1, /* fixed to 11a frequencies */
823 AR5K_ANT_FIXED_B = 2, /* fixed to 11b frequencies */
824 AR5K_ANT_MAX = 3,
825};
826
827/* 824/*
828 * Hardware interrupt abstraction 825 * Hardware interrupt abstraction
829 */ 826 */
@@ -853,7 +850,7 @@ enum ath5k_ant_setting {
853 * checked. We should do this with ath5k_hw_update_mib_counters() but 850 * checked. We should do this with ath5k_hw_update_mib_counters() but
854 * it seems we should also then do some noise immunity work. 851 * it seems we should also then do some noise immunity work.
855 * @AR5K_INT_RXPHY: RX PHY Error 852 * @AR5K_INT_RXPHY: RX PHY Error
856 * @AR5K_INT_RXKCM: ?? 853 * @AR5K_INT_RXKCM: RX Key cache miss
857 * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a 854 * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
858 * beacon that must be handled in software. The alternative is if you 855 * beacon that must be handled in software. The alternative is if you
859 * have VEOL support, in that case you let the hardware deal with things. 856 * have VEOL support, in that case you let the hardware deal with things.
@@ -869,7 +866,7 @@ enum ath5k_ant_setting {
869 * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA 866 * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA
870 * errors. These types of errors we can enable seem to be of type 867 * errors. These types of errors we can enable seem to be of type
871 * AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR. 868 * AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR.
872 * @AR5K_INT_GLOBAL: Seems to be used to clear and set the IER 869 * @AR5K_INT_GLOBAL: Used to clear and set the IER
873 * @AR5K_INT_NOCARD: signals the card has been removed 870 * @AR5K_INT_NOCARD: signals the card has been removed
874 * @AR5K_INT_COMMON: common interrupts shared amogst MACs with the same 871 * @AR5K_INT_COMMON: common interrupts shared amogst MACs with the same
875 * bit value 872 * bit value
@@ -881,36 +878,61 @@ enum ath5k_ant_setting {
881 * MACs. 878 * MACs.
882 */ 879 */
883enum ath5k_int { 880enum ath5k_int {
884 AR5K_INT_RX = 0x00000001, /* Not common */ 881 AR5K_INT_RXOK = 0x00000001,
885 AR5K_INT_RXDESC = 0x00000002, 882 AR5K_INT_RXDESC = 0x00000002,
883 AR5K_INT_RXERR = 0x00000004,
886 AR5K_INT_RXNOFRM = 0x00000008, 884 AR5K_INT_RXNOFRM = 0x00000008,
887 AR5K_INT_RXEOL = 0x00000010, 885 AR5K_INT_RXEOL = 0x00000010,
888 AR5K_INT_RXORN = 0x00000020, 886 AR5K_INT_RXORN = 0x00000020,
889 AR5K_INT_TX = 0x00000040, /* Not common */ 887 AR5K_INT_TXOK = 0x00000040,
890 AR5K_INT_TXDESC = 0x00000080, 888 AR5K_INT_TXDESC = 0x00000080,
889 AR5K_INT_TXERR = 0x00000100,
890 AR5K_INT_TXNOFRM = 0x00000200,
891 AR5K_INT_TXEOL = 0x00000400,
891 AR5K_INT_TXURN = 0x00000800, 892 AR5K_INT_TXURN = 0x00000800,
892 AR5K_INT_MIB = 0x00001000, 893 AR5K_INT_MIB = 0x00001000,
894 AR5K_INT_SWI = 0x00002000,
893 AR5K_INT_RXPHY = 0x00004000, 895 AR5K_INT_RXPHY = 0x00004000,
894 AR5K_INT_RXKCM = 0x00008000, 896 AR5K_INT_RXKCM = 0x00008000,
895 AR5K_INT_SWBA = 0x00010000, 897 AR5K_INT_SWBA = 0x00010000,
898 AR5K_INT_BRSSI = 0x00020000,
896 AR5K_INT_BMISS = 0x00040000, 899 AR5K_INT_BMISS = 0x00040000,
897 AR5K_INT_BNR = 0x00100000, /* Not common */ 900 AR5K_INT_FATAL = 0x00080000, /* Non common */
898 AR5K_INT_GPIO = 0x01000000, 901 AR5K_INT_BNR = 0x00100000, /* Non common */
899 AR5K_INT_FATAL = 0x40000000, /* Not common */ 902 AR5K_INT_TIM = 0x00200000, /* Non common */
900 AR5K_INT_GLOBAL = 0x80000000, 903 AR5K_INT_DTIM = 0x00400000, /* Non common */
901 904 AR5K_INT_DTIM_SYNC = 0x00800000, /* Non common */
902 AR5K_INT_COMMON = AR5K_INT_RXNOFRM 905 AR5K_INT_GPIO = 0x01000000,
903 | AR5K_INT_RXDESC 906 AR5K_INT_BCN_TIMEOUT = 0x02000000, /* Non common */
904 | AR5K_INT_RXEOL 907 AR5K_INT_CAB_TIMEOUT = 0x04000000, /* Non common */
905 | AR5K_INT_RXORN 908 AR5K_INT_RX_DOPPLER = 0x08000000, /* Non common */
906 | AR5K_INT_TXURN 909 AR5K_INT_QCBRORN = 0x10000000, /* Non common */
907 | AR5K_INT_TXDESC 910 AR5K_INT_QCBRURN = 0x20000000, /* Non common */
908 | AR5K_INT_MIB 911 AR5K_INT_QTRIG = 0x40000000, /* Non common */
909 | AR5K_INT_RXPHY 912 AR5K_INT_GLOBAL = 0x80000000,
910 | AR5K_INT_RXKCM 913
911 | AR5K_INT_SWBA 914 AR5K_INT_COMMON = AR5K_INT_RXOK
912 | AR5K_INT_BMISS 915 | AR5K_INT_RXDESC
913 | AR5K_INT_GPIO, 916 | AR5K_INT_RXERR
917 | AR5K_INT_RXNOFRM
918 | AR5K_INT_RXEOL
919 | AR5K_INT_RXORN
920 | AR5K_INT_TXOK
921 | AR5K_INT_TXDESC
922 | AR5K_INT_TXERR
923 | AR5K_INT_TXNOFRM
924 | AR5K_INT_TXEOL
925 | AR5K_INT_TXURN
926 | AR5K_INT_MIB
927 | AR5K_INT_SWI
928 | AR5K_INT_RXPHY
929 | AR5K_INT_RXKCM
930 | AR5K_INT_SWBA
931 | AR5K_INT_BRSSI
932 | AR5K_INT_BMISS
933 | AR5K_INT_GPIO
934 | AR5K_INT_GLOBAL,
935
914 AR5K_INT_NOCARD = 0xffffffff 936 AR5K_INT_NOCARD = 0xffffffff
915}; 937};
916 938
@@ -1030,6 +1052,7 @@ struct ath5k_hw {
1030 bool ah_calibration; 1052 bool ah_calibration;
1031 bool ah_running; 1053 bool ah_running;
1032 bool ah_single_chip; 1054 bool ah_single_chip;
1055 bool ah_combined_mic;
1033 enum ath5k_rfgain ah_rf_gain; 1056 enum ath5k_rfgain ah_rf_gain;
1034 1057
1035 u32 ah_mac_srev; 1058 u32 ah_mac_srev;
@@ -1064,10 +1087,11 @@ struct ath5k_hw {
1064 1087
1065 u8 ah_sta_id[ETH_ALEN]; 1088 u8 ah_sta_id[ETH_ALEN];
1066 1089
1067 /* Current BSSID we are trying to assoc to / creating. 1090 /* Current BSSID we are trying to assoc to / create.
1068 * This is passed by mac80211 on config_interface() and cached here for 1091 * This is passed by mac80211 on config_interface() and cached here for
1069 * use in resets */ 1092 * use in resets */
1070 u8 ah_bssid[ETH_ALEN]; 1093 u8 ah_bssid[ETH_ALEN];
1094 u8 ah_bssid_mask[ETH_ALEN];
1071 1095
1072 u32 ah_gpio[AR5K_MAX_GPIO]; 1096 u32 ah_gpio[AR5K_MAX_GPIO];
1073 int ah_gpio_npins; 1097 int ah_gpio_npins;
@@ -1081,6 +1105,11 @@ struct ath5k_hw {
1081 u32 ah_txq_imr_txurn; 1105 u32 ah_txq_imr_txurn;
1082 u32 ah_txq_imr_txdesc; 1106 u32 ah_txq_imr_txdesc;
1083 u32 ah_txq_imr_txeol; 1107 u32 ah_txq_imr_txeol;
1108 u32 ah_txq_imr_cbrorn;
1109 u32 ah_txq_imr_cbrurn;
1110 u32 ah_txq_imr_qtrig;
1111 u32 ah_txq_imr_nofrm;
1112 u32 ah_txq_isr;
1084 u32 *ah_rf_banks; 1113 u32 *ah_rf_banks;
1085 size_t ah_rf_banks_size; 1114 size_t ah_rf_banks_size;
1086 struct ath5k_gain ah_gain; 1115 struct ath5k_gain ah_gain;
@@ -1321,4 +1350,9 @@ static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
1321 return retval; 1350 return retval;
1322} 1351}
1323 1352
1353static inline int ath5k_pad_size(int hdrlen)
1354{
1355 return (hdrlen < 24) ? 0 : hdrlen & 3;
1356}
1357
1324#endif 1358#endif
diff --git a/drivers/net/wireless/ath5k/attach.c b/drivers/net/wireless/ath5k/attach.c
index 51d569883cdd..dea378f76731 100644
--- a/drivers/net/wireless/ath5k/attach.c
+++ b/drivers/net/wireless/ath5k/attach.c
@@ -106,7 +106,7 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
106{ 106{
107 struct ath5k_hw *ah; 107 struct ath5k_hw *ah;
108 struct pci_dev *pdev = sc->pdev; 108 struct pci_dev *pdev = sc->pdev;
109 u8 mac[ETH_ALEN]; 109 u8 mac[ETH_ALEN] = {};
110 int ret; 110 int ret;
111 u32 srev; 111 u32 srev;
112 112
@@ -317,15 +317,15 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
317 goto err_free; 317 goto err_free;
318 } 318 }
319 319
320 /* Set MAC address */ 320 if (srev >= AR5K_SREV_AR2414) {
321 ret = ath5k_eeprom_read_mac(ah, mac); 321 ah->ah_combined_mic = true;
322 if (ret) { 322 AR5K_REG_ENABLE_BITS(ah, AR5K_MISC_MODE,
323 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n", 323 AR5K_MISC_MODE_COMBINED_MIC);
324 sc->pdev->device);
325 goto err_free;
326 } 324 }
327 325
326 /* MAC address is cleared until add_interface */
328 ath5k_hw_set_lladdr(ah, mac); 327 ath5k_hw_set_lladdr(ah, mac);
328
329 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */ 329 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
330 memset(ah->ah_bssid, 0xff, ETH_ALEN); 330 memset(ah->ah_bssid, 0xff, ETH_ALEN);
331 ath5k_hw_set_associd(ah, ah->ah_bssid, 0); 331 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 2d14255eb103..4af2607deec0 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -60,6 +60,9 @@
60#include "debug.h" 60#include "debug.h"
61 61
62static int ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */ 62static int ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */
63static int modparam_nohwcrypt;
64module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
65MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
63 66
64 67
65/******************\ 68/******************\
@@ -197,7 +200,7 @@ static int ath5k_pci_resume(struct pci_dev *pdev);
197#endif /* CONFIG_PM */ 200#endif /* CONFIG_PM */
198 201
199static struct pci_driver ath5k_pci_driver = { 202static struct pci_driver ath5k_pci_driver = {
200 .name = "ath5k_pci", 203 .name = KBUILD_MODNAME,
201 .id_table = ath5k_pci_id_table, 204 .id_table = ath5k_pci_id_table,
202 .probe = ath5k_pci_probe, 205 .probe = ath5k_pci_probe,
203 .remove = __devexit_p(ath5k_pci_remove), 206 .remove = __devexit_p(ath5k_pci_remove),
@@ -219,8 +222,7 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
219 struct ieee80211_if_init_conf *conf); 222 struct ieee80211_if_init_conf *conf);
220static void ath5k_remove_interface(struct ieee80211_hw *hw, 223static void ath5k_remove_interface(struct ieee80211_hw *hw,
221 struct ieee80211_if_init_conf *conf); 224 struct ieee80211_if_init_conf *conf);
222static int ath5k_config(struct ieee80211_hw *hw, 225static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
223 struct ieee80211_conf *conf);
224static int ath5k_config_interface(struct ieee80211_hw *hw, 226static int ath5k_config_interface(struct ieee80211_hw *hw,
225 struct ieee80211_vif *vif, 227 struct ieee80211_vif *vif,
226 struct ieee80211_if_conf *conf); 228 struct ieee80211_if_conf *conf);
@@ -238,7 +240,7 @@ static int ath5k_get_tx_stats(struct ieee80211_hw *hw,
238 struct ieee80211_tx_queue_stats *stats); 240 struct ieee80211_tx_queue_stats *stats);
239static u64 ath5k_get_tsf(struct ieee80211_hw *hw); 241static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
240static void ath5k_reset_tsf(struct ieee80211_hw *hw); 242static void ath5k_reset_tsf(struct ieee80211_hw *hw);
241static int ath5k_beacon_update(struct ieee80211_hw *hw, 243static int ath5k_beacon_update(struct ath5k_softc *sc,
242 struct sk_buff *skb); 244 struct sk_buff *skb);
243static void ath5k_bss_info_changed(struct ieee80211_hw *hw, 245static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
244 struct ieee80211_vif *vif, 246 struct ieee80211_vif *vif,
@@ -548,8 +550,8 @@ ath5k_pci_probe(struct pci_dev *pdev,
548 550
549 /* set up multi-rate retry capabilities */ 551 /* set up multi-rate retry capabilities */
550 if (sc->ah->ah_version == AR5K_AR5212) { 552 if (sc->ah->ah_version == AR5K_AR5212) {
551 hw->max_altrates = 3; 553 hw->max_rates = 4;
552 hw->max_altrate_tries = 11; 554 hw->max_rate_tries = 11;
553 } 555 }
554 556
555 /* Finish private driver data initialization */ 557 /* Finish private driver data initialization */
@@ -711,7 +713,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
711{ 713{
712 struct ath5k_softc *sc = hw->priv; 714 struct ath5k_softc *sc = hw->priv;
713 struct ath5k_hw *ah = sc->ah; 715 struct ath5k_hw *ah = sc->ah;
714 u8 mac[ETH_ALEN]; 716 u8 mac[ETH_ALEN] = {};
715 int ret; 717 int ret;
716 718
717 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device); 719 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
@@ -781,7 +783,13 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
781 tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc); 783 tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc);
782 setup_timer(&sc->calib_tim, ath5k_calibrate, (unsigned long)sc); 784 setup_timer(&sc->calib_tim, ath5k_calibrate, (unsigned long)sc);
783 785
784 ath5k_hw_get_lladdr(ah, mac); 786 ret = ath5k_eeprom_read_mac(ah, mac);
787 if (ret) {
788 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n",
789 sc->pdev->device);
790 goto err_queues;
791 }
792
785 SET_IEEE80211_PERM_ADDR(hw, mac); 793 SET_IEEE80211_PERM_ADDR(hw, mac);
786 /* All MAC address bits matter for ACKs */ 794 /* All MAC address bits matter for ACKs */
787 memset(sc->bssidmask, 0xff, ETH_ALEN); 795 memset(sc->bssidmask, 0xff, ETH_ALEN);
@@ -1188,7 +1196,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1188 ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL, 1196 ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
1189 (sc->power_level * 2), 1197 (sc->power_level * 2),
1190 ieee80211_get_tx_rate(sc->hw, info)->hw_value, 1198 ieee80211_get_tx_rate(sc->hw, info)->hw_value,
1191 info->control.retry_limit, keyidx, 0, flags, 0, 0); 1199 info->control.rates[0].count, keyidx, 0, flags, 0, 0);
1192 if (ret) 1200 if (ret)
1193 goto err_unmap; 1201 goto err_unmap;
1194 1202
@@ -1200,7 +1208,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1200 break; 1208 break;
1201 1209
1202 mrr_rate[i] = rate->hw_value; 1210 mrr_rate[i] = rate->hw_value;
1203 mrr_tries[i] = info->control.retries[i].limit; 1211 mrr_tries[i] = info->control.rates[i + 1].count;
1204 } 1212 }
1205 1213
1206 ah->ah_setup_mrr_tx_desc(ah, ds, 1214 ah->ah_setup_mrr_tx_desc(ah, ds,
@@ -1660,7 +1668,7 @@ ath5k_tasklet_rx(unsigned long data)
1660 struct ath5k_desc *ds; 1668 struct ath5k_desc *ds;
1661 int ret; 1669 int ret;
1662 int hdrlen; 1670 int hdrlen;
1663 int pad; 1671 int padsize;
1664 1672
1665 spin_lock(&sc->rxbuflock); 1673 spin_lock(&sc->rxbuflock);
1666 if (list_empty(&sc->rxbuf)) { 1674 if (list_empty(&sc->rxbuf)) {
@@ -1745,16 +1753,19 @@ accept:
1745 1753
1746 skb_put(skb, rs.rs_datalen); 1754 skb_put(skb, rs.rs_datalen);
1747 1755
1748 /* 1756 /* The MAC header is padded to have 32-bit boundary if the
1749 * the hardware adds a padding to 4 byte boundaries between 1757 * packet payload is non-zero. The general calculation for
1750 * the header and the payload data if the header length is 1758 * padsize would take into account odd header lengths:
1751 * not multiples of 4 - remove it 1759 * padsize = (4 - hdrlen % 4) % 4; However, since only
1752 */ 1760 * even-length headers are used, padding can only be 0 or 2
1761 * bytes and we can optimize this a bit. In addition, we must
1762 * not try to remove padding from short control frames that do
1763 * not have payload. */
1753 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1764 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1754 if (hdrlen & 3) { 1765 padsize = ath5k_pad_size(hdrlen);
1755 pad = hdrlen % 4; 1766 if (padsize) {
1756 memmove(skb->data + pad, skb->data, hdrlen); 1767 memmove(skb->data + padsize, skb->data, hdrlen);
1757 skb_pull(skb, pad); 1768 skb_pull(skb, padsize);
1758 } 1769 }
1759 1770
1760 /* 1771 /*
@@ -1785,7 +1796,17 @@ accept:
1785 1796
1786 rxs.noise = sc->ah->ah_noise_floor; 1797 rxs.noise = sc->ah->ah_noise_floor;
1787 rxs.signal = rxs.noise + rs.rs_rssi; 1798 rxs.signal = rxs.noise + rs.rs_rssi;
1788 rxs.qual = rs.rs_rssi * 100 / 64; 1799
1800 /* An rssi of 35 indicates you should be able use
1801 * 54 Mbps reliably. A more elaborate scheme can be used
1802 * here but it requires a map of SNR/throughput for each
1803 * possible mode used */
1804 rxs.qual = rs.rs_rssi * 100 / 35;
1805
1806 /* rssi can be more than 35 though, anything above that
1807 * should be considered at 100% */
1808 if (rxs.qual > 100)
1809 rxs.qual = 100;
1789 1810
1790 rxs.antenna = rs.rs_antenna; 1811 rxs.antenna = rs.rs_antenna;
1791 rxs.rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); 1812 rxs.rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
@@ -1846,30 +1867,26 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1846 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, 1867 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
1847 PCI_DMA_TODEVICE); 1868 PCI_DMA_TODEVICE);
1848 1869
1849 memset(&info->status, 0, sizeof(info->status)); 1870 ieee80211_tx_info_clear_status(info);
1850 info->tx_rate_idx = ath5k_hw_to_driver_rix(sc,
1851 ts.ts_rate[ts.ts_final_idx]);
1852 info->status.retry_count = ts.ts_longretry;
1853
1854 for (i = 0; i < 4; i++) { 1871 for (i = 0; i < 4; i++) {
1855 struct ieee80211_tx_altrate *r = 1872 struct ieee80211_tx_rate *r =
1856 &info->status.retries[i]; 1873 &info->status.rates[i];
1857 1874
1858 if (ts.ts_rate[i]) { 1875 if (ts.ts_rate[i]) {
1859 r->rate_idx = ath5k_hw_to_driver_rix(sc, ts.ts_rate[i]); 1876 r->idx = ath5k_hw_to_driver_rix(sc, ts.ts_rate[i]);
1860 r->limit = ts.ts_retry[i]; 1877 r->count = ts.ts_retry[i];
1861 } else { 1878 } else {
1862 r->rate_idx = -1; 1879 r->idx = -1;
1863 r->limit = 0; 1880 r->count = 0;
1864 } 1881 }
1865 } 1882 }
1866 1883
1867 info->status.excessive_retries = 0; 1884 /* count the successful attempt as well */
1885 info->status.rates[ts.ts_final_idx].count++;
1886
1868 if (unlikely(ts.ts_status)) { 1887 if (unlikely(ts.ts_status)) {
1869 sc->ll_stats.dot11ACKFailureCount++; 1888 sc->ll_stats.dot11ACKFailureCount++;
1870 if (ts.ts_status & AR5K_TXERR_XRETRY) 1889 if (ts.ts_status & AR5K_TXERR_FILT)
1871 info->status.excessive_retries = 1;
1872 else if (ts.ts_status & AR5K_TXERR_FILT)
1873 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1890 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1874 } else { 1891 } else {
1875 info->flags |= IEEE80211_TX_STAT_ACK; 1892 info->flags |= IEEE80211_TX_STAT_ACK;
@@ -2143,8 +2160,6 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
2143 * 2160 *
2144 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA 2161 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
2145 * interrupts to detect TSF updates only. 2162 * interrupts to detect TSF updates only.
2146 *
2147 * AP mode is missing.
2148 */ 2163 */
2149static void 2164static void
2150ath5k_beacon_config(struct ath5k_softc *sc) 2165ath5k_beacon_config(struct ath5k_softc *sc)
@@ -2157,7 +2172,9 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2157 2172
2158 if (sc->opmode == NL80211_IFTYPE_STATION) { 2173 if (sc->opmode == NL80211_IFTYPE_STATION) {
2159 sc->imask |= AR5K_INT_BMISS; 2174 sc->imask |= AR5K_INT_BMISS;
2160 } else if (sc->opmode == NL80211_IFTYPE_ADHOC) { 2175 } else if (sc->opmode == NL80211_IFTYPE_ADHOC ||
2176 sc->opmode == NL80211_IFTYPE_MESH_POINT ||
2177 sc->opmode == NL80211_IFTYPE_AP) {
2161 /* 2178 /*
2162 * In IBSS mode we use a self-linked tx descriptor and let the 2179 * In IBSS mode we use a self-linked tx descriptor and let the
2163 * hardware send the beacons automatically. We have to load it 2180 * hardware send the beacons automatically. We have to load it
@@ -2169,13 +2186,15 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2169 2186
2170 sc->imask |= AR5K_INT_SWBA; 2187 sc->imask |= AR5K_INT_SWBA;
2171 2188
2172 if (ath5k_hw_hasveol(ah)) { 2189 if (sc->opmode == NL80211_IFTYPE_ADHOC) {
2173 spin_lock(&sc->block); 2190 if (ath5k_hw_hasveol(ah)) {
2174 ath5k_beacon_send(sc); 2191 spin_lock(&sc->block);
2175 spin_unlock(&sc->block); 2192 ath5k_beacon_send(sc);
2176 } 2193 spin_unlock(&sc->block);
2194 }
2195 } else
2196 ath5k_beacon_update_timers(sc, -1);
2177 } 2197 }
2178 /* TODO else AP */
2179 2198
2180 ath5k_hw_set_imr(ah, sc->imask); 2199 ath5k_hw_set_imr(ah, sc->imask);
2181} 2200}
@@ -2215,9 +2234,9 @@ ath5k_init(struct ath5k_softc *sc, bool is_resume)
2215 */ 2234 */
2216 sc->curchan = sc->hw->conf.channel; 2235 sc->curchan = sc->hw->conf.channel;
2217 sc->curband = &sc->sbands[sc->curchan->band]; 2236 sc->curband = &sc->sbands[sc->curchan->band];
2218 sc->imask = AR5K_INT_RX | AR5K_INT_TX | AR5K_INT_RXEOL | 2237 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2219 AR5K_INT_RXORN | AR5K_INT_FATAL | AR5K_INT_GLOBAL | 2238 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2220 AR5K_INT_MIB; 2239 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2221 ret = ath5k_reset(sc, false, false); 2240 ret = ath5k_reset(sc, false, false);
2222 if (ret) 2241 if (ret)
2223 goto done; 2242 goto done;
@@ -2409,9 +2428,10 @@ ath5k_intr(int irq, void *dev_id)
2409 /* bump tx trigger level */ 2428 /* bump tx trigger level */
2410 ath5k_hw_update_tx_triglevel(ah, true); 2429 ath5k_hw_update_tx_triglevel(ah, true);
2411 } 2430 }
2412 if (status & AR5K_INT_RX) 2431 if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
2413 tasklet_schedule(&sc->rxtq); 2432 tasklet_schedule(&sc->rxtq);
2414 if (status & AR5K_INT_TX) 2433 if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
2434 | AR5K_INT_TXERR | AR5K_INT_TXEOL))
2415 tasklet_schedule(&sc->txtq); 2435 tasklet_schedule(&sc->txtq);
2416 if (status & AR5K_INT_BMISS) { 2436 if (status & AR5K_INT_BMISS) {
2417 } 2437 }
@@ -2527,8 +2547,7 @@ ath5k_register_led(struct ath5k_softc *sc, struct ath5k_led *led,
2527 led->led_dev.brightness_set = ath5k_led_brightness_set; 2547 led->led_dev.brightness_set = ath5k_led_brightness_set;
2528 2548
2529 err = led_classdev_register(&sc->pdev->dev, &led->led_dev); 2549 err = led_classdev_register(&sc->pdev->dev, &led->led_dev);
2530 if (err) 2550 if (err) {
2531 {
2532 ATH5K_WARN(sc, "could not register LED %s\n", name); 2551 ATH5K_WARN(sc, "could not register LED %s\n", name);
2533 led->sc = NULL; 2552 led->sc = NULL;
2534 } 2553 }
@@ -2607,7 +2626,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2607 struct ath5k_buf *bf; 2626 struct ath5k_buf *bf;
2608 unsigned long flags; 2627 unsigned long flags;
2609 int hdrlen; 2628 int hdrlen;
2610 int pad; 2629 int padsize;
2611 2630
2612 ath5k_debug_dump_skb(sc, skb, "TX ", 1); 2631 ath5k_debug_dump_skb(sc, skb, "TX ", 1);
2613 2632
@@ -2619,15 +2638,16 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2619 * if this is not the case we add the padding after the header 2638 * if this is not the case we add the padding after the header
2620 */ 2639 */
2621 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 2640 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2622 if (hdrlen & 3) { 2641 padsize = ath5k_pad_size(hdrlen);
2623 pad = hdrlen % 4; 2642 if (padsize) {
2624 if (skb_headroom(skb) < pad) { 2643
2644 if (skb_headroom(skb) < padsize) {
2625 ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough" 2645 ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
2626 " headroom to pad %d\n", hdrlen, pad); 2646 " headroom to pad %d\n", hdrlen, padsize);
2627 return -1; 2647 return -1;
2628 } 2648 }
2629 skb_push(skb, pad); 2649 skb_push(skb, padsize);
2630 memmove(skb->data, skb->data+pad, hdrlen); 2650 memmove(skb->data, skb->data+padsize, hdrlen);
2631 } 2651 }
2632 2652
2633 spin_lock_irqsave(&sc->txbuflock, flags); 2653 spin_lock_irqsave(&sc->txbuflock, flags);
@@ -2746,8 +2766,10 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2746 sc->vif = conf->vif; 2766 sc->vif = conf->vif;
2747 2767
2748 switch (conf->type) { 2768 switch (conf->type) {
2769 case NL80211_IFTYPE_AP:
2749 case NL80211_IFTYPE_STATION: 2770 case NL80211_IFTYPE_STATION:
2750 case NL80211_IFTYPE_ADHOC: 2771 case NL80211_IFTYPE_ADHOC:
2772 case NL80211_IFTYPE_MESH_POINT:
2751 case NL80211_IFTYPE_MONITOR: 2773 case NL80211_IFTYPE_MONITOR:
2752 sc->opmode = conf->type; 2774 sc->opmode = conf->type;
2753 break; 2775 break;
@@ -2759,6 +2781,7 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2759 /* Set to a reasonable value. Note that this will 2781 /* Set to a reasonable value. Note that this will
2760 * be set to mac80211's value at ath5k_config(). */ 2782 * be set to mac80211's value at ath5k_config(). */
2761 sc->bintval = 1000; 2783 sc->bintval = 1000;
2784 ath5k_hw_set_lladdr(sc->ah, conf->mac_addr);
2762 2785
2763 ret = 0; 2786 ret = 0;
2764end: 2787end:
@@ -2771,11 +2794,13 @@ ath5k_remove_interface(struct ieee80211_hw *hw,
2771 struct ieee80211_if_init_conf *conf) 2794 struct ieee80211_if_init_conf *conf)
2772{ 2795{
2773 struct ath5k_softc *sc = hw->priv; 2796 struct ath5k_softc *sc = hw->priv;
2797 u8 mac[ETH_ALEN] = {};
2774 2798
2775 mutex_lock(&sc->lock); 2799 mutex_lock(&sc->lock);
2776 if (sc->vif != conf->vif) 2800 if (sc->vif != conf->vif)
2777 goto end; 2801 goto end;
2778 2802
2803 ath5k_hw_set_lladdr(sc->ah, mac);
2779 sc->vif = NULL; 2804 sc->vif = NULL;
2780end: 2805end:
2781 mutex_unlock(&sc->lock); 2806 mutex_unlock(&sc->lock);
@@ -2785,10 +2810,10 @@ end:
2785 * TODO: Phy disable/diversity etc 2810 * TODO: Phy disable/diversity etc
2786 */ 2811 */
2787static int 2812static int
2788ath5k_config(struct ieee80211_hw *hw, 2813ath5k_config(struct ieee80211_hw *hw, u32 changed)
2789 struct ieee80211_conf *conf)
2790{ 2814{
2791 struct ath5k_softc *sc = hw->priv; 2815 struct ath5k_softc *sc = hw->priv;
2816 struct ieee80211_conf *conf = &hw->conf;
2792 2817
2793 sc->bintval = conf->beacon_int; 2818 sc->bintval = conf->beacon_int;
2794 sc->power_level = conf->power_level; 2819 sc->power_level = conf->power_level;
@@ -2809,7 +2834,7 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2809 ret = -EIO; 2834 ret = -EIO;
2810 goto unlock; 2835 goto unlock;
2811 } 2836 }
2812 if (conf->bssid) { 2837 if (conf->changed & IEEE80211_IFCC_BSSID && conf->bssid) {
2813 /* Cache for later use during resets */ 2838 /* Cache for later use during resets */
2814 memcpy(ah->ah_bssid, conf->bssid, ETH_ALEN); 2839 memcpy(ah->ah_bssid, conf->bssid, ETH_ALEN);
2815 /* XXX: assoc id is set to 0 for now, mac80211 doesn't have 2840 /* XXX: assoc id is set to 0 for now, mac80211 doesn't have
@@ -2817,18 +2842,17 @@ ath5k_config_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2817 ath5k_hw_set_associd(ah, ah->ah_bssid, 0); 2842 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
2818 mmiowb(); 2843 mmiowb();
2819 } 2844 }
2820
2821 if (conf->changed & IEEE80211_IFCC_BEACON && 2845 if (conf->changed & IEEE80211_IFCC_BEACON &&
2822 vif->type == NL80211_IFTYPE_ADHOC) { 2846 (vif->type == NL80211_IFTYPE_ADHOC ||
2847 vif->type == NL80211_IFTYPE_MESH_POINT ||
2848 vif->type == NL80211_IFTYPE_AP)) {
2823 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 2849 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
2824 if (!beacon) { 2850 if (!beacon) {
2825 ret = -ENOMEM; 2851 ret = -ENOMEM;
2826 goto unlock; 2852 goto unlock;
2827 } 2853 }
2828 /* call old handler for now */ 2854 ath5k_beacon_update(sc, beacon);
2829 ath5k_beacon_update(hw, beacon);
2830 } 2855 }
2831
2832 mutex_unlock(&sc->lock); 2856 mutex_unlock(&sc->lock);
2833 2857
2834 return ath5k_reset_wake(sc); 2858 return ath5k_reset_wake(sc);
@@ -2888,9 +2912,9 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
2888 if (*new_flags & FIF_PROMISC_IN_BSS) { 2912 if (*new_flags & FIF_PROMISC_IN_BSS) {
2889 rfilt |= AR5K_RX_FILTER_PROM; 2913 rfilt |= AR5K_RX_FILTER_PROM;
2890 __set_bit(ATH_STAT_PROMISC, sc->status); 2914 __set_bit(ATH_STAT_PROMISC, sc->status);
2891 } 2915 } else {
2892 else
2893 __clear_bit(ATH_STAT_PROMISC, sc->status); 2916 __clear_bit(ATH_STAT_PROMISC, sc->status);
2917 }
2894 } 2918 }
2895 2919
2896 /* Note, AR5K_RX_FILTER_MCAST is already enabled */ 2920 /* Note, AR5K_RX_FILTER_MCAST is already enabled */
@@ -2948,12 +2972,15 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
2948 test_bit(ATH_STAT_PROMISC, sc->status)) 2972 test_bit(ATH_STAT_PROMISC, sc->status))
2949 rfilt |= AR5K_RX_FILTER_PROM; 2973 rfilt |= AR5K_RX_FILTER_PROM;
2950 if ((sc->opmode == NL80211_IFTYPE_STATION && sc->assoc) || 2974 if ((sc->opmode == NL80211_IFTYPE_STATION && sc->assoc) ||
2951 sc->opmode == NL80211_IFTYPE_ADHOC) { 2975 sc->opmode == NL80211_IFTYPE_ADHOC ||
2976 sc->opmode == NL80211_IFTYPE_AP)
2952 rfilt |= AR5K_RX_FILTER_BEACON; 2977 rfilt |= AR5K_RX_FILTER_BEACON;
2953 } 2978 if (sc->opmode == NL80211_IFTYPE_MESH_POINT)
2979 rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON |
2980 AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM;
2954 2981
2955 /* Set filters */ 2982 /* Set filters */
2956 ath5k_hw_set_rx_filter(ah,rfilt); 2983 ath5k_hw_set_rx_filter(ah, rfilt);
2957 2984
2958 /* Set multicast bits */ 2985 /* Set multicast bits */
2959 ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]); 2986 ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
@@ -2970,12 +2997,13 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2970 struct ath5k_softc *sc = hw->priv; 2997 struct ath5k_softc *sc = hw->priv;
2971 int ret = 0; 2998 int ret = 0;
2972 2999
2973 switch(key->alg) { 3000 if (modparam_nohwcrypt)
3001 return -EOPNOTSUPP;
3002
3003 switch (key->alg) {
2974 case ALG_WEP: 3004 case ALG_WEP:
2975 /* XXX: fix hardware encryption, its not working. For now
2976 * allow software encryption */
2977 /* break; */
2978 case ALG_TKIP: 3005 case ALG_TKIP:
3006 break;
2979 case ALG_CCMP: 3007 case ALG_CCMP:
2980 return -EOPNOTSUPP; 3008 return -EOPNOTSUPP;
2981 default: 3009 default:
@@ -2994,6 +3022,8 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2994 } 3022 }
2995 __set_bit(key->keyidx, sc->keymap); 3023 __set_bit(key->keyidx, sc->keymap);
2996 key->hw_key_idx = key->keyidx; 3024 key->hw_key_idx = key->keyidx;
3025 key->flags |= (IEEE80211_KEY_FLAG_GENERATE_IV |
3026 IEEE80211_KEY_FLAG_GENERATE_MMIC);
2997 break; 3027 break;
2998 case DISABLE_KEY: 3028 case DISABLE_KEY:
2999 ath5k_hw_reset_key(sc->ah, key->keyidx); 3029 ath5k_hw_reset_key(sc->ah, key->keyidx);
@@ -3060,19 +3090,13 @@ ath5k_reset_tsf(struct ieee80211_hw *hw)
3060} 3090}
3061 3091
3062static int 3092static int
3063ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) 3093ath5k_beacon_update(struct ath5k_softc *sc, struct sk_buff *skb)
3064{ 3094{
3065 struct ath5k_softc *sc = hw->priv;
3066 unsigned long flags; 3095 unsigned long flags;
3067 int ret; 3096 int ret;
3068 3097
3069 ath5k_debug_dump_skb(sc, skb, "BC ", 1); 3098 ath5k_debug_dump_skb(sc, skb, "BC ", 1);
3070 3099
3071 if (sc->opmode != NL80211_IFTYPE_ADHOC) {
3072 ret = -EIO;
3073 goto end;
3074 }
3075
3076 spin_lock_irqsave(&sc->block, flags); 3100 spin_lock_irqsave(&sc->block, flags);
3077 ath5k_txbuf_free(sc, sc->bbuf); 3101 ath5k_txbuf_free(sc, sc->bbuf);
3078 sc->bbuf->skb = skb; 3102 sc->bbuf->skb = skb;
@@ -3085,7 +3109,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3085 mmiowb(); 3109 mmiowb();
3086 } 3110 }
3087 3111
3088end:
3089 return ret; 3112 return ret;
3090} 3113}
3091static void 3114static void
diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath5k/desc.c
index 5e362a7a3620..b40a9287a39a 100644
--- a/drivers/net/wireless/ath5k/desc.c
+++ b/drivers/net/wireless/ath5k/desc.c
@@ -71,7 +71,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
71 /* Verify and set frame length */ 71 /* Verify and set frame length */
72 72
73 /* remove padding we might have added before */ 73 /* remove padding we might have added before */
74 frame_len = pkt_len - (hdr_len & 3) + FCS_LEN; 74 frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN;
75 75
76 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN) 76 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
77 return -EINVAL; 77 return -EINVAL;
@@ -202,7 +202,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
202 /* Verify and set frame length */ 202 /* Verify and set frame length */
203 203
204 /* remove padding we might have added before */ 204 /* remove padding we might have added before */
205 frame_len = pkt_len - (hdr_len & 3) + FCS_LEN; 205 frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN;
206 206
207 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN) 207 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
208 return -EINVAL; 208 return -EINVAL;
diff --git a/drivers/net/wireless/ath5k/dma.c b/drivers/net/wireless/ath5k/dma.c
index 7adceb2c7fab..7e2b1a67e5da 100644
--- a/drivers/net/wireless/ath5k/dma.c
+++ b/drivers/net/wireless/ath5k/dma.c
@@ -472,9 +472,6 @@ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
472 * 472 *
473 * NOTE: We use read-and-clear register, so after this function is called ISR 473 * NOTE: We use read-and-clear register, so after this function is called ISR
474 * is zeroed. 474 * is zeroed.
475 *
476 * XXX: Why filter interrupts in sw with interrupt_mask ? No benefit at all
477 * plus it can be misleading (one might thing that we save interrupts this way)
478 */ 475 */
479int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) 476int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
480{ 477{
@@ -494,11 +491,16 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
494 } 491 }
495 } else { 492 } else {
496 /* 493 /*
497 * Read interrupt status from the Read-And-Clear 494 * Read interrupt status from Interrupt
498 * shadow register. 495 * Status Register shadow copy (Read And Clear)
496 *
499 * Note: PISR/SISR Not available on 5210 497 * Note: PISR/SISR Not available on 5210
500 */ 498 */
501 data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR); 499 data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
500 if (unlikely(data == AR5K_INT_NOCARD)) {
501 *interrupt_mask = data;
502 return -ENODEV;
503 }
502 } 504 }
503 505
504 /* 506 /*
@@ -506,17 +508,9 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
506 */ 508 */
507 *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr; 509 *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
508 510
509 if (unlikely(data == AR5K_INT_NOCARD))
510 return -ENODEV;
511
512 if (data & (AR5K_ISR_RXOK | AR5K_ISR_RXERR))
513 *interrupt_mask |= AR5K_INT_RX;
514
515 if (data & (AR5K_ISR_TXOK | AR5K_ISR_TXERR
516 | AR5K_ISR_TXDESC | AR5K_ISR_TXEOL))
517 *interrupt_mask |= AR5K_INT_TX;
518
519 if (ah->ah_version != AR5K_AR5210) { 511 if (ah->ah_version != AR5K_AR5210) {
512 u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);
513
520 /*HIU = Host Interface Unit (PCI etc)*/ 514 /*HIU = Host Interface Unit (PCI etc)*/
521 if (unlikely(data & (AR5K_ISR_HIUERR))) 515 if (unlikely(data & (AR5K_ISR_HIUERR)))
522 *interrupt_mask |= AR5K_INT_FATAL; 516 *interrupt_mask |= AR5K_INT_FATAL;
@@ -524,24 +518,93 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
524 /*Beacon Not Ready*/ 518 /*Beacon Not Ready*/
525 if (unlikely(data & (AR5K_ISR_BNR))) 519 if (unlikely(data & (AR5K_ISR_BNR)))
526 *interrupt_mask |= AR5K_INT_BNR; 520 *interrupt_mask |= AR5K_INT_BNR;
527 }
528 521
529 /* 522 if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
530 * XXX: BMISS interrupts may occur after association. 523 AR5K_SISR2_DPERR |
531 * I found this on 5210 code but it needs testing. If this is 524 AR5K_SISR2_MCABT)))
532 * true we should disable them before assoc and re-enable them 525 *interrupt_mask |= AR5K_INT_FATAL;
533 * after a successfull assoc + some jiffies. 526
534 */ 527 if (data & AR5K_ISR_TIM)
535#if 0 528 *interrupt_mask |= AR5K_INT_TIM;
536 interrupt_mask &= ~AR5K_INT_BMISS; 529
537#endif 530 if (data & AR5K_ISR_BCNMISC) {
531 if (sisr2 & AR5K_SISR2_TIM)
532 *interrupt_mask |= AR5K_INT_TIM;
533 if (sisr2 & AR5K_SISR2_DTIM)
534 *interrupt_mask |= AR5K_INT_DTIM;
535 if (sisr2 & AR5K_SISR2_DTIM_SYNC)
536 *interrupt_mask |= AR5K_INT_DTIM_SYNC;
537 if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
538 *interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
539 if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
540 *interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
541 }
542
543 if (data & AR5K_ISR_RXDOPPLER)
544 *interrupt_mask |= AR5K_INT_RX_DOPPLER;
545 if (data & AR5K_ISR_QCBRORN) {
546 *interrupt_mask |= AR5K_INT_QCBRORN;
547 ah->ah_txq_isr |= AR5K_REG_MS(
548 ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
549 AR5K_SISR3_QCBRORN);
550 }
551 if (data & AR5K_ISR_QCBRURN) {
552 *interrupt_mask |= AR5K_INT_QCBRURN;
553 ah->ah_txq_isr |= AR5K_REG_MS(
554 ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
555 AR5K_SISR3_QCBRURN);
556 }
557 if (data & AR5K_ISR_QTRIG) {
558 *interrupt_mask |= AR5K_INT_QTRIG;
559 ah->ah_txq_isr |= AR5K_REG_MS(
560 ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
561 AR5K_SISR4_QTRIG);
562 }
563
564 if (data & AR5K_ISR_TXOK)
565 ah->ah_txq_isr |= AR5K_REG_MS(
566 ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
567 AR5K_SISR0_QCU_TXOK);
568
569 if (data & AR5K_ISR_TXDESC)
570 ah->ah_txq_isr |= AR5K_REG_MS(
571 ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
572 AR5K_SISR0_QCU_TXDESC);
573
574 if (data & AR5K_ISR_TXERR)
575 ah->ah_txq_isr |= AR5K_REG_MS(
576 ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
577 AR5K_SISR1_QCU_TXERR);
578
579 if (data & AR5K_ISR_TXEOL)
580 ah->ah_txq_isr |= AR5K_REG_MS(
581 ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
582 AR5K_SISR1_QCU_TXEOL);
583
584 if (data & AR5K_ISR_TXURN)
585 ah->ah_txq_isr |= AR5K_REG_MS(
586 ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
587 AR5K_SISR2_QCU_TXURN);
588 } else {
589 if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
590 | AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
591 *interrupt_mask |= AR5K_INT_FATAL;
592
593 /*
594 * XXX: BMISS interrupts may occur after association.
595 * I found this on 5210 code but it needs testing. If this is
596 * true we should disable them before assoc and re-enable them
597 * after a successfull assoc + some jiffies.
598 interrupt_mask &= ~AR5K_INT_BMISS;
599 */
600 }
538 601
539 /* 602 /*
540 * In case we didn't handle anything, 603 * In case we didn't handle anything,
541 * print the register value. 604 * print the register value.
542 */ 605 */
543 if (unlikely(*interrupt_mask == 0 && net_ratelimit())) 606 if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
544 ATH5K_PRINTF("0x%08x\n", data); 607 ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);
545 608
546 return 0; 609 return 0;
547} 610}
@@ -560,14 +623,17 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
560{ 623{
561 enum ath5k_int old_mask, int_mask; 624 enum ath5k_int old_mask, int_mask;
562 625
626 old_mask = ah->ah_imr;
627
563 /* 628 /*
564 * Disable card interrupts to prevent any race conditions 629 * Disable card interrupts to prevent any race conditions
565 * (they will be re-enabled afterwards). 630 * (they will be re-enabled afterwards if AR5K_INT GLOBAL
631 * is set again on the new mask).
566 */ 632 */
567 ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER); 633 if (old_mask & AR5K_INT_GLOBAL) {
568 ath5k_hw_reg_read(ah, AR5K_IER); 634 ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
569 635 ath5k_hw_reg_read(ah, AR5K_IER);
570 old_mask = ah->ah_imr; 636 }
571 637
572 /* 638 /*
573 * Add additional, chipset-dependent interrupt mask flags 639 * Add additional, chipset-dependent interrupt mask flags
@@ -575,30 +641,64 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
575 */ 641 */
576 int_mask = new_mask & AR5K_INT_COMMON; 642 int_mask = new_mask & AR5K_INT_COMMON;
577 643
578 if (new_mask & AR5K_INT_RX)
579 int_mask |= AR5K_IMR_RXOK | AR5K_IMR_RXERR | AR5K_IMR_RXORN |
580 AR5K_IMR_RXDESC;
581
582 if (new_mask & AR5K_INT_TX)
583 int_mask |= AR5K_IMR_TXOK | AR5K_IMR_TXERR | AR5K_IMR_TXDESC |
584 AR5K_IMR_TXURN;
585
586 if (ah->ah_version != AR5K_AR5210) { 644 if (ah->ah_version != AR5K_AR5210) {
645 /* Preserve per queue TXURN interrupt mask */
646 u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
647 & AR5K_SIMR2_QCU_TXURN;
648
587 if (new_mask & AR5K_INT_FATAL) { 649 if (new_mask & AR5K_INT_FATAL) {
588 int_mask |= AR5K_IMR_HIUERR; 650 int_mask |= AR5K_IMR_HIUERR;
589 AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_MCABT | 651 simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
590 AR5K_SIMR2_SSERR | AR5K_SIMR2_DPERR); 652 | AR5K_SIMR2_DPERR);
591 } 653 }
654
655 /*Beacon Not Ready*/
656 if (new_mask & AR5K_INT_BNR)
657 int_mask |= AR5K_INT_BNR;
658
659 if (new_mask & AR5K_INT_TIM)
660 int_mask |= AR5K_IMR_TIM;
661
662 if (new_mask & AR5K_INT_TIM)
663 simr2 |= AR5K_SISR2_TIM;
664 if (new_mask & AR5K_INT_DTIM)
665 simr2 |= AR5K_SISR2_DTIM;
666 if (new_mask & AR5K_INT_DTIM_SYNC)
667 simr2 |= AR5K_SISR2_DTIM_SYNC;
668 if (new_mask & AR5K_INT_BCN_TIMEOUT)
669 simr2 |= AR5K_SISR2_BCN_TIMEOUT;
670 if (new_mask & AR5K_INT_CAB_TIMEOUT)
671 simr2 |= AR5K_SISR2_CAB_TIMEOUT;
672
673 if (new_mask & AR5K_INT_RX_DOPPLER)
674 int_mask |= AR5K_IMR_RXDOPPLER;
675
676 /* Note: Per queue interrupt masks
677 * are set via reset_tx_queue (qcu.c) */
678 ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
679 ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
680
681 } else {
682 if (new_mask & AR5K_INT_FATAL)
683 int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
684 | AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
685
686 ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
592 } 687 }
593 688
594 ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR); 689 /* If RXNOFRM interrupt is masked disable it
690 * by setting AR5K_RXNOFRM to zero */
691 if (!(new_mask & AR5K_INT_RXNOFRM))
692 ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);
595 693
596 /* Store new interrupt mask */ 694 /* Store new interrupt mask */
597 ah->ah_imr = new_mask; 695 ah->ah_imr = new_mask;
598 696
599 /* ..re-enable interrupts */ 697 /* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
600 ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER); 698 if (new_mask & AR5K_INT_GLOBAL) {
601 ath5k_hw_reg_read(ah, AR5K_IER); 699 ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
700 ath5k_hw_reg_read(ah, AR5K_IER);
701 }
602 702
603 return old_mask; 703 return old_mask;
604} 704}
diff --git a/drivers/net/wireless/ath5k/eeprom.c b/drivers/net/wireless/ath5k/eeprom.c
index a883839b6a9f..1cb7edfae625 100644
--- a/drivers/net/wireless/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath5k/eeprom.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org> 2 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com> 3 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2008 Felix Fietkau <nbd@openwrt.org>
4 * 5 *
5 * Permission to use, copy, modify, and distribute this software for any 6 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above 7 * purpose with or without fee is hereby granted, provided that the above
@@ -63,8 +64,8 @@ static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data)
63/* 64/*
64 * Translate binary channel representation in EEPROM to frequency 65 * Translate binary channel representation in EEPROM to frequency
65 */ 66 */
66static u16 ath5k_eeprom_bin2freq(struct ath5k_hw *ah, u16 bin, 67static u16 ath5k_eeprom_bin2freq(struct ath5k_eeprom_info *ee, u16 bin,
67 unsigned int mode) 68 unsigned int mode)
68{ 69{
69 u16 val; 70 u16 val;
70 71
@@ -72,13 +73,13 @@ static u16 ath5k_eeprom_bin2freq(struct ath5k_hw *ah, u16 bin,
72 return bin; 73 return bin;
73 74
74 if (mode == AR5K_EEPROM_MODE_11A) { 75 if (mode == AR5K_EEPROM_MODE_11A) {
75 if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2) 76 if (ee->ee_version > AR5K_EEPROM_VERSION_3_2)
76 val = (5 * bin) + 4800; 77 val = (5 * bin) + 4800;
77 else 78 else
78 val = bin > 62 ? (10 * 62) + (5 * (bin - 62)) + 5100 : 79 val = bin > 62 ? (10 * 62) + (5 * (bin - 62)) + 5100 :
79 (bin * 10) + 5100; 80 (bin * 10) + 5100;
80 } else { 81 } else {
81 if (ah->ah_ee_version > AR5K_EEPROM_VERSION_3_2) 82 if (ee->ee_version > AR5K_EEPROM_VERSION_3_2)
82 val = bin + 2300; 83 val = bin + 2300;
83 else 84 else
84 val = bin + 2400; 85 val = bin + 2400;
@@ -88,6 +89,71 @@ static u16 ath5k_eeprom_bin2freq(struct ath5k_hw *ah, u16 bin,
88} 89}
89 90
90/* 91/*
92 * Initialize eeprom & capabilities structs
93 */
94static int
95ath5k_eeprom_init_header(struct ath5k_hw *ah)
96{
97 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
98 int ret;
99 u16 val;
100
101 /* Initial TX thermal adjustment values */
102 ee->ee_tx_clip = 4;
103 ee->ee_pwd_84 = ee->ee_pwd_90 = 1;
104 ee->ee_gain_select = 1;
105
106 /*
107 * Read values from EEPROM and store them in the capability structure
108 */
109 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic);
110 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect);
111 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain);
112 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version);
113 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header);
114
115 /* Return if we have an old EEPROM */
116 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0)
117 return 0;
118
119#ifdef notyet
120 /*
121 * Validate the checksum of the EEPROM date. There are some
122 * devices with invalid EEPROMs.
123 */
124 for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
125 AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
126 cksum ^= val;
127 }
128 if (cksum != AR5K_EEPROM_INFO_CKSUM) {
129 ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
130 return -EIO;
131 }
132#endif
133
134 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version),
135 ee_ant_gain);
136
137 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
138 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0);
139 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1);
140 }
141
142 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) {
143 AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val);
144 ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7;
145 ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7;
146
147 AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val);
148 ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7;
149 ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7;
150 }
151
152 return 0;
153}
154
155
156/*
91 * Read antenna infos from eeprom 157 * Read antenna infos from eeprom
92 */ 158 */
93static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset, 159static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
@@ -100,7 +166,7 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
100 166
101 AR5K_EEPROM_READ(o++, val); 167 AR5K_EEPROM_READ(o++, val);
102 ee->ee_switch_settling[mode] = (val >> 8) & 0x7f; 168 ee->ee_switch_settling[mode] = (val >> 8) & 0x7f;
103 ee->ee_ant_tx_rx[mode] = (val >> 2) & 0x3f; 169 ee->ee_atn_tx_rx[mode] = (val >> 2) & 0x3f;
104 ee->ee_ant_control[mode][i] = (val << 4) & 0x3f; 170 ee->ee_ant_control[mode][i] = (val << 4) & 0x3f;
105 171
106 AR5K_EEPROM_READ(o++, val); 172 AR5K_EEPROM_READ(o++, val);
@@ -157,6 +223,30 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
157 u16 val; 223 u16 val;
158 int ret; 224 int ret;
159 225
226 ee->ee_n_piers[mode] = 0;
227 AR5K_EEPROM_READ(o++, val);
228 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
229 switch(mode) {
230 case AR5K_EEPROM_MODE_11A:
231 ee->ee_ob[mode][3] = (val >> 5) & 0x7;
232 ee->ee_db[mode][3] = (val >> 2) & 0x7;
233 ee->ee_ob[mode][2] = (val << 1) & 0x7;
234
235 AR5K_EEPROM_READ(o++, val);
236 ee->ee_ob[mode][2] |= (val >> 15) & 0x1;
237 ee->ee_db[mode][2] = (val >> 12) & 0x7;
238 ee->ee_ob[mode][1] = (val >> 9) & 0x7;
239 ee->ee_db[mode][1] = (val >> 6) & 0x7;
240 ee->ee_ob[mode][0] = (val >> 3) & 0x7;
241 ee->ee_db[mode][0] = val & 0x7;
242 break;
243 case AR5K_EEPROM_MODE_11G:
244 case AR5K_EEPROM_MODE_11B:
245 ee->ee_ob[mode][1] = (val >> 4) & 0x7;
246 ee->ee_db[mode][1] = val & 0x7;
247 break;
248 }
249
160 AR5K_EEPROM_READ(o++, val); 250 AR5K_EEPROM_READ(o++, val);
161 ee->ee_tx_end2xlna_enable[mode] = (val >> 8) & 0xff; 251 ee->ee_tx_end2xlna_enable[mode] = (val >> 8) & 0xff;
162 ee->ee_thr_62[mode] = val & 0xff; 252 ee->ee_thr_62[mode] = val & 0xff;
@@ -209,8 +299,11 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
209 AR5K_EEPROM_READ(o++, val); 299 AR5K_EEPROM_READ(o++, val);
210 ee->ee_i_gain[mode] |= (val << 3) & 0x38; 300 ee->ee_i_gain[mode] |= (val << 3) & 0x38;
211 301
212 if (mode == AR5K_EEPROM_MODE_11G) 302 if (mode == AR5K_EEPROM_MODE_11G) {
213 ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff; 303 ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff;
304 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6)
305 ee->ee_scaled_cck_delta = (val >> 11) & 0x1f;
306 }
214 } 307 }
215 308
216 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 && 309 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
@@ -219,10 +312,77 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
219 ee->ee_q_cal[mode] = (val >> 3) & 0x1f; 312 ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
220 } 313 }
221 314
222 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6 && 315 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_0)
223 mode == AR5K_EEPROM_MODE_11G) 316 goto done;
224 ee->ee_scaled_cck_delta = (val >> 11) & 0x1f; 317
318 switch(mode) {
319 case AR5K_EEPROM_MODE_11A:
320 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_1)
321 break;
322
323 AR5K_EEPROM_READ(o++, val);
324 ee->ee_margin_tx_rx[mode] = val & 0x3f;
325 break;
326 case AR5K_EEPROM_MODE_11B:
327 AR5K_EEPROM_READ(o++, val);
328
329 ee->ee_pwr_cal_b[0].freq =
330 ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
331 if (ee->ee_pwr_cal_b[0].freq != AR5K_EEPROM_CHANNEL_DIS)
332 ee->ee_n_piers[mode]++;
333
334 ee->ee_pwr_cal_b[1].freq =
335 ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode);
336 if (ee->ee_pwr_cal_b[1].freq != AR5K_EEPROM_CHANNEL_DIS)
337 ee->ee_n_piers[mode]++;
338
339 AR5K_EEPROM_READ(o++, val);
340 ee->ee_pwr_cal_b[2].freq =
341 ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
342 if (ee->ee_pwr_cal_b[2].freq != AR5K_EEPROM_CHANNEL_DIS)
343 ee->ee_n_piers[mode]++;
344
345 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
346 ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
347 break;
348 case AR5K_EEPROM_MODE_11G:
349 AR5K_EEPROM_READ(o++, val);
350
351 ee->ee_pwr_cal_g[0].freq =
352 ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
353 if (ee->ee_pwr_cal_g[0].freq != AR5K_EEPROM_CHANNEL_DIS)
354 ee->ee_n_piers[mode]++;
355
356 ee->ee_pwr_cal_g[1].freq =
357 ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode);
358 if (ee->ee_pwr_cal_g[1].freq != AR5K_EEPROM_CHANNEL_DIS)
359 ee->ee_n_piers[mode]++;
360
361 AR5K_EEPROM_READ(o++, val);
362 ee->ee_turbo_max_power[mode] = val & 0x7f;
363 ee->ee_xr_power[mode] = (val >> 7) & 0x3f;
364
365 AR5K_EEPROM_READ(o++, val);
366 ee->ee_pwr_cal_g[2].freq =
367 ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
368 if (ee->ee_pwr_cal_g[2].freq != AR5K_EEPROM_CHANNEL_DIS)
369 ee->ee_n_piers[mode]++;
225 370
371 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
372 ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
373
374 AR5K_EEPROM_READ(o++, val);
375 ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
376 ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
377
378 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) {
379 AR5K_EEPROM_READ(o++, val);
380 ee->ee_cck_ofdm_gain_delta = val & 0xff;
381 }
382 break;
383 }
384
385done:
226 /* return new offset */ 386 /* return new offset */
227 *offset = o; 387 *offset = o;
228 388
@@ -230,204 +390,944 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
230} 390}
231 391
232/* 392/*
233 * Initialize eeprom & capabilities structs 393 * Read turbo mode information on newer EEPROM versions
234 */ 394 */
235int ath5k_eeprom_init(struct ath5k_hw *ah) 395static int
396ath5k_eeprom_read_turbo_modes(struct ath5k_hw *ah,
397 u32 *offset, unsigned int mode)
236{ 398{
237 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 399 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
238 unsigned int mode, i; 400 u32 o = *offset;
239 int ret;
240 u32 offset;
241 u16 val; 401 u16 val;
402 int ret;
242 403
243 /* Initial TX thermal adjustment values */ 404 if (ee->ee_version < AR5K_EEPROM_VERSION_5_0)
244 ee->ee_tx_clip = 4; 405 return 0;
245 ee->ee_pwd_84 = ee->ee_pwd_90 = 1;
246 ee->ee_gain_select = 1;
247 406
248 /* 407 switch (mode){
249 * Read values from EEPROM and store them in the capability structure 408 case AR5K_EEPROM_MODE_11A:
250 */ 409 ee->ee_switch_settling_turbo[mode] = (val >> 6) & 0x7f;
251 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic);
252 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect);
253 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain);
254 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version);
255 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header);
256 410
257 /* Return if we have an old EEPROM */ 411 ee->ee_atn_tx_rx_turbo[mode] = (val >> 13) & 0x7;
258 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0) 412 AR5K_EEPROM_READ(o++, val);
259 return 0; 413 ee->ee_atn_tx_rx_turbo[mode] |= (val & 0x7) << 3;
414 ee->ee_margin_tx_rx_turbo[mode] = (val >> 3) & 0x3f;
415
416 ee->ee_adc_desired_size_turbo[mode] = (val >> 9) & 0x7f;
417 AR5K_EEPROM_READ(o++, val);
418 ee->ee_adc_desired_size_turbo[mode] |= (val & 0x1) << 7;
419 ee->ee_pga_desired_size_turbo[mode] = (val >> 1) & 0xff;
420
421 if (AR5K_EEPROM_EEMAP(ee->ee_misc0) >=2)
422 ee->ee_pd_gain_overlap = (val >> 9) & 0xf;
423 break;
424 case AR5K_EEPROM_MODE_11G:
425 ee->ee_switch_settling_turbo[mode] = (val >> 8) & 0x7f;
426
427 ee->ee_atn_tx_rx_turbo[mode] = (val >> 15) & 0x7;
428 AR5K_EEPROM_READ(o++, val);
429 ee->ee_atn_tx_rx_turbo[mode] |= (val & 0x1f) << 1;
430 ee->ee_margin_tx_rx_turbo[mode] = (val >> 5) & 0x3f;
431
432 ee->ee_adc_desired_size_turbo[mode] = (val >> 11) & 0x7f;
433 AR5K_EEPROM_READ(o++, val);
434 ee->ee_adc_desired_size_turbo[mode] |= (val & 0x7) << 5;
435 ee->ee_pga_desired_size_turbo[mode] = (val >> 3) & 0xff;
436 break;
437 }
438
439 /* return new offset */
440 *offset = o;
441
442 return 0;
443}
444
445
446static int
447ath5k_eeprom_init_modes(struct ath5k_hw *ah)
448{
449 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
450 u32 mode_offset[3];
451 unsigned int mode;
452 u32 offset;
453 int ret;
260 454
261#ifdef notyet
262 /* 455 /*
263 * Validate the checksum of the EEPROM date. There are some 456 * Get values for all modes
264 * devices with invalid EEPROMs.
265 */ 457 */
266 for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) { 458 mode_offset[AR5K_EEPROM_MODE_11A] = AR5K_EEPROM_MODES_11A(ah->ah_ee_version);
267 AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val); 459 mode_offset[AR5K_EEPROM_MODE_11B] = AR5K_EEPROM_MODES_11B(ah->ah_ee_version);
268 cksum ^= val; 460 mode_offset[AR5K_EEPROM_MODE_11G] = AR5K_EEPROM_MODES_11G(ah->ah_ee_version);
461
462 ee->ee_turbo_max_power[AR5K_EEPROM_MODE_11A] =
463 AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header);
464
465 for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) {
466 offset = mode_offset[mode];
467
468 ret = ath5k_eeprom_read_ants(ah, &offset, mode);
469 if (ret)
470 return ret;
471
472 ret = ath5k_eeprom_read_modes(ah, &offset, mode);
473 if (ret)
474 return ret;
475
476 ret = ath5k_eeprom_read_turbo_modes(ah, &offset, mode);
477 if (ret)
478 return ret;
269 } 479 }
270 if (cksum != AR5K_EEPROM_INFO_CKSUM) { 480
271 ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum); 481 /* override for older eeprom versions for better performance */
272 return -EIO; 482 if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2) {
483 ee->ee_thr_62[AR5K_EEPROM_MODE_11A] = 15;
484 ee->ee_thr_62[AR5K_EEPROM_MODE_11B] = 28;
485 ee->ee_thr_62[AR5K_EEPROM_MODE_11G] = 28;
273 } 486 }
274#endif
275 487
276 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version), 488 return 0;
277 ee_ant_gain); 489}
278 490
279 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) { 491static inline void
280 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0); 492ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
281 AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1); 493{
282 } 494 const static u16 intercepts3[] =
495 { 0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100 };
496 const static u16 intercepts3_2[] =
497 { 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100 };
498 const u16 *ip;
499 int i;
500
501 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2)
502 ip = intercepts3_2;
503 else
504 ip = intercepts3;
283 505
284 if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) { 506 for (i = 0; i < ARRAY_SIZE(intercepts3); i++)
285 AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val); 507 *vp++ = (ip[i] * max + (100 - ip[i]) * min) / 100;
286 ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7; 508}
287 ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7;
288 509
289 AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val); 510static inline int
290 ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7; 511ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
291 ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7; 512 struct ath5k_chan_pcal_info *pc, u8 *count)
513{
514 int o = *offset;
515 int i = 0;
516 u8 f1, f2;
517 int ret;
518 u16 val;
519
520 while(i < max) {
521 AR5K_EEPROM_READ(o++, val);
522
523 f1 = (val >> 8) & 0xff;
524 f2 = val & 0xff;
525
526 if (f1)
527 pc[i++].freq = f1;
528
529 if (f2)
530 pc[i++].freq = f2;
531
532 if (!f1 || !f2)
533 break;
292 } 534 }
535 *offset = o;
536 *count = i;
293 537
294 /* 538 return 0;
295 * Get conformance test limit values 539}
296 */ 540
297 offset = AR5K_EEPROM_CTL(ah->ah_ee_version); 541static int
298 ee->ee_ctls = AR5K_EEPROM_N_CTLS(ah->ah_ee_version); 542ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
543{
544 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
545 struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a;
546 int i, ret;
547 u16 val;
548 u8 mask;
549
550 if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) {
551 ath5k_eeprom_read_freq_list(ah, &offset,
552 AR5K_EEPROM_N_5GHZ_CHAN, pcal,
553 &ee->ee_n_piers[AR5K_EEPROM_MODE_11A]);
554 } else {
555 mask = AR5K_EEPROM_FREQ_M(ah->ah_ee_version);
299 556
300 for (i = 0; i < ee->ee_ctls; i++) {
301 AR5K_EEPROM_READ(offset++, val); 557 AR5K_EEPROM_READ(offset++, val);
302 ee->ee_ctl[i] = (val >> 8) & 0xff; 558 pcal[0].freq = (val >> 9) & mask;
303 ee->ee_ctl[i + 1] = val & 0xff; 559 pcal[1].freq = (val >> 2) & mask;
560 pcal[2].freq = (val << 5) & mask;
561
562 AR5K_EEPROM_READ(offset++, val);
563 pcal[2].freq |= (val >> 11) & 0x1f;
564 pcal[3].freq = (val >> 4) & mask;
565 pcal[4].freq = (val << 3) & mask;
566
567 AR5K_EEPROM_READ(offset++, val);
568 pcal[4].freq |= (val >> 13) & 0x7;
569 pcal[5].freq = (val >> 6) & mask;
570 pcal[6].freq = (val << 1) & mask;
571
572 AR5K_EEPROM_READ(offset++, val);
573 pcal[6].freq |= (val >> 15) & 0x1;
574 pcal[7].freq = (val >> 8) & mask;
575 pcal[8].freq = (val >> 1) & mask;
576 pcal[9].freq = (val << 6) & mask;
577
578 AR5K_EEPROM_READ(offset++, val);
579 pcal[9].freq |= (val >> 10) & 0x3f;
580 ee->ee_n_piers[AR5K_EEPROM_MODE_11A] = 10;
304 } 581 }
305 582
306 /* 583 for(i = 0; i < AR5K_EEPROM_N_5GHZ_CHAN; i += 1) {
307 * Get values for 802.11a (5GHz) 584 pcal[i].freq = ath5k_eeprom_bin2freq(ee,
308 */ 585 pcal[i].freq, AR5K_EEPROM_MODE_11A);
309 mode = AR5K_EEPROM_MODE_11A; 586 }
310 587
311 ee->ee_turbo_max_power[mode] = 588 return 0;
312 AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header); 589}
313 590
314 offset = AR5K_EEPROM_MODES_11A(ah->ah_ee_version); 591static inline int
592ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
593{
594 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
595 struct ath5k_chan_pcal_info *pcal;
596 int i;
597
598 switch(mode) {
599 case AR5K_EEPROM_MODE_11B:
600 pcal = ee->ee_pwr_cal_b;
601 break;
602 case AR5K_EEPROM_MODE_11G:
603 pcal = ee->ee_pwr_cal_g;
604 break;
605 default:
606 return -EINVAL;
607 }
315 608
316 ret = ath5k_eeprom_read_ants(ah, &offset, mode); 609 ath5k_eeprom_read_freq_list(ah, &offset,
317 if (ret) 610 AR5K_EEPROM_N_2GHZ_CHAN_2413, pcal,
318 return ret; 611 &ee->ee_n_piers[mode]);
612 for(i = 0; i < AR5K_EEPROM_N_2GHZ_CHAN_2413; i += 1) {
613 pcal[i].freq = ath5k_eeprom_bin2freq(ee,
614 pcal[i].freq, mode);
615 }
319 616
320 AR5K_EEPROM_READ(offset++, val); 617 return 0;
321 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff); 618}
322 ee->ee_ob[mode][3] = (val >> 5) & 0x7;
323 ee->ee_db[mode][3] = (val >> 2) & 0x7;
324 ee->ee_ob[mode][2] = (val << 1) & 0x7;
325
326 AR5K_EEPROM_READ(offset++, val);
327 ee->ee_ob[mode][2] |= (val >> 15) & 0x1;
328 ee->ee_db[mode][2] = (val >> 12) & 0x7;
329 ee->ee_ob[mode][1] = (val >> 9) & 0x7;
330 ee->ee_db[mode][1] = (val >> 6) & 0x7;
331 ee->ee_ob[mode][0] = (val >> 3) & 0x7;
332 ee->ee_db[mode][0] = val & 0x7;
333
334 ret = ath5k_eeprom_read_modes(ah, &offset, mode);
335 if (ret)
336 return ret;
337 619
338 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) { 620
339 AR5K_EEPROM_READ(offset++, val); 621static int
340 ee->ee_margin_tx_rx[mode] = val & 0x3f; 622ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
623{
624 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
625 struct ath5k_chan_pcal_info *pcal;
626 int offset, ret;
627 int i, j;
628 u16 val;
629
630 offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
631 switch(mode) {
632 case AR5K_EEPROM_MODE_11A:
633 if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
634 return 0;
635
636 ret = ath5k_eeprom_init_11a_pcal_freq(ah,
637 offset + AR5K_EEPROM_GROUP1_OFFSET);
638 if (ret < 0)
639 return ret;
640
641 offset += AR5K_EEPROM_GROUP2_OFFSET;
642 pcal = ee->ee_pwr_cal_a;
643 break;
644 case AR5K_EEPROM_MODE_11B:
645 if (!AR5K_EEPROM_HDR_11B(ee->ee_header) &&
646 !AR5K_EEPROM_HDR_11G(ee->ee_header))
647 return 0;
648
649 pcal = ee->ee_pwr_cal_b;
650 offset += AR5K_EEPROM_GROUP3_OFFSET;
651
652 /* fixed piers */
653 pcal[0].freq = 2412;
654 pcal[1].freq = 2447;
655 pcal[2].freq = 2484;
656 ee->ee_n_piers[mode] = 3;
657 break;
658 case AR5K_EEPROM_MODE_11G:
659 if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
660 return 0;
661
662 pcal = ee->ee_pwr_cal_g;
663 offset += AR5K_EEPROM_GROUP4_OFFSET;
664
665 /* fixed piers */
666 pcal[0].freq = 2312;
667 pcal[1].freq = 2412;
668 pcal[2].freq = 2484;
669 ee->ee_n_piers[mode] = 3;
670 break;
671 default:
672 return -EINVAL;
341 } 673 }
342 674
343 /* 675 for (i = 0; i < ee->ee_n_piers[mode]; i++) {
344 * Get values for 802.11b (2.4GHz) 676 struct ath5k_chan_pcal_info_rf5111 *cdata =
345 */ 677 &pcal[i].rf5111_info;
346 mode = AR5K_EEPROM_MODE_11B;
347 offset = AR5K_EEPROM_MODES_11B(ah->ah_ee_version);
348 678
349 ret = ath5k_eeprom_read_ants(ah, &offset, mode); 679 AR5K_EEPROM_READ(offset++, val);
350 if (ret) 680 cdata->pcdac_max = ((val >> 10) & AR5K_EEPROM_PCDAC_M);
351 return ret; 681 cdata->pcdac_min = ((val >> 4) & AR5K_EEPROM_PCDAC_M);
682 cdata->pwr[0] = ((val << 2) & AR5K_EEPROM_POWER_M);
352 683
353 AR5K_EEPROM_READ(offset++, val); 684 AR5K_EEPROM_READ(offset++, val);
354 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff); 685 cdata->pwr[0] |= ((val >> 14) & 0x3);
355 ee->ee_ob[mode][1] = (val >> 4) & 0x7; 686 cdata->pwr[1] = ((val >> 8) & AR5K_EEPROM_POWER_M);
356 ee->ee_db[mode][1] = val & 0x7; 687 cdata->pwr[2] = ((val >> 2) & AR5K_EEPROM_POWER_M);
688 cdata->pwr[3] = ((val << 4) & AR5K_EEPROM_POWER_M);
357 689
358 ret = ath5k_eeprom_read_modes(ah, &offset, mode); 690 AR5K_EEPROM_READ(offset++, val);
359 if (ret) 691 cdata->pwr[3] |= ((val >> 12) & 0xf);
360 return ret; 692 cdata->pwr[4] = ((val >> 6) & AR5K_EEPROM_POWER_M);
693 cdata->pwr[5] = (val & AR5K_EEPROM_POWER_M);
361 694
362 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
363 AR5K_EEPROM_READ(offset++, val); 695 AR5K_EEPROM_READ(offset++, val);
364 ee->ee_cal_pier[mode][0] = 696 cdata->pwr[6] = ((val >> 10) & AR5K_EEPROM_POWER_M);
365 ath5k_eeprom_bin2freq(ah, val & 0xff, mode); 697 cdata->pwr[7] = ((val >> 4) & AR5K_EEPROM_POWER_M);
366 ee->ee_cal_pier[mode][1] = 698 cdata->pwr[8] = ((val << 2) & AR5K_EEPROM_POWER_M);
367 ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
368 699
369 AR5K_EEPROM_READ(offset++, val); 700 AR5K_EEPROM_READ(offset++, val);
370 ee->ee_cal_pier[mode][2] = 701 cdata->pwr[8] |= ((val >> 14) & 0x3);
371 ath5k_eeprom_bin2freq(ah, val & 0xff, mode); 702 cdata->pwr[9] = ((val >> 8) & AR5K_EEPROM_POWER_M);
703 cdata->pwr[10] = ((val >> 2) & AR5K_EEPROM_POWER_M);
704
705 ath5k_get_pcdac_intercepts(ah, cdata->pcdac_min,
706 cdata->pcdac_max, cdata->pcdac);
707
708 for (j = 0; j < AR5K_EEPROM_N_PCDAC; j++) {
709 cdata->pwr[j] = (u16)
710 (AR5K_EEPROM_POWER_STEP * cdata->pwr[j]);
711 }
372 } 712 }
373 713
374 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) 714 return 0;
375 ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f; 715}
376 716
377 /* 717static int
378 * Get values for 802.11g (2.4GHz) 718ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
379 */ 719{
380 mode = AR5K_EEPROM_MODE_11G; 720 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
381 offset = AR5K_EEPROM_MODES_11G(ah->ah_ee_version); 721 struct ath5k_chan_pcal_info_rf5112 *chan_pcal_info;
722 struct ath5k_chan_pcal_info *gen_chan_info;
723 u32 offset;
724 unsigned int i, c;
725 u16 val;
726 int ret;
382 727
383 ret = ath5k_eeprom_read_ants(ah, &offset, mode); 728 switch (mode) {
384 if (ret) 729 case AR5K_EEPROM_MODE_11A:
385 return ret; 730 /*
731 * Read 5GHz EEPROM channels
732 */
733 offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
734 ath5k_eeprom_init_11a_pcal_freq(ah, offset);
735
736 offset += AR5K_EEPROM_GROUP2_OFFSET;
737 gen_chan_info = ee->ee_pwr_cal_a;
738 break;
739 case AR5K_EEPROM_MODE_11B:
740 offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
741 if (AR5K_EEPROM_HDR_11A(ee->ee_header))
742 offset += AR5K_EEPROM_GROUP3_OFFSET;
743
744 /* NB: frequency piers parsed during mode init */
745 gen_chan_info = ee->ee_pwr_cal_b;
746 break;
747 case AR5K_EEPROM_MODE_11G:
748 offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
749 if (AR5K_EEPROM_HDR_11A(ee->ee_header))
750 offset += AR5K_EEPROM_GROUP4_OFFSET;
751 else if (AR5K_EEPROM_HDR_11B(ee->ee_header))
752 offset += AR5K_EEPROM_GROUP2_OFFSET;
753
754 /* NB: frequency piers parsed during mode init */
755 gen_chan_info = ee->ee_pwr_cal_g;
756 break;
757 default:
758 return -EINVAL;
759 }
386 760
387 AR5K_EEPROM_READ(offset++, val); 761 for (i = 0; i < ee->ee_n_piers[mode]; i++) {
388 ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff); 762 chan_pcal_info = &gen_chan_info[i].rf5112_info;
389 ee->ee_ob[mode][1] = (val >> 4) & 0x7;
390 ee->ee_db[mode][1] = val & 0x7;
391 763
392 ret = ath5k_eeprom_read_modes(ah, &offset, mode); 764 /* Power values in dBm * 4
393 if (ret) 765 * for the lower xpd gain curve
394 return ret; 766 * (0 dBm -> higher output power) */
767 for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) {
768 AR5K_EEPROM_READ(offset++, val);
769 chan_pcal_info->pwr_x0[c] = (val & 0xff);
770 chan_pcal_info->pwr_x0[++c] = ((val >> 8) & 0xff);
771 }
395 772
396 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) { 773 /* PCDAC steps
774 * corresponding to the above power
775 * measurements */
397 AR5K_EEPROM_READ(offset++, val); 776 AR5K_EEPROM_READ(offset++, val);
398 ee->ee_cal_pier[mode][0] = 777 chan_pcal_info->pcdac_x0[1] = (val & 0x1f);
399 ath5k_eeprom_bin2freq(ah, val & 0xff, mode); 778 chan_pcal_info->pcdac_x0[2] = ((val >> 5) & 0x1f);
400 ee->ee_cal_pier[mode][1] = 779 chan_pcal_info->pcdac_x0[3] = ((val >> 10) & 0x1f);
401 ath5k_eeprom_bin2freq(ah, (val >> 8) & 0xff, mode);
402 780
781 /* Power values in dBm * 4
782 * for the higher xpd gain curve
783 * (18 dBm -> lower output power) */
403 AR5K_EEPROM_READ(offset++, val); 784 AR5K_EEPROM_READ(offset++, val);
404 ee->ee_turbo_max_power[mode] = val & 0x7f; 785 chan_pcal_info->pwr_x3[0] = (val & 0xff);
405 ee->ee_xr_power[mode] = (val >> 7) & 0x3f; 786 chan_pcal_info->pwr_x3[1] = ((val >> 8) & 0xff);
406 787
407 AR5K_EEPROM_READ(offset++, val); 788 AR5K_EEPROM_READ(offset++, val);
408 ee->ee_cal_pier[mode][2] = 789 chan_pcal_info->pwr_x3[2] = (val & 0xff);
409 ath5k_eeprom_bin2freq(ah, val & 0xff, mode); 790
791 /* PCDAC steps
792 * corresponding to the above power
793 * measurements (static) */
794 chan_pcal_info->pcdac_x3[0] = 20;
795 chan_pcal_info->pcdac_x3[1] = 35;
796 chan_pcal_info->pcdac_x3[2] = 63;
797
798 if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) {
799 chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0xff);
800
801 /* Last xpd0 power level is also channel maximum */
802 gen_chan_info[i].max_pwr = chan_pcal_info->pwr_x0[3];
803 } else {
804 chan_pcal_info->pcdac_x0[0] = 1;
805 gen_chan_info[i].max_pwr = ((val >> 8) & 0xff);
806 }
410 807
411 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1) 808 /* Recreate pcdac_x0 table for this channel using pcdac steps */
412 ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f; 809 chan_pcal_info->pcdac_x0[1] += chan_pcal_info->pcdac_x0[0];
810 chan_pcal_info->pcdac_x0[2] += chan_pcal_info->pcdac_x0[1];
811 chan_pcal_info->pcdac_x0[3] += chan_pcal_info->pcdac_x0[2];
812 }
813
814 return 0;
815}
816
817static inline unsigned int
818ath5k_pdgains_size_2413(struct ath5k_eeprom_info *ee, unsigned int mode)
819{
820 static const unsigned int pdgains_size[] = { 4, 6, 9, 12 };
821 unsigned int sz;
822
823 sz = pdgains_size[ee->ee_pd_gains[mode] - 1];
824 sz *= ee->ee_n_piers[mode];
825
826 return sz;
827}
828
829static unsigned int
830ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
831{
832 u32 offset = AR5K_EEPROM_CAL_DATA_START(ee->ee_misc4);
833
834 switch(mode) {
835 case AR5K_EEPROM_MODE_11G:
836 if (AR5K_EEPROM_HDR_11B(ee->ee_header))
837 offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11B) + 2;
838 /* fall through */
839 case AR5K_EEPROM_MODE_11B:
840 if (AR5K_EEPROM_HDR_11A(ee->ee_header))
841 offset += ath5k_pdgains_size_2413(ee, AR5K_EEPROM_MODE_11A) + 5;
842 /* fall through */
843 case AR5K_EEPROM_MODE_11A:
844 break;
845 default:
846 break;
847 }
848
849 return offset;
850}
851
852static int
853ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
854{
855 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
856 struct ath5k_chan_pcal_info_rf2413 *chan_pcal_info;
857 struct ath5k_chan_pcal_info *gen_chan_info;
858 unsigned int i, c;
859 u32 offset;
860 int ret;
861 u16 val;
862 u8 pd_gains = 0;
863
864 if (ee->ee_x_gain[mode] & 0x1) pd_gains++;
865 if ((ee->ee_x_gain[mode] >> 1) & 0x1) pd_gains++;
866 if ((ee->ee_x_gain[mode] >> 2) & 0x1) pd_gains++;
867 if ((ee->ee_x_gain[mode] >> 3) & 0x1) pd_gains++;
868 ee->ee_pd_gains[mode] = pd_gains;
869
870 offset = ath5k_cal_data_offset_2413(ee, mode);
871 switch (mode) {
872 case AR5K_EEPROM_MODE_11A:
873 if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
874 return 0;
875
876 ath5k_eeprom_init_11a_pcal_freq(ah, offset);
877 offset += AR5K_EEPROM_N_5GHZ_CHAN / 2;
878 gen_chan_info = ee->ee_pwr_cal_a;
879 break;
880 case AR5K_EEPROM_MODE_11B:
881 if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
882 return 0;
413 883
884 ath5k_eeprom_init_11bg_2413(ah, mode, offset);
885 offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
886 gen_chan_info = ee->ee_pwr_cal_b;
887 break;
888 case AR5K_EEPROM_MODE_11G:
889 if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
890 return 0;
891
892 ath5k_eeprom_init_11bg_2413(ah, mode, offset);
893 offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
894 gen_chan_info = ee->ee_pwr_cal_g;
895 break;
896 default:
897 return -EINVAL;
898 }
899
900 if (pd_gains == 0)
901 return 0;
902
903 for (i = 0; i < ee->ee_n_piers[mode]; i++) {
904 chan_pcal_info = &gen_chan_info[i].rf2413_info;
905
906 /*
907 * Read pwr_i, pddac_i and the first
908 * 2 pd points (pwr, pddac)
909 */
414 AR5K_EEPROM_READ(offset++, val); 910 AR5K_EEPROM_READ(offset++, val);
415 ee->ee_i_cal[mode] = (val >> 8) & 0x3f; 911 chan_pcal_info->pwr_i[0] = val & 0x1f;
416 ee->ee_q_cal[mode] = (val >> 3) & 0x1f; 912 chan_pcal_info->pddac_i[0] = (val >> 5) & 0x7f;
913 chan_pcal_info->pwr[0][0] =
914 (val >> 12) & 0xf;
417 915
418 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) { 916 AR5K_EEPROM_READ(offset++, val);
917 chan_pcal_info->pddac[0][0] = val & 0x3f;
918 chan_pcal_info->pwr[0][1] = (val >> 6) & 0xf;
919 chan_pcal_info->pddac[0][1] =
920 (val >> 10) & 0x3f;
921
922 AR5K_EEPROM_READ(offset++, val);
923 chan_pcal_info->pwr[0][2] = val & 0xf;
924 chan_pcal_info->pddac[0][2] =
925 (val >> 4) & 0x3f;
926
927 chan_pcal_info->pwr[0][3] = 0;
928 chan_pcal_info->pddac[0][3] = 0;
929
930 if (pd_gains > 1) {
931 /*
932 * Pd gain 0 is not the last pd gain
933 * so it only has 2 pd points.
934 * Continue wih pd gain 1.
935 */
936 chan_pcal_info->pwr_i[1] = (val >> 10) & 0x1f;
937
938 chan_pcal_info->pddac_i[1] = (val >> 15) & 0x1;
419 AR5K_EEPROM_READ(offset++, val); 939 AR5K_EEPROM_READ(offset++, val);
420 ee->ee_cck_ofdm_gain_delta = val & 0xff; 940 chan_pcal_info->pddac_i[1] |= (val & 0x3F) << 1;
941
942 chan_pcal_info->pwr[1][0] = (val >> 6) & 0xf;
943 chan_pcal_info->pddac[1][0] =
944 (val >> 10) & 0x3f;
945
946 AR5K_EEPROM_READ(offset++, val);
947 chan_pcal_info->pwr[1][1] = val & 0xf;
948 chan_pcal_info->pddac[1][1] =
949 (val >> 4) & 0x3f;
950 chan_pcal_info->pwr[1][2] =
951 (val >> 10) & 0xf;
952
953 chan_pcal_info->pddac[1][2] =
954 (val >> 14) & 0x3;
955 AR5K_EEPROM_READ(offset++, val);
956 chan_pcal_info->pddac[1][2] |=
957 (val & 0xF) << 2;
958
959 chan_pcal_info->pwr[1][3] = 0;
960 chan_pcal_info->pddac[1][3] = 0;
961 } else if (pd_gains == 1) {
962 /*
963 * Pd gain 0 is the last one so
964 * read the extra point.
965 */
966 chan_pcal_info->pwr[0][3] =
967 (val >> 10) & 0xf;
968
969 chan_pcal_info->pddac[0][3] =
970 (val >> 14) & 0x3;
971 AR5K_EEPROM_READ(offset++, val);
972 chan_pcal_info->pddac[0][3] |=
973 (val & 0xF) << 2;
974 }
975
976 /*
977 * Proceed with the other pd_gains
978 * as above.
979 */
980 if (pd_gains > 2) {
981 chan_pcal_info->pwr_i[2] = (val >> 4) & 0x1f;
982 chan_pcal_info->pddac_i[2] = (val >> 9) & 0x7f;
983
984 AR5K_EEPROM_READ(offset++, val);
985 chan_pcal_info->pwr[2][0] =
986 (val >> 0) & 0xf;
987 chan_pcal_info->pddac[2][0] =
988 (val >> 4) & 0x3f;
989 chan_pcal_info->pwr[2][1] =
990 (val >> 10) & 0xf;
991
992 chan_pcal_info->pddac[2][1] =
993 (val >> 14) & 0x3;
994 AR5K_EEPROM_READ(offset++, val);
995 chan_pcal_info->pddac[2][1] |=
996 (val & 0xF) << 2;
997
998 chan_pcal_info->pwr[2][2] =
999 (val >> 4) & 0xf;
1000 chan_pcal_info->pddac[2][2] =
1001 (val >> 8) & 0x3f;
1002
1003 chan_pcal_info->pwr[2][3] = 0;
1004 chan_pcal_info->pddac[2][3] = 0;
1005 } else if (pd_gains == 2) {
1006 chan_pcal_info->pwr[1][3] =
1007 (val >> 4) & 0xf;
1008 chan_pcal_info->pddac[1][3] =
1009 (val >> 8) & 0x3f;
1010 }
1011
1012 if (pd_gains > 3) {
1013 chan_pcal_info->pwr_i[3] = (val >> 14) & 0x3;
1014 AR5K_EEPROM_READ(offset++, val);
1015 chan_pcal_info->pwr_i[3] |= ((val >> 0) & 0x7) << 2;
1016
1017 chan_pcal_info->pddac_i[3] = (val >> 3) & 0x7f;
1018 chan_pcal_info->pwr[3][0] =
1019 (val >> 10) & 0xf;
1020 chan_pcal_info->pddac[3][0] =
1021 (val >> 14) & 0x3;
1022
1023 AR5K_EEPROM_READ(offset++, val);
1024 chan_pcal_info->pddac[3][0] |=
1025 (val & 0xF) << 2;
1026 chan_pcal_info->pwr[3][1] =
1027 (val >> 4) & 0xf;
1028 chan_pcal_info->pddac[3][1] =
1029 (val >> 8) & 0x3f;
1030
1031 chan_pcal_info->pwr[3][2] =
1032 (val >> 14) & 0x3;
1033 AR5K_EEPROM_READ(offset++, val);
1034 chan_pcal_info->pwr[3][2] |=
1035 ((val >> 0) & 0x3) << 2;
1036
1037 chan_pcal_info->pddac[3][2] =
1038 (val >> 2) & 0x3f;
1039 chan_pcal_info->pwr[3][3] =
1040 (val >> 8) & 0xf;
1041
1042 chan_pcal_info->pddac[3][3] =
1043 (val >> 12) & 0xF;
1044 AR5K_EEPROM_READ(offset++, val);
1045 chan_pcal_info->pddac[3][3] |=
1046 ((val >> 0) & 0x3) << 4;
1047 } else if (pd_gains == 3) {
1048 chan_pcal_info->pwr[2][3] =
1049 (val >> 14) & 0x3;
1050 AR5K_EEPROM_READ(offset++, val);
1051 chan_pcal_info->pwr[2][3] |=
1052 ((val >> 0) & 0x3) << 2;
1053
1054 chan_pcal_info->pddac[2][3] =
1055 (val >> 2) & 0x3f;
1056 }
1057
1058 for (c = 0; c < pd_gains; c++) {
1059 /* Recreate pwr table for this channel using pwr steps */
1060 chan_pcal_info->pwr[c][0] += chan_pcal_info->pwr_i[c] * 2;
1061 chan_pcal_info->pwr[c][1] += chan_pcal_info->pwr[c][0];
1062 chan_pcal_info->pwr[c][2] += chan_pcal_info->pwr[c][1];
1063 chan_pcal_info->pwr[c][3] += chan_pcal_info->pwr[c][2];
1064 if (chan_pcal_info->pwr[c][3] == chan_pcal_info->pwr[c][2])
1065 chan_pcal_info->pwr[c][3] = 0;
1066
1067 /* Recreate pddac table for this channel using pddac steps */
1068 chan_pcal_info->pddac[c][0] += chan_pcal_info->pddac_i[c];
1069 chan_pcal_info->pddac[c][1] += chan_pcal_info->pddac[c][0];
1070 chan_pcal_info->pddac[c][2] += chan_pcal_info->pddac[c][1];
1071 chan_pcal_info->pddac[c][3] += chan_pcal_info->pddac[c][2];
1072 if (chan_pcal_info->pddac[c][3] == chan_pcal_info->pddac[c][2])
1073 chan_pcal_info->pddac[c][3] = 0;
421 } 1074 }
422 } 1075 }
423 1076
424 /* 1077 return 0;
425 * Read 5GHz EEPROM channels 1078}
426 */ 1079
1080/*
1081 * Read per rate target power (this is the maximum tx power
1082 * supported by the card). This info is used when setting
1083 * tx power, no matter the channel.
1084 *
1085 * This also works for v5 EEPROMs.
1086 */
1087static int ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
1088{
1089 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1090 struct ath5k_rate_pcal_info *rate_pcal_info;
1091 u16 *rate_target_pwr_num;
1092 u32 offset;
1093 u16 val;
1094 int ret, i;
1095
1096 offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1);
1097 rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode];
1098 switch (mode) {
1099 case AR5K_EEPROM_MODE_11A:
1100 offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
1101 rate_pcal_info = ee->ee_rate_tpwr_a;
1102 ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN;
1103 break;
1104 case AR5K_EEPROM_MODE_11B:
1105 offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
1106 rate_pcal_info = ee->ee_rate_tpwr_b;
1107 ee->ee_rate_target_pwr_num[mode] = 2; /* 3rd is g mode's 1st */
1108 break;
1109 case AR5K_EEPROM_MODE_11G:
1110 offset += AR5K_EEPROM_TARGET_PWR_OFF_11G(ee->ee_version);
1111 rate_pcal_info = ee->ee_rate_tpwr_g;
1112 ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_2GHZ_CHAN;
1113 break;
1114 default:
1115 return -EINVAL;
1116 }
1117
1118 /* Different freq mask for older eeproms (<= v3.2) */
1119 if (ee->ee_version <= AR5K_EEPROM_VERSION_3_2) {
1120 for (i = 0; i < (*rate_target_pwr_num); i++) {
1121 AR5K_EEPROM_READ(offset++, val);
1122 rate_pcal_info[i].freq =
1123 ath5k_eeprom_bin2freq(ee, (val >> 9) & 0x7f, mode);
1124
1125 rate_pcal_info[i].target_power_6to24 = ((val >> 3) & 0x3f);
1126 rate_pcal_info[i].target_power_36 = (val << 3) & 0x3f;
1127
1128 AR5K_EEPROM_READ(offset++, val);
1129
1130 if (rate_pcal_info[i].freq == AR5K_EEPROM_CHANNEL_DIS ||
1131 val == 0) {
1132 (*rate_target_pwr_num) = i;
1133 break;
1134 }
1135
1136 rate_pcal_info[i].target_power_36 |= ((val >> 13) & 0x7);
1137 rate_pcal_info[i].target_power_48 = ((val >> 7) & 0x3f);
1138 rate_pcal_info[i].target_power_54 = ((val >> 1) & 0x3f);
1139 }
1140 } else {
1141 for (i = 0; i < (*rate_target_pwr_num); i++) {
1142 AR5K_EEPROM_READ(offset++, val);
1143 rate_pcal_info[i].freq =
1144 ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode);
1145
1146 rate_pcal_info[i].target_power_6to24 = ((val >> 2) & 0x3f);
1147 rate_pcal_info[i].target_power_36 = (val << 4) & 0x3f;
1148
1149 AR5K_EEPROM_READ(offset++, val);
1150
1151 if (rate_pcal_info[i].freq == AR5K_EEPROM_CHANNEL_DIS ||
1152 val == 0) {
1153 (*rate_target_pwr_num) = i;
1154 break;
1155 }
1156
1157 rate_pcal_info[i].target_power_36 |= (val >> 12) & 0xf;
1158 rate_pcal_info[i].target_power_48 = ((val >> 6) & 0x3f);
1159 rate_pcal_info[i].target_power_54 = (val & 0x3f);
1160 }
1161 }
1162
1163 return 0;
1164}
1165
1166static int
1167ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah)
1168{
1169 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1170 int (*read_pcal)(struct ath5k_hw *hw, int mode);
1171 int mode;
1172 int err;
1173
1174 if ((ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) &&
1175 (AR5K_EEPROM_EEMAP(ee->ee_misc0) == 1))
1176 read_pcal = ath5k_eeprom_read_pcal_info_5112;
1177 else if ((ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0) &&
1178 (AR5K_EEPROM_EEMAP(ee->ee_misc0) == 2))
1179 read_pcal = ath5k_eeprom_read_pcal_info_2413;
1180 else
1181 read_pcal = ath5k_eeprom_read_pcal_info_5111;
1182
1183 for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) {
1184 err = read_pcal(ah, mode);
1185 if (err)
1186 return err;
1187
1188 err = ath5k_eeprom_read_target_rate_pwr_info(ah, mode);
1189 if (err < 0)
1190 return err;
1191 }
1192
1193 return 0;
1194}
1195
1196/* Read conformance test limits */
1197static int
1198ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
1199{
1200 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1201 struct ath5k_edge_power *rep;
1202 unsigned int fmask, pmask;
1203 unsigned int ctl_mode;
1204 int ret, i, j;
1205 u32 offset;
1206 u16 val;
1207
1208 pmask = AR5K_EEPROM_POWER_M;
1209 fmask = AR5K_EEPROM_FREQ_M(ee->ee_version);
1210 offset = AR5K_EEPROM_CTL(ee->ee_version);
1211 ee->ee_ctls = AR5K_EEPROM_N_CTLS(ee->ee_version);
1212 for (i = 0; i < ee->ee_ctls; i += 2) {
1213 AR5K_EEPROM_READ(offset++, val);
1214 ee->ee_ctl[i] = (val >> 8) & 0xff;
1215 ee->ee_ctl[i + 1] = val & 0xff;
1216 }
1217
1218 offset = AR5K_EEPROM_GROUP8_OFFSET;
1219 if (ee->ee_version >= AR5K_EEPROM_VERSION_4_0)
1220 offset += AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1) -
1221 AR5K_EEPROM_GROUP5_OFFSET;
1222 else
1223 offset += AR5K_EEPROM_GROUPS_START(ee->ee_version);
1224
1225 rep = ee->ee_ctl_pwr;
1226 for(i = 0; i < ee->ee_ctls; i++) {
1227 switch(ee->ee_ctl[i] & AR5K_CTL_MODE_M) {
1228 case AR5K_CTL_11A:
1229 case AR5K_CTL_TURBO:
1230 ctl_mode = AR5K_EEPROM_MODE_11A;
1231 break;
1232 default:
1233 ctl_mode = AR5K_EEPROM_MODE_11G;
1234 break;
1235 }
1236 if (ee->ee_ctl[i] == 0) {
1237 if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3)
1238 offset += 8;
1239 else
1240 offset += 7;
1241 rep += AR5K_EEPROM_N_EDGES;
1242 continue;
1243 }
1244 if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) {
1245 for (j = 0; j < AR5K_EEPROM_N_EDGES; j += 2) {
1246 AR5K_EEPROM_READ(offset++, val);
1247 rep[j].freq = (val >> 8) & fmask;
1248 rep[j + 1].freq = val & fmask;
1249 }
1250 for (j = 0; j < AR5K_EEPROM_N_EDGES; j += 2) {
1251 AR5K_EEPROM_READ(offset++, val);
1252 rep[j].edge = (val >> 8) & pmask;
1253 rep[j].flag = (val >> 14) & 1;
1254 rep[j + 1].edge = val & pmask;
1255 rep[j + 1].flag = (val >> 6) & 1;
1256 }
1257 } else {
1258 AR5K_EEPROM_READ(offset++, val);
1259 rep[0].freq = (val >> 9) & fmask;
1260 rep[1].freq = (val >> 2) & fmask;
1261 rep[2].freq = (val << 5) & fmask;
1262
1263 AR5K_EEPROM_READ(offset++, val);
1264 rep[2].freq |= (val >> 11) & 0x1f;
1265 rep[3].freq = (val >> 4) & fmask;
1266 rep[4].freq = (val << 3) & fmask;
1267
1268 AR5K_EEPROM_READ(offset++, val);
1269 rep[4].freq |= (val >> 13) & 0x7;
1270 rep[5].freq = (val >> 6) & fmask;
1271 rep[6].freq = (val << 1) & fmask;
1272
1273 AR5K_EEPROM_READ(offset++, val);
1274 rep[6].freq |= (val >> 15) & 0x1;
1275 rep[7].freq = (val >> 8) & fmask;
1276
1277 rep[0].edge = (val >> 2) & pmask;
1278 rep[1].edge = (val << 4) & pmask;
1279
1280 AR5K_EEPROM_READ(offset++, val);
1281 rep[1].edge |= (val >> 12) & 0xf;
1282 rep[2].edge = (val >> 6) & pmask;
1283 rep[3].edge = val & pmask;
1284
1285 AR5K_EEPROM_READ(offset++, val);
1286 rep[4].edge = (val >> 10) & pmask;
1287 rep[5].edge = (val >> 4) & pmask;
1288 rep[6].edge = (val << 2) & pmask;
1289
1290 AR5K_EEPROM_READ(offset++, val);
1291 rep[6].edge |= (val >> 14) & 0x3;
1292 rep[7].edge = (val >> 8) & pmask;
1293 }
1294 for (j = 0; j < AR5K_EEPROM_N_EDGES; j++) {
1295 rep[j].freq = ath5k_eeprom_bin2freq(ee,
1296 rep[j].freq, ctl_mode);
1297 }
1298 rep += AR5K_EEPROM_N_EDGES;
1299 }
427 1300
428 return 0; 1301 return 0;
429} 1302}
430 1303
1304
1305/*
1306 * Initialize eeprom power tables
1307 */
1308int
1309ath5k_eeprom_init(struct ath5k_hw *ah)
1310{
1311 int err;
1312
1313 err = ath5k_eeprom_init_header(ah);
1314 if (err < 0)
1315 return err;
1316
1317 err = ath5k_eeprom_init_modes(ah);
1318 if (err < 0)
1319 return err;
1320
1321 err = ath5k_eeprom_read_pcal_info(ah);
1322 if (err < 0)
1323 return err;
1324
1325 err = ath5k_eeprom_read_ctl_info(ah);
1326 if (err < 0)
1327 return err;
1328
1329 return 0;
1330}
431/* 1331/*
432 * Read the MAC address from eeprom 1332 * Read the MAC address from eeprom
433 */ 1333 */
diff --git a/drivers/net/wireless/ath5k/eeprom.h b/drivers/net/wireless/ath5k/eeprom.h
index a468ecfbb18a..09eb7d0176a4 100644
--- a/drivers/net/wireless/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath5k/eeprom.h
@@ -25,24 +25,8 @@
25#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */ 25#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */
26#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */ 26#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */
27 27
28#define AR5K_EEPROM_PROTECT 0x003f /* EEPROM protect status */
29#define AR5K_EEPROM_PROTECT_RD_0_31 0x0001 /* Read protection bit for offsets 0x0 - 0x1f */
30#define AR5K_EEPROM_PROTECT_WR_0_31 0x0002 /* Write protection bit for offsets 0x0 - 0x1f */
31#define AR5K_EEPROM_PROTECT_RD_32_63 0x0004 /* 0x20 - 0x3f */
32#define AR5K_EEPROM_PROTECT_WR_32_63 0x0008
33#define AR5K_EEPROM_PROTECT_RD_64_127 0x0010 /* 0x40 - 0x7f */
34#define AR5K_EEPROM_PROTECT_WR_64_127 0x0020
35#define AR5K_EEPROM_PROTECT_RD_128_191 0x0040 /* 0x80 - 0xbf (regdom) */
36#define AR5K_EEPROM_PROTECT_WR_128_191 0x0080
37#define AR5K_EEPROM_PROTECT_RD_192_207 0x0100 /* 0xc0 - 0xcf */
38#define AR5K_EEPROM_PROTECT_WR_192_207 0x0200
39#define AR5K_EEPROM_PROTECT_RD_208_223 0x0400 /* 0xd0 - 0xdf */
40#define AR5K_EEPROM_PROTECT_WR_208_223 0x0800
41#define AR5K_EEPROM_PROTECT_RD_224_239 0x1000 /* 0xe0 - 0xef */
42#define AR5K_EEPROM_PROTECT_WR_224_239 0x2000
43#define AR5K_EEPROM_PROTECT_RD_240_255 0x4000 /* 0xf0 - 0xff */
44#define AR5K_EEPROM_PROTECT_WR_240_255 0x8000
45#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */ 28#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
29#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
46#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */ 30#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
47#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE) 31#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
48#define AR5K_EEPROM_INFO_CKSUM 0xffff 32#define AR5K_EEPROM_INFO_CKSUM 0xffff
@@ -53,15 +37,19 @@
53#define AR5K_EEPROM_VERSION_3_1 0x3001 /* ob/db values for 2Ghz (ar5211_rfregs) */ 37#define AR5K_EEPROM_VERSION_3_1 0x3001 /* ob/db values for 2Ghz (ar5211_rfregs) */
54#define AR5K_EEPROM_VERSION_3_2 0x3002 /* different frequency representation (eeprom_bin2freq) */ 38#define AR5K_EEPROM_VERSION_3_2 0x3002 /* different frequency representation (eeprom_bin2freq) */
55#define AR5K_EEPROM_VERSION_3_3 0x3003 /* offsets changed, has 32 CTLs (see below) and ee_false_detect (eeprom_read_modes) */ 39#define AR5K_EEPROM_VERSION_3_3 0x3003 /* offsets changed, has 32 CTLs (see below) and ee_false_detect (eeprom_read_modes) */
56#define AR5K_EEPROM_VERSION_3_4 0x3004 /* has ee_i_gain ee_cck_ofdm_power_delta (eeprom_read_modes) */ 40#define AR5K_EEPROM_VERSION_3_4 0x3004 /* has ee_i_gain, ee_cck_ofdm_power_delta (eeprom_read_modes) */
57#define AR5K_EEPROM_VERSION_4_0 0x4000 /* has ee_misc*, ee_cal_pier, ee_turbo_max_power and ee_xr_power (eeprom_init) */ 41#define AR5K_EEPROM_VERSION_4_0 0x4000 /* has ee_misc, ee_cal_pier, ee_turbo_max_power and ee_xr_power (eeprom_init) */
58#define AR5K_EEPROM_VERSION_4_1 0x4001 /* has ee_margin_tx_rx (eeprom_init) */ 42#define AR5K_EEPROM_VERSION_4_1 0x4001 /* has ee_margin_tx_rx (eeprom_init) */
59#define AR5K_EEPROM_VERSION_4_2 0x4002 /* has ee_cck_ofdm_gain_delta (eeprom_init) */ 43#define AR5K_EEPROM_VERSION_4_2 0x4002 /* has ee_cck_ofdm_gain_delta (eeprom_init) */
60#define AR5K_EEPROM_VERSION_4_3 0x4003 44#define AR5K_EEPROM_VERSION_4_3 0x4003 /* power calibration changes */
61#define AR5K_EEPROM_VERSION_4_4 0x4004 45#define AR5K_EEPROM_VERSION_4_4 0x4004
62#define AR5K_EEPROM_VERSION_4_5 0x4005 46#define AR5K_EEPROM_VERSION_4_5 0x4005
63#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */ 47#define AR5K_EEPROM_VERSION_4_6 0x4006 /* has ee_scaled_cck_delta */
64#define AR5K_EEPROM_VERSION_4_7 0x4007 48#define AR5K_EEPROM_VERSION_4_7 0x3007 /* 4007 ? */
49#define AR5K_EEPROM_VERSION_4_9 0x4009 /* EAR futureproofing */
50#define AR5K_EEPROM_VERSION_5_0 0x5000 /* Has 2413 PDADC calibration etc */
51#define AR5K_EEPROM_VERSION_5_1 0x5001 /* Has capability values */
52#define AR5K_EEPROM_VERSION_5_3 0x5003 /* Has spur mitigation tables */
65 53
66#define AR5K_EEPROM_MODE_11A 0 54#define AR5K_EEPROM_MODE_11A 0
67#define AR5K_EEPROM_MODE_11B 1 55#define AR5K_EEPROM_MODE_11B 1
@@ -74,8 +62,8 @@
74#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz (?) */ 62#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz (?) */
75#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for a/XR mode (eeprom_init) */ 63#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for a/XR mode (eeprom_init) */
76#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) 64#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7)
77#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz (?) */
78#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */ 65#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
66#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz */
79 67
80#define AR5K_EEPROM_RFKILL_GPIO_SEL 0x0000001c 68#define AR5K_EEPROM_RFKILL_GPIO_SEL 0x0000001c
81#define AR5K_EEPROM_RFKILL_GPIO_SEL_S 2 69#define AR5K_EEPROM_RFKILL_GPIO_SEL_S 2
@@ -87,27 +75,95 @@
87 (((_v) >= AR5K_EEPROM_VERSION_3_3) ? _v3_3 : _v3_0) 75 (((_v) >= AR5K_EEPROM_VERSION_3_3) ? _v3_3 : _v3_0)
88 76
89#define AR5K_EEPROM_ANT_GAIN(_v) AR5K_EEPROM_OFF(_v, 0x00c4, 0x00c3) 77#define AR5K_EEPROM_ANT_GAIN(_v) AR5K_EEPROM_OFF(_v, 0x00c4, 0x00c3)
90#define AR5K_EEPROM_ANT_GAIN_5GHZ(_v) ((int8_t)(((_v) >> 8) & 0xff)) 78#define AR5K_EEPROM_ANT_GAIN_5GHZ(_v) ((s8)(((_v) >> 8) & 0xff))
91#define AR5K_EEPROM_ANT_GAIN_2GHZ(_v) ((int8_t)((_v) & 0xff)) 79#define AR5K_EEPROM_ANT_GAIN_2GHZ(_v) ((s8)((_v) & 0xff))
80
81/* Misc values available since EEPROM 4.0 */
82#define AR5K_EEPROM_MISC0 AR5K_EEPROM_INFO(4)
83#define AR5K_EEPROM_EARSTART(_v) ((_v) & 0xfff)
84#define AR5K_EEPROM_HDR_XR2_DIS(_v) (((_v) >> 12) & 0x1)
85#define AR5K_EEPROM_HDR_XR5_DIS(_v) (((_v) >> 13) & 0x1)
86#define AR5K_EEPROM_EEMAP(_v) (((_v) >> 14) & 0x3)
87
88#define AR5K_EEPROM_MISC1 AR5K_EEPROM_INFO(5)
89#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff)
90#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1)
91#define AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(_v) (((_v) >> 15) & 0x1)
92
93#define AR5K_EEPROM_MISC2 AR5K_EEPROM_INFO(6)
94#define AR5K_EEPROM_EEP_FILE_VERSION(_v) (((_v) >> 8) & 0xff)
95#define AR5K_EEPROM_EAR_FILE_VERSION(_v) ((_v) & 0xff)
96
97#define AR5K_EEPROM_MISC3 AR5K_EEPROM_INFO(7)
98#define AR5K_EEPROM_ART_BUILD_NUM(_v) (((_v) >> 10) & 0x3f)
99#define AR5K_EEPROM_EAR_FILE_ID(_v) ((_v) & 0xff)
100
101#define AR5K_EEPROM_MISC4 AR5K_EEPROM_INFO(8)
102#define AR5K_EEPROM_CAL_DATA_START(_v) (((_v) >> 4) & 0xfff)
103#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3)
104#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3)
105
106#define AR5K_EEPROM_MISC5 AR5K_EEPROM_INFO(9)
107#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1)
108#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1)
109#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1)
110#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1)
111#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf)
112#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1)
113#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf)
114
115#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10)
116#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x8)
117#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x8)
118#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1)
119#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1)
120#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1)
121#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 9) & 0x1)
122#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 10) & 0x1)
92 123
93/* calibration settings */ 124/* calibration settings */
94#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4) 125#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
95#define AR5K_EEPROM_MODES_11B(_v) AR5K_EEPROM_OFF(_v, 0x00d0, 0x00f2) 126#define AR5K_EEPROM_MODES_11B(_v) AR5K_EEPROM_OFF(_v, 0x00d0, 0x00f2)
96#define AR5K_EEPROM_MODES_11G(_v) AR5K_EEPROM_OFF(_v, 0x00da, 0x010d) 127#define AR5K_EEPROM_MODES_11G(_v) AR5K_EEPROM_OFF(_v, 0x00da, 0x010d)
97#define AR5K_EEPROM_CTL(_v) AR5K_EEPROM_OFF(_v, 0x00e4, 0x0128) /* Conformance test limits */ 128#define AR5K_EEPROM_CTL(_v) AR5K_EEPROM_OFF(_v, 0x00e4, 0x0128) /* Conformance test limits */
129#define AR5K_EEPROM_GROUPS_START(_v) AR5K_EEPROM_OFF(_v, 0x0100, 0x0150) /* Start of Groups */
130#define AR5K_EEPROM_GROUP1_OFFSET 0x0
131#define AR5K_EEPROM_GROUP2_OFFSET 0x5
132#define AR5K_EEPROM_GROUP3_OFFSET 0x37
133#define AR5K_EEPROM_GROUP4_OFFSET 0x46
134#define AR5K_EEPROM_GROUP5_OFFSET 0x55
135#define AR5K_EEPROM_GROUP6_OFFSET 0x65
136#define AR5K_EEPROM_GROUP7_OFFSET 0x69
137#define AR5K_EEPROM_GROUP8_OFFSET 0x6f
138
139#define AR5K_EEPROM_TARGET_PWR_OFF_11A(_v) AR5K_EEPROM_OFF(_v, AR5K_EEPROM_GROUPS_START(_v) + \
140 AR5K_EEPROM_GROUP5_OFFSET, 0x0000)
141#define AR5K_EEPROM_TARGET_PWR_OFF_11B(_v) AR5K_EEPROM_OFF(_v, AR5K_EEPROM_GROUPS_START(_v) + \
142 AR5K_EEPROM_GROUP6_OFFSET, 0x0010)
143#define AR5K_EEPROM_TARGET_PWR_OFF_11G(_v) AR5K_EEPROM_OFF(_v, AR5K_EEPROM_GROUPS_START(_v) + \
144 AR5K_EEPROM_GROUP7_OFFSET, 0x0014)
98 145
99/* [3.1 - 3.3] */ 146/* [3.1 - 3.3] */
100#define AR5K_EEPROM_OBDB0_2GHZ 0x00ec 147#define AR5K_EEPROM_OBDB0_2GHZ 0x00ec
101#define AR5K_EEPROM_OBDB1_2GHZ 0x00ed 148#define AR5K_EEPROM_OBDB1_2GHZ 0x00ed
102 149
103/* Misc values available since EEPROM 4.0 */ 150#define AR5K_EEPROM_PROTECT 0x003f /* EEPROM protect status */
104#define AR5K_EEPROM_MISC0 0x00c4 151#define AR5K_EEPROM_PROTECT_RD_0_31 0x0001 /* Read protection bit for offsets 0x0 - 0x1f */
105#define AR5K_EEPROM_EARSTART(_v) ((_v) & 0xfff) 152#define AR5K_EEPROM_PROTECT_WR_0_31 0x0002 /* Write protection bit for offsets 0x0 - 0x1f */
106#define AR5K_EEPROM_EEMAP(_v) (((_v) >> 14) & 0x3) 153#define AR5K_EEPROM_PROTECT_RD_32_63 0x0004 /* 0x20 - 0x3f */
107#define AR5K_EEPROM_MISC1 0x00c5 154#define AR5K_EEPROM_PROTECT_WR_32_63 0x0008
108#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff) 155#define AR5K_EEPROM_PROTECT_RD_64_127 0x0010 /* 0x40 - 0x7f */
109#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1) 156#define AR5K_EEPROM_PROTECT_WR_64_127 0x0020
110 157#define AR5K_EEPROM_PROTECT_RD_128_191 0x0040 /* 0x80 - 0xbf (regdom) */
158#define AR5K_EEPROM_PROTECT_WR_128_191 0x0080
159#define AR5K_EEPROM_PROTECT_RD_192_207 0x0100 /* 0xc0 - 0xcf */
160#define AR5K_EEPROM_PROTECT_WR_192_207 0x0200
161#define AR5K_EEPROM_PROTECT_RD_208_223 0x0400 /* 0xd0 - 0xdf */
162#define AR5K_EEPROM_PROTECT_WR_208_223 0x0800
163#define AR5K_EEPROM_PROTECT_RD_224_239 0x1000 /* 0xe0 - 0xef */
164#define AR5K_EEPROM_PROTECT_WR_224_239 0x2000
165#define AR5K_EEPROM_PROTECT_RD_240_255 0x4000 /* 0xf0 - 0xff */
166#define AR5K_EEPROM_PROTECT_WR_240_255 0x8000
111 167
112/* Some EEPROM defines */ 168/* Some EEPROM defines */
113#define AR5K_EEPROM_EEP_SCALE 100 169#define AR5K_EEPROM_EEP_SCALE 100
@@ -115,8 +171,11 @@
115#define AR5K_EEPROM_N_MODES 3 171#define AR5K_EEPROM_N_MODES 3
116#define AR5K_EEPROM_N_5GHZ_CHAN 10 172#define AR5K_EEPROM_N_5GHZ_CHAN 10
117#define AR5K_EEPROM_N_2GHZ_CHAN 3 173#define AR5K_EEPROM_N_2GHZ_CHAN 3
174#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
118#define AR5K_EEPROM_MAX_CHAN 10 175#define AR5K_EEPROM_MAX_CHAN 10
176#define AR5K_EEPROM_N_PWR_POINTS_5111 11
119#define AR5K_EEPROM_N_PCDAC 11 177#define AR5K_EEPROM_N_PCDAC 11
178#define AR5K_EEPROM_N_PHASE_CAL 5
120#define AR5K_EEPROM_N_TEST_FREQ 8 179#define AR5K_EEPROM_N_TEST_FREQ 8
121#define AR5K_EEPROM_N_EDGES 8 180#define AR5K_EEPROM_N_EDGES 8
122#define AR5K_EEPROM_N_INTERCEPTS 11 181#define AR5K_EEPROM_N_INTERCEPTS 11
@@ -136,6 +195,8 @@
136#define AR5K_EEPROM_N_XPD_PER_CHANNEL 4 195#define AR5K_EEPROM_N_XPD_PER_CHANNEL 4
137#define AR5K_EEPROM_N_XPD0_POINTS 4 196#define AR5K_EEPROM_N_XPD0_POINTS 4
138#define AR5K_EEPROM_N_XPD3_POINTS 3 197#define AR5K_EEPROM_N_XPD3_POINTS 3
198#define AR5K_EEPROM_N_PD_GAINS 4
199#define AR5K_EEPROM_N_PD_POINTS 5
139#define AR5K_EEPROM_N_INTERCEPT_10_2GHZ 35 200#define AR5K_EEPROM_N_INTERCEPT_10_2GHZ 35
140#define AR5K_EEPROM_N_INTERCEPT_10_5GHZ 55 201#define AR5K_EEPROM_N_INTERCEPT_10_5GHZ 55
141#define AR5K_EEPROM_POWER_M 0x3f 202#define AR5K_EEPROM_POWER_M 0x3f
@@ -158,8 +219,99 @@
158#define AR5K_EEPROM_READ_HDR(_o, _v) \ 219#define AR5K_EEPROM_READ_HDR(_o, _v) \
159 AR5K_EEPROM_READ(_o, ah->ah_capabilities.cap_eeprom._v); \ 220 AR5K_EEPROM_READ(_o, ah->ah_capabilities.cap_eeprom._v); \
160 221
161/* Struct to hold EEPROM calibration data */ 222enum ath5k_ant_setting {
223 AR5K_ANT_VARIABLE = 0, /* variable by programming */
224 AR5K_ANT_FIXED_A = 1, /* fixed to 11a frequencies */
225 AR5K_ANT_FIXED_B = 2, /* fixed to 11b frequencies */
226 AR5K_ANT_MAX = 3,
227};
228
229enum ath5k_ctl_mode {
230 AR5K_CTL_11A = 0,
231 AR5K_CTL_11B = 1,
232 AR5K_CTL_11G = 2,
233 AR5K_CTL_TURBO = 3,
234 AR5K_CTL_108G = 4,
235 AR5K_CTL_2GHT20 = 5,
236 AR5K_CTL_5GHT20 = 6,
237 AR5K_CTL_2GHT40 = 7,
238 AR5K_CTL_5GHT40 = 8,
239 AR5K_CTL_MODE_M = 15,
240};
241
242/* Per channel calibration data, used for power table setup */
243struct ath5k_chan_pcal_info_rf5111 {
244 /* Power levels in half dbm units
245 * for one power curve. */
246 u8 pwr[AR5K_EEPROM_N_PWR_POINTS_5111];
247 /* PCDAC table steps
248 * for the above values */
249 u8 pcdac[AR5K_EEPROM_N_PWR_POINTS_5111];
250 /* Starting PCDAC step */
251 u8 pcdac_min;
252 /* Final PCDAC step */
253 u8 pcdac_max;
254};
255
256struct ath5k_chan_pcal_info_rf5112 {
257 /* Power levels in quarter dBm units
258 * for lower (0) and higher (3)
259 * level curves */
260 s8 pwr_x0[AR5K_EEPROM_N_XPD0_POINTS];
261 s8 pwr_x3[AR5K_EEPROM_N_XPD3_POINTS];
262 /* PCDAC table steps
263 * for the above values */
264 u8 pcdac_x0[AR5K_EEPROM_N_XPD0_POINTS];
265 u8 pcdac_x3[AR5K_EEPROM_N_XPD3_POINTS];
266};
267
268struct ath5k_chan_pcal_info_rf2413 {
269 /* Starting pwr/pddac values */
270 s8 pwr_i[AR5K_EEPROM_N_PD_GAINS];
271 u8 pddac_i[AR5K_EEPROM_N_PD_GAINS];
272 /* (pwr,pddac) points */
273 s8 pwr[AR5K_EEPROM_N_PD_GAINS]
274 [AR5K_EEPROM_N_PD_POINTS];
275 u8 pddac[AR5K_EEPROM_N_PD_GAINS]
276 [AR5K_EEPROM_N_PD_POINTS];
277};
278
279struct ath5k_chan_pcal_info {
280 /* Frequency */
281 u16 freq;
282 /* Max available power */
283 s8 max_pwr;
284 union {
285 struct ath5k_chan_pcal_info_rf5111 rf5111_info;
286 struct ath5k_chan_pcal_info_rf5112 rf5112_info;
287 struct ath5k_chan_pcal_info_rf2413 rf2413_info;
288 };
289};
290
291/* Per rate calibration data for each mode, used for power table setup */
292struct ath5k_rate_pcal_info {
293 u16 freq; /* Frequency */
294 /* Power level for 6-24Mbit/s rates */
295 u16 target_power_6to24;
296 /* Power level for 36Mbit rate */
297 u16 target_power_36;
298 /* Power level for 48Mbit rate */
299 u16 target_power_48;
300 /* Power level for 54Mbit rate */
301 u16 target_power_54;
302};
303
304/* Power edges for conformance test limits */
305struct ath5k_edge_power {
306 u16 freq;
307 u16 edge; /* in half dBm */
308 bool flag;
309};
310
311/* EEPROM calibration data */
162struct ath5k_eeprom_info { 312struct ath5k_eeprom_info {
313
314 /* Header information */
163 u16 ee_magic; 315 u16 ee_magic;
164 u16 ee_protect; 316 u16 ee_protect;
165 u16 ee_regdomain; 317 u16 ee_regdomain;
@@ -168,6 +320,11 @@ struct ath5k_eeprom_info {
168 u16 ee_ant_gain; 320 u16 ee_ant_gain;
169 u16 ee_misc0; 321 u16 ee_misc0;
170 u16 ee_misc1; 322 u16 ee_misc1;
323 u16 ee_misc2;
324 u16 ee_misc3;
325 u16 ee_misc4;
326 u16 ee_misc5;
327 u16 ee_misc6;
171 u16 ee_cck_ofdm_gain_delta; 328 u16 ee_cck_ofdm_gain_delta;
172 u16 ee_cck_ofdm_power_delta; 329 u16 ee_cck_ofdm_power_delta;
173 u16 ee_scaled_cck_delta; 330 u16 ee_scaled_cck_delta;
@@ -185,7 +342,7 @@ struct ath5k_eeprom_info {
185 u16 ee_turbo_max_power[AR5K_EEPROM_N_MODES]; 342 u16 ee_turbo_max_power[AR5K_EEPROM_N_MODES];
186 u16 ee_xr_power[AR5K_EEPROM_N_MODES]; 343 u16 ee_xr_power[AR5K_EEPROM_N_MODES];
187 u16 ee_switch_settling[AR5K_EEPROM_N_MODES]; 344 u16 ee_switch_settling[AR5K_EEPROM_N_MODES];
188 u16 ee_ant_tx_rx[AR5K_EEPROM_N_MODES]; 345 u16 ee_atn_tx_rx[AR5K_EEPROM_N_MODES];
189 u16 ee_ant_control[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PCDAC]; 346 u16 ee_ant_control[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_PCDAC];
190 u16 ee_ob[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB]; 347 u16 ee_ob[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
191 u16 ee_db[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB]; 348 u16 ee_db[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_OBDB];
@@ -198,18 +355,40 @@ struct ath5k_eeprom_info {
198 u16 ee_x_gain[AR5K_EEPROM_N_MODES]; 355 u16 ee_x_gain[AR5K_EEPROM_N_MODES];
199 u16 ee_i_gain[AR5K_EEPROM_N_MODES]; 356 u16 ee_i_gain[AR5K_EEPROM_N_MODES];
200 u16 ee_margin_tx_rx[AR5K_EEPROM_N_MODES]; 357 u16 ee_margin_tx_rx[AR5K_EEPROM_N_MODES];
358 u16 ee_switch_settling_turbo[AR5K_EEPROM_N_MODES];
359 u16 ee_margin_tx_rx_turbo[AR5K_EEPROM_N_MODES];
360 u16 ee_atn_tx_rx_turbo[AR5K_EEPROM_N_MODES];
201 361
202 /* Unused */ 362 /* Power calibration data */
203 u16 ee_false_detect[AR5K_EEPROM_N_MODES]; 363 u16 ee_false_detect[AR5K_EEPROM_N_MODES];
204 u16 ee_cal_pier[AR5K_EEPROM_N_MODES][AR5K_EEPROM_N_2GHZ_CHAN]; 364
205 u16 ee_channel[AR5K_EEPROM_N_MODES][AR5K_EEPROM_MAX_CHAN]; /*empty*/ 365 /* Number of pd gain curves per mode (RF2413) */
366 u8 ee_pd_gains[AR5K_EEPROM_N_MODES];
367
368 u8 ee_n_piers[AR5K_EEPROM_N_MODES];
369 struct ath5k_chan_pcal_info ee_pwr_cal_a[AR5K_EEPROM_N_5GHZ_CHAN];
370 struct ath5k_chan_pcal_info ee_pwr_cal_b[AR5K_EEPROM_N_2GHZ_CHAN];
371 struct ath5k_chan_pcal_info ee_pwr_cal_g[AR5K_EEPROM_N_2GHZ_CHAN];
372
373 /* Per rate target power levels */
374 u16 ee_rate_target_pwr_num[AR5K_EEPROM_N_MODES];
375 struct ath5k_rate_pcal_info ee_rate_tpwr_a[AR5K_EEPROM_N_5GHZ_CHAN];
376 struct ath5k_rate_pcal_info ee_rate_tpwr_b[AR5K_EEPROM_N_2GHZ_CHAN];
377 struct ath5k_rate_pcal_info ee_rate_tpwr_g[AR5K_EEPROM_N_2GHZ_CHAN];
206 378
207 /* Conformance test limits (Unused) */ 379 /* Conformance test limits (Unused) */
208 u16 ee_ctls; 380 u16 ee_ctls;
209 u16 ee_ctl[AR5K_EEPROM_MAX_CTLS]; 381 u16 ee_ctl[AR5K_EEPROM_MAX_CTLS];
382 struct ath5k_edge_power ee_ctl_pwr[AR5K_EEPROM_N_EDGES * AR5K_EEPROM_MAX_CTLS];
210 383
211 /* Noise Floor Calibration settings */ 384 /* Noise Floor Calibration settings */
212 s16 ee_noise_floor_thr[AR5K_EEPROM_N_MODES]; 385 s16 ee_noise_floor_thr[AR5K_EEPROM_N_MODES];
213 s8 ee_adc_desired_size[AR5K_EEPROM_N_MODES]; 386 s8 ee_adc_desired_size[AR5K_EEPROM_N_MODES];
214 s8 ee_pga_desired_size[AR5K_EEPROM_N_MODES]; 387 s8 ee_pga_desired_size[AR5K_EEPROM_N_MODES];
388 s8 ee_adc_desired_size_turbo[AR5K_EEPROM_N_MODES];
389 s8 ee_pga_desired_size_turbo[AR5K_EEPROM_N_MODES];
390 s8 ee_pd_gain_overlap;
391
392 u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
215}; 393};
394
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index ceaa6c475c06..450bd6e945ff 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -1681,7 +1681,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1681 */ 1681 */
1682 1682
1683 /* For AR5212 and combatible */ 1683 /* For AR5212 and combatible */
1684 if (ah->ah_version == AR5K_AR5212){ 1684 if (ah->ah_version == AR5K_AR5212) {
1685 1685
1686 /* First set of mode-specific settings */ 1686 /* First set of mode-specific settings */
1687 ath5k_hw_ini_mode_registers(ah, 1687 ath5k_hw_ini_mode_registers(ah,
@@ -1695,7 +1695,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1695 ar5212_ini, change_channel); 1695 ar5212_ini, change_channel);
1696 1696
1697 /* Second set of mode-specific settings */ 1697 /* Second set of mode-specific settings */
1698 if (ah->ah_radio == AR5K_RF5111){ 1698 if (ah->ah_radio == AR5K_RF5111) {
1699 1699
1700 ath5k_hw_ini_mode_registers(ah, 1700 ath5k_hw_ini_mode_registers(ah,
1701 ARRAY_SIZE(ar5212_rf5111_ini_mode_end), 1701 ARRAY_SIZE(ar5212_rf5111_ini_mode_end),
@@ -1706,7 +1706,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1706 ARRAY_SIZE(rf5111_ini_bbgain), 1706 ARRAY_SIZE(rf5111_ini_bbgain),
1707 rf5111_ini_bbgain, change_channel); 1707 rf5111_ini_bbgain, change_channel);
1708 1708
1709 } else if (ah->ah_radio == AR5K_RF5112){ 1709 } else if (ah->ah_radio == AR5K_RF5112) {
1710 1710
1711 ath5k_hw_ini_mode_registers(ah, 1711 ath5k_hw_ini_mode_registers(ah,
1712 ARRAY_SIZE(ar5212_rf5112_ini_mode_end), 1712 ARRAY_SIZE(ar5212_rf5112_ini_mode_end),
@@ -1716,7 +1716,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1716 ARRAY_SIZE(rf5112_ini_bbgain), 1716 ARRAY_SIZE(rf5112_ini_bbgain),
1717 rf5112_ini_bbgain, change_channel); 1717 rf5112_ini_bbgain, change_channel);
1718 1718
1719 } else if (ah->ah_radio == AR5K_RF5413){ 1719 } else if (ah->ah_radio == AR5K_RF5413) {
1720 1720
1721 ath5k_hw_ini_mode_registers(ah, 1721 ath5k_hw_ini_mode_registers(ah,
1722 ARRAY_SIZE(rf5413_ini_mode_end), 1722 ARRAY_SIZE(rf5413_ini_mode_end),
diff --git a/drivers/net/wireless/ath5k/pcu.c b/drivers/net/wireless/ath5k/pcu.c
index a47df9a24aa1..0cac05c6a9ce 100644
--- a/drivers/net/wireless/ath5k/pcu.c
+++ b/drivers/net/wireless/ath5k/pcu.c
@@ -46,34 +46,45 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
46{ 46{
47 u32 pcu_reg, beacon_reg, low_id, high_id; 47 u32 pcu_reg, beacon_reg, low_id, high_id;
48 48
49 pcu_reg = 0; 49
50 /* Preserve rest settings */
51 pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
52 pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP
53 | AR5K_STA_ID1_KEYSRCH_MODE
54 | (ah->ah_version == AR5K_AR5210 ?
55 (AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0));
56
50 beacon_reg = 0; 57 beacon_reg = 0;
51 58
52 ATH5K_TRACE(ah->ah_sc); 59 ATH5K_TRACE(ah->ah_sc);
53 60
54 switch (ah->ah_op_mode) { 61 switch (ah->ah_op_mode) {
55 case NL80211_IFTYPE_ADHOC: 62 case NL80211_IFTYPE_ADHOC:
56 pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_DESC_ANTENNA | 63 pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
57 (ah->ah_version == AR5K_AR5210 ?
58 AR5K_STA_ID1_NO_PSPOLL : 0);
59 beacon_reg |= AR5K_BCR_ADHOC; 64 beacon_reg |= AR5K_BCR_ADHOC;
65 if (ah->ah_version == AR5K_AR5210)
66 pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
67 else
68 AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_ADHOC);
60 break; 69 break;
61 70
62 case NL80211_IFTYPE_AP: 71 case NL80211_IFTYPE_AP:
63 case NL80211_IFTYPE_MESH_POINT: 72 case NL80211_IFTYPE_MESH_POINT:
64 pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_RTS_DEF_ANTENNA | 73 pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE;
65 (ah->ah_version == AR5K_AR5210 ?
66 AR5K_STA_ID1_NO_PSPOLL : 0);
67 beacon_reg |= AR5K_BCR_AP; 74 beacon_reg |= AR5K_BCR_AP;
75 if (ah->ah_version == AR5K_AR5210)
76 pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
77 else
78 AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_ADHOC);
68 break; 79 break;
69 80
70 case NL80211_IFTYPE_STATION: 81 case NL80211_IFTYPE_STATION:
71 pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA | 82 pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
72 (ah->ah_version == AR5K_AR5210 ? 83 | (ah->ah_version == AR5K_AR5210 ?
73 AR5K_STA_ID1_PWR_SV : 0); 84 AR5K_STA_ID1_PWR_SV : 0);
74 case NL80211_IFTYPE_MONITOR: 85 case NL80211_IFTYPE_MONITOR:
75 pcu_reg |= AR5K_STA_ID1_DEFAULT_ANTENNA | 86 pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
76 (ah->ah_version == AR5K_AR5210 ? 87 | (ah->ah_version == AR5K_AR5210 ?
77 AR5K_STA_ID1_NO_PSPOLL : 0); 88 AR5K_STA_ID1_NO_PSPOLL : 0);
78 break; 89 break;
79 90
@@ -130,6 +141,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
130 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR); 141 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
131 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE); 142 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
132 } 143 }
144
145 /* TODO: Handle ANI stats */
133} 146}
134 147
135/** 148/**
@@ -258,16 +271,19 @@ void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
258int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) 271int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
259{ 272{
260 u32 low_id, high_id; 273 u32 low_id, high_id;
274 u32 pcu_reg;
261 275
262 ATH5K_TRACE(ah->ah_sc); 276 ATH5K_TRACE(ah->ah_sc);
263 /* Set new station ID */ 277 /* Set new station ID */
264 memcpy(ah->ah_sta_id, mac, ETH_ALEN); 278 memcpy(ah->ah_sta_id, mac, ETH_ALEN);
265 279
280 pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
281
266 low_id = AR5K_LOW_ID(mac); 282 low_id = AR5K_LOW_ID(mac);
267 high_id = AR5K_HIGH_ID(mac); 283 high_id = AR5K_HIGH_ID(mac);
268 284
269 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0); 285 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
270 ath5k_hw_reg_write(ah, high_id, AR5K_STA_ID1); 286 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
271 287
272 return 0; 288 return 0;
273} 289}
@@ -290,8 +306,10 @@ void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id)
290 * Set simple BSSID mask on 5212 306 * Set simple BSSID mask on 5212
291 */ 307 */
292 if (ah->ah_version == AR5K_AR5212) { 308 if (ah->ah_version == AR5K_AR5212) {
293 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM0); 309 ath5k_hw_reg_write(ah, AR5K_LOW_ID(ah->ah_bssid_mask),
294 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM1); 310 AR5K_BSS_IDM0);
311 ath5k_hw_reg_write(ah, AR5K_HIGH_ID(ah->ah_bssid_mask),
312 AR5K_BSS_IDM1);
295 } 313 }
296 314
297 /* 315 /*
@@ -415,6 +433,9 @@ int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
415 u32 low_id, high_id; 433 u32 low_id, high_id;
416 ATH5K_TRACE(ah->ah_sc); 434 ATH5K_TRACE(ah->ah_sc);
417 435
436 /* Cache bssid mask so that we can restore it
437 * on reset */
438 memcpy(ah->ah_bssid_mask, mask, ETH_ALEN);
418 if (ah->ah_version == AR5K_AR5212) { 439 if (ah->ah_version == AR5K_AR5212) {
419 low_id = AR5K_LOW_ID(mask); 440 low_id = AR5K_LOW_ID(mask);
420 high_id = AR5K_HIGH_ID(mask); 441 high_id = AR5K_HIGH_ID(mask);
@@ -576,7 +597,7 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
576 filter |= AR5K_RX_FILTER_PROM; 597 filter |= AR5K_RX_FILTER_PROM;
577 } 598 }
578 599
579 /*Zero length DMA*/ 600 /*Zero length DMA (phy error reporting) */
580 if (data) 601 if (data)
581 AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA); 602 AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
582 else 603 else
@@ -661,7 +682,12 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
661 * Set the additional timers by mode 682 * Set the additional timers by mode
662 */ 683 */
663 switch (ah->ah_op_mode) { 684 switch (ah->ah_op_mode) {
685 case NL80211_IFTYPE_MONITOR:
664 case NL80211_IFTYPE_STATION: 686 case NL80211_IFTYPE_STATION:
687 /* In STA mode timer1 is used as next wakeup
688 * timer and timer2 as next CFP duration start
689 * timer. Both in 1/8TUs. */
690 /* TODO: PCF handling */
665 if (ah->ah_version == AR5K_AR5210) { 691 if (ah->ah_version == AR5K_AR5210) {
666 timer1 = 0xffffffff; 692 timer1 = 0xffffffff;
667 timer2 = 0xffffffff; 693 timer2 = 0xffffffff;
@@ -669,27 +695,60 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
669 timer1 = 0x0000ffff; 695 timer1 = 0x0000ffff;
670 timer2 = 0x0007ffff; 696 timer2 = 0x0007ffff;
671 } 697 }
698 /* Mark associated AP as PCF incapable for now */
699 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PCF);
672 break; 700 break;
673 701 case NL80211_IFTYPE_ADHOC:
702 AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM);
674 default: 703 default:
704 /* On non-STA modes timer1 is used as next DMA
705 * beacon alert (DBA) timer and timer2 as next
706 * software beacon alert. Both in 1/8TUs. */
675 timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3; 707 timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3;
676 timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3; 708 timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3;
709 break;
677 } 710 }
678 711
712 /* Timer3 marks the end of our ATIM window
713 * a zero length window is not allowed because
714 * we 'll get no beacons */
679 timer3 = next_beacon + (ah->ah_atim_window ? ah->ah_atim_window : 1); 715 timer3 = next_beacon + (ah->ah_atim_window ? ah->ah_atim_window : 1);
680 716
681 /* 717 /*
682 * Set the beacon register and enable all timers. 718 * Set the beacon register and enable all timers.
683 * (next beacon, DMA beacon, software beacon, ATIM window time)
684 */ 719 */
685 ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0); 720 /* When in AP mode zero timer0 to start TSF */
721 if (ah->ah_op_mode == NL80211_IFTYPE_AP)
722 ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
723 else
724 ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
686 ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1); 725 ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
687 ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2); 726 ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
688 ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3); 727 ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);
689 728
729 /* Force a TSF reset if requested and enable beacons */
730 if (interval & AR5K_BEACON_RESET_TSF)
731 ath5k_hw_reset_tsf(ah);
732
690 ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD | 733 ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD |
691 AR5K_BEACON_RESET_TSF | AR5K_BEACON_ENABLE), 734 AR5K_BEACON_ENABLE),
692 AR5K_BEACON); 735 AR5K_BEACON);
736
737 /* Flush any pending BMISS interrupts on ISR by
738 * performing a clear-on-write operation on PISR
739 * register for the BMISS bit (writing a bit on
740 * ISR togles a reset for that bit and leaves
741 * the rest bits intact) */
742 if (ah->ah_version == AR5K_AR5210)
743 ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_ISR);
744 else
745 ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_PISR);
746
747 /* TODO: Set enchanced sleep registers on AR5212
748 * based on vif->bss_conf params, until then
749 * disable power save reporting.*/
750 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PWR_SV);
751
693} 752}
694 753
695#if 0 754#if 0
@@ -899,14 +958,26 @@ int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
899 */ 958 */
900int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry) 959int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
901{ 960{
902 unsigned int i; 961 unsigned int i, type;
962 u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
903 963
904 ATH5K_TRACE(ah->ah_sc); 964 ATH5K_TRACE(ah->ah_sc);
905 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE); 965 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
906 966
967 type = ath5k_hw_reg_read(ah, AR5K_KEYTABLE_TYPE(entry));
968
907 for (i = 0; i < AR5K_KEYCACHE_SIZE; i++) 969 for (i = 0; i < AR5K_KEYCACHE_SIZE; i++)
908 ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i)); 970 ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i));
909 971
972 /* Reset associated MIC entry if TKIP
973 * is enabled located at offset (entry + 64) */
974 if (type == AR5K_KEYTABLE_TYPE_TKIP) {
975 AR5K_ASSERT_ENTRY(micentry, AR5K_KEYTABLE_SIZE);
976 for (i = 0; i < AR5K_KEYCACHE_SIZE / 2 ; i++)
977 ath5k_hw_reg_write(ah, 0,
978 AR5K_KEYTABLE_OFF(micentry, i));
979 }
980
910 /* 981 /*
911 * Set NULL encryption on AR5212+ 982 * Set NULL encryption on AR5212+
912 * 983 *
@@ -916,10 +987,16 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
916 * Note2: Windows driver (ndiswrapper) sets this to 987 * Note2: Windows driver (ndiswrapper) sets this to
917 * 0x00000714 instead of 0x00000007 988 * 0x00000714 instead of 0x00000007
918 */ 989 */
919 if (ah->ah_version > AR5K_AR5211) 990 if (ah->ah_version > AR5K_AR5211) {
920 ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL, 991 ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
921 AR5K_KEYTABLE_TYPE(entry)); 992 AR5K_KEYTABLE_TYPE(entry));
922 993
994 if (type == AR5K_KEYTABLE_TYPE_TKIP) {
995 ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
996 AR5K_KEYTABLE_TYPE(micentry));
997 }
998 }
999
923 return 0; 1000 return 0;
924} 1001}
925 1002
@@ -936,6 +1013,23 @@ int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
936 AR5K_KEYTABLE_VALID; 1013 AR5K_KEYTABLE_VALID;
937} 1014}
938 1015
1016static
1017int ath5k_keycache_type(const struct ieee80211_key_conf *key)
1018{
1019 switch (key->alg) {
1020 case ALG_TKIP:
1021 return AR5K_KEYTABLE_TYPE_TKIP;
1022 case ALG_CCMP:
1023 return AR5K_KEYTABLE_TYPE_CCM;
1024 case ALG_WEP:
1025 if (key->keylen == LEN_WEP40)
1026 return AR5K_KEYTABLE_TYPE_40;
1027 else if (key->keylen == LEN_WEP104)
1028 return AR5K_KEYTABLE_TYPE_104;
1029 }
1030 return -EINVAL;
1031}
1032
939/* 1033/*
940 * Set a key entry on the table 1034 * Set a key entry on the table
941 */ 1035 */
@@ -943,40 +1037,53 @@ int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
943 const struct ieee80211_key_conf *key, const u8 *mac) 1037 const struct ieee80211_key_conf *key, const u8 *mac)
944{ 1038{
945 unsigned int i; 1039 unsigned int i;
1040 int keylen;
946 __le32 key_v[5] = {}; 1041 __le32 key_v[5] = {};
1042 __le32 key0 = 0, key1 = 0;
1043 __le32 *rxmic, *txmic;
947 u32 keytype; 1044 u32 keytype;
1045 u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
1046 bool is_tkip;
1047 const u8 *key_ptr;
948 1048
949 ATH5K_TRACE(ah->ah_sc); 1049 ATH5K_TRACE(ah->ah_sc);
950 1050
951 /* key->keylen comes in from mac80211 in bytes */ 1051 is_tkip = (key->alg == ALG_TKIP);
952 1052
953 if (key->keylen > AR5K_KEYTABLE_SIZE / 8) 1053 /*
1054 * key->keylen comes in from mac80211 in bytes.
1055 * TKIP is 128 bit + 128 bit mic
1056 */
1057 keylen = (is_tkip) ? (128 / 8) : key->keylen;
1058
1059 if (entry > AR5K_KEYTABLE_SIZE ||
1060 (is_tkip && micentry > AR5K_KEYTABLE_SIZE))
954 return -EOPNOTSUPP; 1061 return -EOPNOTSUPP;
955 1062
956 switch (key->keylen) { 1063 if (unlikely(keylen > 16))
957 /* WEP 40-bit = 40-bit entered key + 24 bit IV = 64-bit */ 1064 return -EOPNOTSUPP;
958 case 40 / 8:
959 memcpy(&key_v[0], key->key, 5);
960 keytype = AR5K_KEYTABLE_TYPE_40;
961 break;
962 1065
963 /* WEP 104-bit = 104-bit entered key + 24-bit IV = 128-bit */ 1066 keytype = ath5k_keycache_type(key);
964 case 104 / 8: 1067 if (keytype < 0)
965 memcpy(&key_v[0], &key->key[0], 6); 1068 return keytype;
966 memcpy(&key_v[2], &key->key[6], 6);
967 memcpy(&key_v[4], &key->key[12], 1);
968 keytype = AR5K_KEYTABLE_TYPE_104;
969 break;
970 /* WEP 128-bit = 128-bit entered key + 24 bit IV = 152-bit */
971 case 128 / 8:
972 memcpy(&key_v[0], &key->key[0], 6);
973 memcpy(&key_v[2], &key->key[6], 6);
974 memcpy(&key_v[4], &key->key[12], 4);
975 keytype = AR5K_KEYTABLE_TYPE_128;
976 break;
977 1069
978 default: 1070 /*
979 return -EINVAL; /* shouldn't happen */ 1071 * each key block is 6 bytes wide, written as pairs of
1072 * alternating 32 and 16 bit le values.
1073 */
1074 key_ptr = key->key;
1075 for (i = 0; keylen >= 6; keylen -= 6) {
1076 memcpy(&key_v[i], key_ptr, 6);
1077 i += 2;
1078 key_ptr += 6;
1079 }
1080 if (keylen)
1081 memcpy(&key_v[i], key_ptr, keylen);
1082
1083 /* intentionally corrupt key until mic is installed */
1084 if (is_tkip) {
1085 key0 = key_v[0] = ~key_v[0];
1086 key1 = key_v[1] = ~key_v[1];
980 } 1087 }
981 1088
982 for (i = 0; i < ARRAY_SIZE(key_v); i++) 1089 for (i = 0; i < ARRAY_SIZE(key_v); i++)
@@ -985,6 +1092,40 @@ int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
985 1092
986 ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry)); 1093 ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry));
987 1094
1095 if (is_tkip) {
1096 /* Install rx/tx MIC */
1097 rxmic = (__le32 *) &key->key[16];
1098 txmic = (__le32 *) &key->key[24];
1099
1100 if (ah->ah_combined_mic) {
1101 key_v[0] = rxmic[0];
1102 key_v[1] = cpu_to_le32(le32_to_cpu(txmic[0]) >> 16);
1103 key_v[2] = rxmic[1];
1104 key_v[3] = cpu_to_le32(le32_to_cpu(txmic[0]) & 0xffff);
1105 key_v[4] = txmic[1];
1106 } else {
1107 key_v[0] = rxmic[0];
1108 key_v[1] = 0;
1109 key_v[2] = rxmic[1];
1110 key_v[3] = 0;
1111 key_v[4] = 0;
1112 }
1113 for (i = 0; i < ARRAY_SIZE(key_v); i++)
1114 ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
1115 AR5K_KEYTABLE_OFF(micentry, i));
1116
1117 ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
1118 AR5K_KEYTABLE_TYPE(micentry));
1119 ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_MAC0(micentry));
1120 ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_MAC1(micentry));
1121
1122 /* restore first 2 words of key */
1123 ath5k_hw_reg_write(ah, le32_to_cpu(~key0),
1124 AR5K_KEYTABLE_OFF(entry, 0));
1125 ath5k_hw_reg_write(ah, le32_to_cpu(~key1),
1126 AR5K_KEYTABLE_OFF(entry, 1));
1127 }
1128
988 return ath5k_hw_set_key_lladdr(ah, entry, mac); 1129 return ath5k_hw_set_key_lladdr(ah, entry, mac);
989} 1130}
990 1131
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c
index e43f6563e61a..7ba18e09463b 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath5k/phy.c
@@ -1412,7 +1412,8 @@ static int ath5k_hw_rf5112_rfregs(struct ath5k_hw *ah,
1412 rf_ini = rfregs_2112a; 1412 rf_ini = rfregs_2112a;
1413 rf_size = ARRAY_SIZE(rfregs_5112a); 1413 rf_size = ARRAY_SIZE(rfregs_5112a);
1414 if (mode < 2) { 1414 if (mode < 2) {
1415 ATH5K_ERR(ah->ah_sc,"invalid channel mode: %i\n",mode); 1415 ATH5K_ERR(ah->ah_sc, "invalid channel mode: %i\n",
1416 mode);
1416 return -EINVAL; 1417 return -EINVAL;
1417 } 1418 }
1418 mode = mode - 2; /*no a/turboa modes for 2112*/ 1419 mode = mode - 2; /*no a/turboa modes for 2112*/
@@ -1708,7 +1709,7 @@ enum ath5k_rfgain ath5k_hw_get_rf_gain(struct ath5k_hw *ah)
1708 if (ah->ah_radio >= AR5K_RF5112) { 1709 if (ah->ah_radio >= AR5K_RF5112) {
1709 ath5k_hw_rfregs_gainf_corr(ah); 1710 ath5k_hw_rfregs_gainf_corr(ah);
1710 ah->ah_gain.g_current = 1711 ah->ah_gain.g_current =
1711 ah->ah_gain.g_current>=ah->ah_gain.g_f_corr ? 1712 ah->ah_gain.g_current >= ah->ah_gain.g_f_corr ?
1712 (ah->ah_gain.g_current-ah->ah_gain.g_f_corr) : 1713 (ah->ah_gain.g_current-ah->ah_gain.g_f_corr) :
1713 0; 1714 0;
1714 } 1715 }
@@ -2195,9 +2196,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
2195 return ret; 2196 return ret;
2196 } 2197 }
2197 2198
2198 ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 2199 ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
2199 if (ret)
2200 return ret;
2201 2200
2202 /* 2201 /*
2203 * Re-enable RX/TX and beacons 2202 * Re-enable RX/TX and beacons
diff --git a/drivers/net/wireless/ath5k/qcu.c b/drivers/net/wireless/ath5k/qcu.c
index 01bf09176d23..1b7bc50ea8eb 100644
--- a/drivers/net/wireless/ath5k/qcu.c
+++ b/drivers/net/wireless/ath5k/qcu.c
@@ -432,13 +432,30 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
432 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE) 432 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
433 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue); 433 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
434 434
435 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
436 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
437
438 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
439 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
440
441 if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
442 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
443
444 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
445 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
435 446
436 /* Update secondary interrupt mask registers */ 447 /* Update secondary interrupt mask registers */
448
449 /* Filter out inactive queues */
437 ah->ah_txq_imr_txok &= ah->ah_txq_status; 450 ah->ah_txq_imr_txok &= ah->ah_txq_status;
438 ah->ah_txq_imr_txerr &= ah->ah_txq_status; 451 ah->ah_txq_imr_txerr &= ah->ah_txq_status;
439 ah->ah_txq_imr_txurn &= ah->ah_txq_status; 452 ah->ah_txq_imr_txurn &= ah->ah_txq_status;
440 ah->ah_txq_imr_txdesc &= ah->ah_txq_status; 453 ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
441 ah->ah_txq_imr_txeol &= ah->ah_txq_status; 454 ah->ah_txq_imr_txeol &= ah->ah_txq_status;
455 ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
456 ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
457 ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
458 ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
442 459
443 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok, 460 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
444 AR5K_SIMR0_QCU_TXOK) | 461 AR5K_SIMR0_QCU_TXOK) |
@@ -448,8 +465,24 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
448 AR5K_SIMR1_QCU_TXERR) | 465 AR5K_SIMR1_QCU_TXERR) |
449 AR5K_REG_SM(ah->ah_txq_imr_txeol, 466 AR5K_REG_SM(ah->ah_txq_imr_txeol,
450 AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1); 467 AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
451 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txurn, 468 /* Update simr2 but don't overwrite rest simr2 settings */
452 AR5K_SIMR2_QCU_TXURN), AR5K_SIMR2); 469 AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
470 AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
471 AR5K_REG_SM(ah->ah_txq_imr_txurn,
472 AR5K_SIMR2_QCU_TXURN));
473 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
474 AR5K_SIMR3_QCBRORN) |
475 AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
476 AR5K_SIMR3_QCBRURN), AR5K_SIMR3);
477 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
478 AR5K_SIMR4_QTRIG), AR5K_SIMR4);
479 /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
480 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
481 AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
482 /* No queue has TXNOFRM enabled, disable the interrupt
483 * by setting AR5K_TXNOFRM to zero */
484 if (ah->ah_txq_imr_nofrm == 0)
485 ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
453 } 486 }
454 487
455 return 0; 488 return 0;
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index e557fe178bbf..91aaeaf88199 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -234,6 +234,7 @@
234#define AR5K_TXNOFRM 0x004c 234#define AR5K_TXNOFRM 0x004c
235#define AR5K_TXNOFRM_M 0x000003ff 235#define AR5K_TXNOFRM_M 0x000003ff
236#define AR5K_TXNOFRM_QCU 0x000ffc00 236#define AR5K_TXNOFRM_QCU 0x000ffc00
237#define AR5K_TXNOFRM_QCU_S 10
237 238
238/* 239/*
239 * Receive frame gap timeout register 240 * Receive frame gap timeout register
@@ -350,7 +351,7 @@
350 351
351#define AR5K_SISR3 0x0090 /* Register Address [5211+] */ 352#define AR5K_SISR3 0x0090 /* Register Address [5211+] */
352#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */ 353#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */
353#define AR5K_SISR3_QCBORN_S 0 354#define AR5K_SISR3_QCBRORN_S 0
354#define AR5K_SISR3_QCBRURN 0x03ff0000 /* Mask for QCBRURN */ 355#define AR5K_SISR3_QCBRURN 0x03ff0000 /* Mask for QCBRURN */
355#define AR5K_SISR3_QCBRURN_S 16 356#define AR5K_SISR3_QCBRURN_S 16
356 357
@@ -1113,14 +1114,16 @@
1113#define AR5K_PCU_MAX 0x8fff 1114#define AR5K_PCU_MAX 0x8fff
1114 1115
1115/* 1116/*
1116 * First station id register (MAC address in lower 32 bits) 1117 * First station id register (Lower 32 bits of MAC address)
1117 */ 1118 */
1118#define AR5K_STA_ID0 0x8000 1119#define AR5K_STA_ID0 0x8000
1120#define AR5K_STA_ID0_ARRD_L32 0xffffffff
1119 1121
1120/* 1122/*
1121 * Second station id register (MAC address in upper 16 bits) 1123 * Second station id register (Upper 16 bits of MAC address + PCU settings)
1122 */ 1124 */
1123#define AR5K_STA_ID1 0x8004 /* Register Address */ 1125#define AR5K_STA_ID1 0x8004 /* Register Address */
1126#define AR5K_STA_ID1_ADDR_U16 0x0000ffff /* Upper 16 bits of MAC addres */
1124#define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */ 1127#define AR5K_STA_ID1_AP 0x00010000 /* Set AP mode */
1125#define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */ 1128#define AR5K_STA_ID1_ADHOC 0x00020000 /* Set Ad-Hoc mode */
1126#define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting */ 1129#define AR5K_STA_ID1_PWR_SV 0x00040000 /* Power save reporting */
@@ -1726,6 +1729,7 @@
1726#define AR5K_MISC_MODE 0x8120 /* Register Address */ 1729#define AR5K_MISC_MODE 0x8120 /* Register Address */
1727#define AR5K_MISC_MODE_FBSSID_MATCH 0x00000001 /* Force BSSID match */ 1730#define AR5K_MISC_MODE_FBSSID_MATCH 0x00000001 /* Force BSSID match */
1728#define AR5K_MISC_MODE_ACKSIFS_MEM 0x00000002 /* ACK SIFS memory (?) */ 1731#define AR5K_MISC_MODE_ACKSIFS_MEM 0x00000002 /* ACK SIFS memory (?) */
1732#define AR5K_MISC_MODE_COMBINED_MIC 0x00000004 /* use rx/tx MIC key */
1729/* more bits */ 1733/* more bits */
1730 1734
1731/* 1735/*
@@ -1810,6 +1814,10 @@
1810#define AR5K_KEYTABLE_MAC1(_n) AR5K_KEYTABLE_OFF(_n, 7) 1814#define AR5K_KEYTABLE_MAC1(_n) AR5K_KEYTABLE_OFF(_n, 7)
1811#define AR5K_KEYTABLE_VALID 0x00008000 1815#define AR5K_KEYTABLE_VALID 0x00008000
1812 1816
1817/* If key type is TKIP and MIC is enabled
1818 * MIC key goes in offset entry + 64 */
1819#define AR5K_KEYTABLE_MIC_OFFSET 64
1820
1813/* WEP 40-bit = 40-bit entered key + 24 bit IV = 64-bit 1821/* WEP 40-bit = 40-bit entered key + 24 bit IV = 64-bit
1814 * WEP 104-bit = 104-bit entered key + 24-bit IV = 128-bit 1822 * WEP 104-bit = 104-bit entered key + 24-bit IV = 128-bit
1815 * WEP 128-bit = 128-bit entered key + 24 bit IV = 152-bit 1823 * WEP 128-bit = 128-bit entered key + 24 bit IV = 152-bit
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c
index 1b6d45b6772d..dc2d7d8bdb7a 100644
--- a/drivers/net/wireless/ath5k/reset.c
+++ b/drivers/net/wireless/ath5k/reset.c
@@ -674,7 +674,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
674 (ee->ee_switch_settling[ee_mode] << 7) & 0x3f80, 674 (ee->ee_switch_settling[ee_mode] << 7) & 0x3f80,
675 0xffffc07f); 675 0xffffc07f);
676 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_GAIN, 676 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_GAIN,
677 (ee->ee_ant_tx_rx[ee_mode] << 12) & 0x3f000, 677 (ee->ee_atn_tx_rx[ee_mode] << 12) & 0x3f000,
678 0xfffc0fff); 678 0xfffc0fff);
679 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_DESIRED_SIZE, 679 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_DESIRED_SIZE,
680 (ee->ee_adc_desired_size[ee_mode] & 0x00ff) | 680 (ee->ee_adc_desired_size[ee_mode] & 0x00ff) |
@@ -842,9 +842,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
842 * 842 *
843 * XXX: Find an interval that's OK for all cards... 843 * XXX: Find an interval that's OK for all cards...
844 */ 844 */
845 ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 845 ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
846 if (ret)
847 return ret;
848 846
849 /* 847 /*
850 * Reset queues and start beacon timers at the end of the reset routine 848 * Reset queues and start beacon timers at the end of the reset routine
@@ -864,8 +862,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
864 862
865 /* Pre-enable interrupts on 5211/5212*/ 863 /* Pre-enable interrupts on 5211/5212*/
866 if (ah->ah_version != AR5K_AR5210) 864 if (ah->ah_version != AR5K_AR5210)
867 ath5k_hw_set_imr(ah, AR5K_INT_RX | AR5K_INT_TX | 865 ath5k_hw_set_imr(ah, ah->ah_imr);
868 AR5K_INT_FATAL);
869 866
870 /* 867 /*
871 * Set RF kill flags if supported by the device (read from the EEPROM) 868 * Set RF kill flags if supported by the device (read from the EEPROM)
diff --git a/drivers/net/wireless/ath9k/Kconfig b/drivers/net/wireless/ath9k/Kconfig
index 80a692430413..c43bd321f97f 100644
--- a/drivers/net/wireless/ath9k/Kconfig
+++ b/drivers/net/wireless/ath9k/Kconfig
@@ -9,3 +9,14 @@ config ATH9K
9 Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets. 9 Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets.
10 10
11 If you choose to build a module, it'll be called ath9k. 11 If you choose to build a module, it'll be called ath9k.
12
13config ATH9K_DEBUG
14 bool "Atheros ath9k debugging"
15 depends on ATH9K
16 ---help---
17 Say Y, if you need ath9k to display debug messages.
18 Pass the debug mask as a module parameter:
19
20 modprobe ath9k debug=0x00002000
21
22 Look in ath9k/core.h for possible debug masks
diff --git a/drivers/net/wireless/ath9k/Makefile b/drivers/net/wireless/ath9k/Makefile
index a6411517e5f8..1209d14613ac 100644
--- a/drivers/net/wireless/ath9k/Makefile
+++ b/drivers/net/wireless/ath9k/Makefile
@@ -1,11 +1,16 @@
1ath9k-y += hw.o \ 1ath9k-y += hw.o \
2 eeprom.o \
3 mac.o \
4 calib.o \
5 ani.o \
2 phy.o \ 6 phy.o \
3 regd.o \ 7 regd.o \
4 beacon.o \ 8 beacon.o \
5 main.o \ 9 main.o \
6 recv.o \ 10 recv.o \
7 xmit.o \ 11 xmit.o \
8 rc.o \ 12 rc.o
9 core.o 13
14ath9k-$(CONFIG_ATH9K_DEBUG) += debug.o
10 15
11obj-$(CONFIG_ATH9K) += ath9k.o 16obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath9k/ani.c b/drivers/net/wireless/ath9k/ani.c
new file mode 100644
index 000000000000..251e2d9a7a4a
--- /dev/null
+++ b/drivers/net/wireless/ath9k/ani.c
@@ -0,0 +1,852 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "hw.h"
19#include "reg.h"
20#include "phy.h"
21
22static int ath9k_hw_get_ani_channel_idx(struct ath_hal *ah,
23 struct ath9k_channel *chan)
24{
25 struct ath_hal_5416 *ahp = AH5416(ah);
26 int i;
27
28 for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) {
29 if (ahp->ah_ani[i].c.channel == chan->channel)
30 return i;
31 if (ahp->ah_ani[i].c.channel == 0) {
32 ahp->ah_ani[i].c.channel = chan->channel;
33 ahp->ah_ani[i].c.channelFlags = chan->channelFlags;
34 return i;
35 }
36 }
37
38 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
39 "No more channel states left. Using channel 0\n");
40
41 return 0;
42}
43
44static bool ath9k_hw_ani_control(struct ath_hal *ah,
45 enum ath9k_ani_cmd cmd, int param)
46{
47 struct ath_hal_5416 *ahp = AH5416(ah);
48 struct ar5416AniState *aniState = ahp->ah_curani;
49
50 switch (cmd & ahp->ah_ani_function) {
51 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
52 u32 level = param;
53
54 if (level >= ARRAY_SIZE(ahp->ah_totalSizeDesired)) {
55 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
56 "level out of range (%u > %u)\n",
57 level,
58 (unsigned)ARRAY_SIZE(ahp->ah_totalSizeDesired));
59 return false;
60 }
61
62 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
63 AR_PHY_DESIRED_SZ_TOT_DES,
64 ahp->ah_totalSizeDesired[level]);
65 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
66 AR_PHY_AGC_CTL1_COARSE_LOW,
67 ahp->ah_coarseLow[level]);
68 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
69 AR_PHY_AGC_CTL1_COARSE_HIGH,
70 ahp->ah_coarseHigh[level]);
71 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
72 AR_PHY_FIND_SIG_FIRPWR,
73 ahp->ah_firpwr[level]);
74
75 if (level > aniState->noiseImmunityLevel)
76 ahp->ah_stats.ast_ani_niup++;
77 else if (level < aniState->noiseImmunityLevel)
78 ahp->ah_stats.ast_ani_nidown++;
79 aniState->noiseImmunityLevel = level;
80 break;
81 }
82 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
83 const int m1ThreshLow[] = { 127, 50 };
84 const int m2ThreshLow[] = { 127, 40 };
85 const int m1Thresh[] = { 127, 0x4d };
86 const int m2Thresh[] = { 127, 0x40 };
87 const int m2CountThr[] = { 31, 16 };
88 const int m2CountThrLow[] = { 63, 48 };
89 u32 on = param ? 1 : 0;
90
91 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
92 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
93 m1ThreshLow[on]);
94 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
95 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
96 m2ThreshLow[on]);
97 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
98 AR_PHY_SFCORR_M1_THRESH,
99 m1Thresh[on]);
100 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
101 AR_PHY_SFCORR_M2_THRESH,
102 m2Thresh[on]);
103 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
104 AR_PHY_SFCORR_M2COUNT_THR,
105 m2CountThr[on]);
106 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
107 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
108 m2CountThrLow[on]);
109
110 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
111 AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
112 m1ThreshLow[on]);
113 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
114 AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
115 m2ThreshLow[on]);
116 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
117 AR_PHY_SFCORR_EXT_M1_THRESH,
118 m1Thresh[on]);
119 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
120 AR_PHY_SFCORR_EXT_M2_THRESH,
121 m2Thresh[on]);
122
123 if (on)
124 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
125 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
126 else
127 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
128 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
129
130 if (!on != aniState->ofdmWeakSigDetectOff) {
131 if (on)
132 ahp->ah_stats.ast_ani_ofdmon++;
133 else
134 ahp->ah_stats.ast_ani_ofdmoff++;
135 aniState->ofdmWeakSigDetectOff = !on;
136 }
137 break;
138 }
139 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
140 const int weakSigThrCck[] = { 8, 6 };
141 u32 high = param ? 1 : 0;
142
143 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
144 AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
145 weakSigThrCck[high]);
146 if (high != aniState->cckWeakSigThreshold) {
147 if (high)
148 ahp->ah_stats.ast_ani_cckhigh++;
149 else
150 ahp->ah_stats.ast_ani_ccklow++;
151 aniState->cckWeakSigThreshold = high;
152 }
153 break;
154 }
155 case ATH9K_ANI_FIRSTEP_LEVEL:{
156 const int firstep[] = { 0, 4, 8 };
157 u32 level = param;
158
159 if (level >= ARRAY_SIZE(firstep)) {
160 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
161 "level out of range (%u > %u)\n",
162 level,
163 (unsigned) ARRAY_SIZE(firstep));
164 return false;
165 }
166 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
167 AR_PHY_FIND_SIG_FIRSTEP,
168 firstep[level]);
169 if (level > aniState->firstepLevel)
170 ahp->ah_stats.ast_ani_stepup++;
171 else if (level < aniState->firstepLevel)
172 ahp->ah_stats.ast_ani_stepdown++;
173 aniState->firstepLevel = level;
174 break;
175 }
176 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
177 const int cycpwrThr1[] =
178 { 2, 4, 6, 8, 10, 12, 14, 16 };
179 u32 level = param;
180
181 if (level >= ARRAY_SIZE(cycpwrThr1)) {
182 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
183 "level out of range (%u > %u)\n",
184 level,
185 (unsigned)
186 ARRAY_SIZE(cycpwrThr1));
187 return false;
188 }
189 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
190 AR_PHY_TIMING5_CYCPWR_THR1,
191 cycpwrThr1[level]);
192 if (level > aniState->spurImmunityLevel)
193 ahp->ah_stats.ast_ani_spurup++;
194 else if (level < aniState->spurImmunityLevel)
195 ahp->ah_stats.ast_ani_spurdown++;
196 aniState->spurImmunityLevel = level;
197 break;
198 }
199 case ATH9K_ANI_PRESENT:
200 break;
201 default:
202 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
203 "invalid cmd %u\n", cmd);
204 return false;
205 }
206
207 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "ANI parameters:\n");
208 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
209 "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
210 "ofdmWeakSigDetectOff=%d\n",
211 aniState->noiseImmunityLevel, aniState->spurImmunityLevel,
212 !aniState->ofdmWeakSigDetectOff);
213 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
214 "cckWeakSigThreshold=%d, "
215 "firstepLevel=%d, listenTime=%d\n",
216 aniState->cckWeakSigThreshold, aniState->firstepLevel,
217 aniState->listenTime);
218 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
219 "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
220 aniState->cycleCount, aniState->ofdmPhyErrCount,
221 aniState->cckPhyErrCount);
222
223 return true;
224}
225
226static void ath9k_hw_update_mibstats(struct ath_hal *ah,
227 struct ath9k_mib_stats *stats)
228{
229 stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL);
230 stats->rts_bad += REG_READ(ah, AR_RTS_FAIL);
231 stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL);
232 stats->rts_good += REG_READ(ah, AR_RTS_OK);
233 stats->beacons += REG_READ(ah, AR_BEACON_CNT);
234}
235
236static void ath9k_ani_restart(struct ath_hal *ah)
237{
238 struct ath_hal_5416 *ahp = AH5416(ah);
239 struct ar5416AniState *aniState;
240
241 if (!DO_ANI(ah))
242 return;
243
244 aniState = ahp->ah_curani;
245
246 aniState->listenTime = 0;
247 if (ahp->ah_hasHwPhyCounters) {
248 if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) {
249 aniState->ofdmPhyErrBase = 0;
250 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
251 "OFDM Trigger is too high for hw counters\n");
252 } else {
253 aniState->ofdmPhyErrBase =
254 AR_PHY_COUNTMAX - aniState->ofdmTrigHigh;
255 }
256 if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) {
257 aniState->cckPhyErrBase = 0;
258 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
259 "CCK Trigger is too high for hw counters\n");
260 } else {
261 aniState->cckPhyErrBase =
262 AR_PHY_COUNTMAX - aniState->cckTrigHigh;
263 }
264 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
265 "Writing ofdmbase=%u cckbase=%u\n",
266 aniState->ofdmPhyErrBase,
267 aniState->cckPhyErrBase);
268 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
269 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
270 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
271 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
272
273 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
274 }
275 aniState->ofdmPhyErrCount = 0;
276 aniState->cckPhyErrCount = 0;
277}
278
279static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hal *ah)
280{
281 struct ath_hal_5416 *ahp = AH5416(ah);
282 struct ath9k_channel *chan = ah->ah_curchan;
283 struct ar5416AniState *aniState;
284 enum wireless_mode mode;
285 int32_t rssi;
286
287 if (!DO_ANI(ah))
288 return;
289
290 aniState = ahp->ah_curani;
291
292 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
293 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
294 aniState->noiseImmunityLevel + 1)) {
295 return;
296 }
297 }
298
299 if (aniState->spurImmunityLevel < HAL_SPUR_IMMUNE_MAX) {
300 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
301 aniState->spurImmunityLevel + 1)) {
302 return;
303 }
304 }
305
306 if (ah->ah_opmode == NL80211_IFTYPE_AP) {
307 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
308 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
309 aniState->firstepLevel + 1);
310 }
311 return;
312 }
313 rssi = BEACON_RSSI(ahp);
314 if (rssi > aniState->rssiThrHigh) {
315 if (!aniState->ofdmWeakSigDetectOff) {
316 if (ath9k_hw_ani_control(ah,
317 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
318 false)) {
319 ath9k_hw_ani_control(ah,
320 ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
321 return;
322 }
323 }
324 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
325 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
326 aniState->firstepLevel + 1);
327 return;
328 }
329 } else if (rssi > aniState->rssiThrLow) {
330 if (aniState->ofdmWeakSigDetectOff)
331 ath9k_hw_ani_control(ah,
332 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
333 true);
334 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
335 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
336 aniState->firstepLevel + 1);
337 return;
338 } else {
339 mode = ath9k_hw_chan2wmode(ah, chan);
340 if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
341 if (!aniState->ofdmWeakSigDetectOff)
342 ath9k_hw_ani_control(ah,
343 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
344 false);
345 if (aniState->firstepLevel > 0)
346 ath9k_hw_ani_control(ah,
347 ATH9K_ANI_FIRSTEP_LEVEL, 0);
348 return;
349 }
350 }
351}
352
353static void ath9k_hw_ani_cck_err_trigger(struct ath_hal *ah)
354{
355 struct ath_hal_5416 *ahp = AH5416(ah);
356 struct ath9k_channel *chan = ah->ah_curchan;
357 struct ar5416AniState *aniState;
358 enum wireless_mode mode;
359 int32_t rssi;
360
361 if (!DO_ANI(ah))
362 return;
363
364 aniState = ahp->ah_curani;
365 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
366 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
367 aniState->noiseImmunityLevel + 1)) {
368 return;
369 }
370 }
371 if (ah->ah_opmode == NL80211_IFTYPE_AP) {
372 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
373 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
374 aniState->firstepLevel + 1);
375 }
376 return;
377 }
378 rssi = BEACON_RSSI(ahp);
379 if (rssi > aniState->rssiThrLow) {
380 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
381 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
382 aniState->firstepLevel + 1);
383 } else {
384 mode = ath9k_hw_chan2wmode(ah, chan);
385 if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
386 if (aniState->firstepLevel > 0)
387 ath9k_hw_ani_control(ah,
388 ATH9K_ANI_FIRSTEP_LEVEL, 0);
389 }
390 }
391}
392
393static void ath9k_hw_ani_lower_immunity(struct ath_hal *ah)
394{
395 struct ath_hal_5416 *ahp = AH5416(ah);
396 struct ar5416AniState *aniState;
397 int32_t rssi;
398
399 aniState = ahp->ah_curani;
400
401 if (ah->ah_opmode == NL80211_IFTYPE_AP) {
402 if (aniState->firstepLevel > 0) {
403 if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
404 aniState->firstepLevel - 1))
405 return;
406 }
407 } else {
408 rssi = BEACON_RSSI(ahp);
409 if (rssi > aniState->rssiThrHigh) {
410 /* XXX: Handle me */
411 } else if (rssi > aniState->rssiThrLow) {
412 if (aniState->ofdmWeakSigDetectOff) {
413 if (ath9k_hw_ani_control(ah,
414 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
415 true) == true)
416 return;
417 }
418 if (aniState->firstepLevel > 0) {
419 if (ath9k_hw_ani_control(ah,
420 ATH9K_ANI_FIRSTEP_LEVEL,
421 aniState->firstepLevel - 1) == true)
422 return;
423 }
424 } else {
425 if (aniState->firstepLevel > 0) {
426 if (ath9k_hw_ani_control(ah,
427 ATH9K_ANI_FIRSTEP_LEVEL,
428 aniState->firstepLevel - 1) == true)
429 return;
430 }
431 }
432 }
433
434 if (aniState->spurImmunityLevel > 0) {
435 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
436 aniState->spurImmunityLevel - 1))
437 return;
438 }
439
440 if (aniState->noiseImmunityLevel > 0) {
441 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
442 aniState->noiseImmunityLevel - 1);
443 return;
444 }
445}
446
447static int32_t ath9k_hw_ani_get_listen_time(struct ath_hal *ah)
448{
449 struct ath_hal_5416 *ahp = AH5416(ah);
450 struct ar5416AniState *aniState;
451 u32 txFrameCount, rxFrameCount, cycleCount;
452 int32_t listenTime;
453
454 txFrameCount = REG_READ(ah, AR_TFCNT);
455 rxFrameCount = REG_READ(ah, AR_RFCNT);
456 cycleCount = REG_READ(ah, AR_CCCNT);
457
458 aniState = ahp->ah_curani;
459 if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
460
461 listenTime = 0;
462 ahp->ah_stats.ast_ani_lzero++;
463 } else {
464 int32_t ccdelta = cycleCount - aniState->cycleCount;
465 int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
466 int32_t tfdelta = txFrameCount - aniState->txFrameCount;
467 listenTime = (ccdelta - rfdelta - tfdelta) / 44000;
468 }
469 aniState->cycleCount = cycleCount;
470 aniState->txFrameCount = txFrameCount;
471 aniState->rxFrameCount = rxFrameCount;
472
473 return listenTime;
474}
475
476void ath9k_ani_reset(struct ath_hal *ah)
477{
478 struct ath_hal_5416 *ahp = AH5416(ah);
479 struct ar5416AniState *aniState;
480 struct ath9k_channel *chan = ah->ah_curchan;
481 int index;
482
483 if (!DO_ANI(ah))
484 return;
485
486 index = ath9k_hw_get_ani_channel_idx(ah, chan);
487 aniState = &ahp->ah_ani[index];
488 ahp->ah_curani = aniState;
489
490 if (DO_ANI(ah) && ah->ah_opmode != NL80211_IFTYPE_STATION
491 && ah->ah_opmode != NL80211_IFTYPE_ADHOC) {
492 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
493 "Reset ANI state opmode %u\n", ah->ah_opmode);
494 ahp->ah_stats.ast_ani_reset++;
495
496 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0);
497 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
498 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0);
499 ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
500 !ATH9K_ANI_USE_OFDM_WEAK_SIG);
501 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
502 ATH9K_ANI_CCK_WEAK_SIG_THR);
503
504 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) |
505 ATH9K_RX_FILTER_PHYERR);
506
507 if (ah->ah_opmode == NL80211_IFTYPE_AP) {
508 ahp->ah_curani->ofdmTrigHigh =
509 ah->ah_config.ofdm_trig_high;
510 ahp->ah_curani->ofdmTrigLow =
511 ah->ah_config.ofdm_trig_low;
512 ahp->ah_curani->cckTrigHigh =
513 ah->ah_config.cck_trig_high;
514 ahp->ah_curani->cckTrigLow =
515 ah->ah_config.cck_trig_low;
516 }
517 ath9k_ani_restart(ah);
518 return;
519 }
520
521 if (aniState->noiseImmunityLevel != 0)
522 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
523 aniState->noiseImmunityLevel);
524 if (aniState->spurImmunityLevel != 0)
525 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
526 aniState->spurImmunityLevel);
527 if (aniState->ofdmWeakSigDetectOff)
528 ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
529 !aniState->ofdmWeakSigDetectOff);
530 if (aniState->cckWeakSigThreshold)
531 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
532 aniState->cckWeakSigThreshold);
533 if (aniState->firstepLevel != 0)
534 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
535 aniState->firstepLevel);
536 if (ahp->ah_hasHwPhyCounters) {
537 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
538 ~ATH9K_RX_FILTER_PHYERR);
539 ath9k_ani_restart(ah);
540 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
541 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
542
543 } else {
544 ath9k_ani_restart(ah);
545 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) |
546 ATH9K_RX_FILTER_PHYERR);
547 }
548}
549
550void ath9k_hw_ani_monitor(struct ath_hal *ah,
551 const struct ath9k_node_stats *stats,
552 struct ath9k_channel *chan)
553{
554 struct ath_hal_5416 *ahp = AH5416(ah);
555 struct ar5416AniState *aniState;
556 int32_t listenTime;
557
558 aniState = ahp->ah_curani;
559 ahp->ah_stats.ast_nodestats = *stats;
560
561 listenTime = ath9k_hw_ani_get_listen_time(ah);
562 if (listenTime < 0) {
563 ahp->ah_stats.ast_ani_lneg++;
564 ath9k_ani_restart(ah);
565 return;
566 }
567
568 aniState->listenTime += listenTime;
569
570 if (ahp->ah_hasHwPhyCounters) {
571 u32 phyCnt1, phyCnt2;
572 u32 ofdmPhyErrCnt, cckPhyErrCnt;
573
574 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
575
576 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
577 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
578
579 if (phyCnt1 < aniState->ofdmPhyErrBase ||
580 phyCnt2 < aniState->cckPhyErrBase) {
581 if (phyCnt1 < aniState->ofdmPhyErrBase) {
582 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
583 "phyCnt1 0x%x, resetting "
584 "counter value to 0x%x\n",
585 phyCnt1,
586 aniState->ofdmPhyErrBase);
587 REG_WRITE(ah, AR_PHY_ERR_1,
588 aniState->ofdmPhyErrBase);
589 REG_WRITE(ah, AR_PHY_ERR_MASK_1,
590 AR_PHY_ERR_OFDM_TIMING);
591 }
592 if (phyCnt2 < aniState->cckPhyErrBase) {
593 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
594 "phyCnt2 0x%x, resetting "
595 "counter value to 0x%x\n",
596 phyCnt2,
597 aniState->cckPhyErrBase);
598 REG_WRITE(ah, AR_PHY_ERR_2,
599 aniState->cckPhyErrBase);
600 REG_WRITE(ah, AR_PHY_ERR_MASK_2,
601 AR_PHY_ERR_CCK_TIMING);
602 }
603 return;
604 }
605
606 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
607 ahp->ah_stats.ast_ani_ofdmerrs +=
608 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
609 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
610
611 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
612 ahp->ah_stats.ast_ani_cckerrs +=
613 cckPhyErrCnt - aniState->cckPhyErrCount;
614 aniState->cckPhyErrCount = cckPhyErrCnt;
615 }
616
617 if (!DO_ANI(ah))
618 return;
619
620 if (aniState->listenTime > 5 * ahp->ah_aniPeriod) {
621 if (aniState->ofdmPhyErrCount <= aniState->listenTime *
622 aniState->ofdmTrigLow / 1000 &&
623 aniState->cckPhyErrCount <= aniState->listenTime *
624 aniState->cckTrigLow / 1000)
625 ath9k_hw_ani_lower_immunity(ah);
626 ath9k_ani_restart(ah);
627 } else if (aniState->listenTime > ahp->ah_aniPeriod) {
628 if (aniState->ofdmPhyErrCount > aniState->listenTime *
629 aniState->ofdmTrigHigh / 1000) {
630 ath9k_hw_ani_ofdm_err_trigger(ah);
631 ath9k_ani_restart(ah);
632 } else if (aniState->cckPhyErrCount >
633 aniState->listenTime * aniState->cckTrigHigh /
634 1000) {
635 ath9k_hw_ani_cck_err_trigger(ah);
636 ath9k_ani_restart(ah);
637 }
638 }
639}
640
641bool ath9k_hw_phycounters(struct ath_hal *ah)
642{
643 struct ath_hal_5416 *ahp = AH5416(ah);
644
645 return ahp->ah_hasHwPhyCounters ? true : false;
646}
647
648void ath9k_enable_mib_counters(struct ath_hal *ah)
649{
650 struct ath_hal_5416 *ahp = AH5416(ah);
651
652 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Enable MIB counters\n");
653
654 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
655
656 REG_WRITE(ah, AR_FILT_OFDM, 0);
657 REG_WRITE(ah, AR_FILT_CCK, 0);
658 REG_WRITE(ah, AR_MIBC,
659 ~(AR_MIBC_COW | AR_MIBC_FMC | AR_MIBC_CMC | AR_MIBC_MCS)
660 & 0x0f);
661 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
662 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
663}
664
665void ath9k_hw_disable_mib_counters(struct ath_hal *ah)
666{
667 struct ath_hal_5416 *ahp = AH5416(ah);
668
669 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disable MIB counters\n");
670
671 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC | AR_MIBC_CMC);
672
673 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
674
675 REG_WRITE(ah, AR_FILT_OFDM, 0);
676 REG_WRITE(ah, AR_FILT_CCK, 0);
677}
678
/*
 * Compute the percentage of cycles spent with RX clear (*rxc_pcnt),
 * receiving a frame (*rxf_pcnt) and transmitting a frame (*txf_pcnt)
 * since the previous call, from the hardware cycle/RX/TX counters.
 *
 * Returns 1 when the percentages were computed, 0 when they could not
 * be (first call, cycle-counter wrap, or zero elapsed cycles); on a 0
 * return the output parameters are left unmodified.
 *
 * NOTE(review): the previous counter snapshots live in function-local
 * statics, so this state is shared across every ath_hal instance and
 * the function is not reentrant — confirm callers serialize access.
 */
u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
				  u32 *rxc_pcnt,
				  u32 *rxf_pcnt,
				  u32 *txf_pcnt)
{
	/* Raw counter values from the previous invocation
	 * (all zero on the first call). */
	static u32 cycles, rx_clear, rx_frame, tx_frame;
	u32 good = 1;

	u32 rc = REG_READ(ah, AR_RCCNT);
	u32 rf = REG_READ(ah, AR_RFCNT);
	u32 tf = REG_READ(ah, AR_TFCNT);
	u32 cc = REG_READ(ah, AR_CCCNT);

	if (cycles == 0 || cycles > cc) {
		/* First call, or the cycle counter wrapped since the
		 * previous call — the deltas would be meaningless. */
		DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
			"cycle counter wrap. ExtBusy = 0\n");
		good = 0;
	} else {
		u32 cc_d = cc - cycles;
		u32 rc_d = rc - rx_clear;
		u32 rf_d = rf - rx_frame;
		u32 tf_d = tf - tx_frame;

		if (cc_d != 0) {
			*rxc_pcnt = rc_d * 100 / cc_d;
			*rxf_pcnt = rf_d * 100 / cc_d;
			*txf_pcnt = tf_d * 100 / cc_d;
		} else {
			good = 0;
		}
	}

	/* Remember the raw counters for the next call's deltas. */
	cycles = cc;
	rx_frame = rf;
	rx_clear = rc;
	tx_frame = tf;

	return good;
}
718
/*
 * Process a MIB interrupt. We may potentially be invoked because
 * any of the MIB counters overflow/trigger so don't assume we're
 * here because a PHY error counter triggered.
 */
void ath9k_hw_procmibevent(struct ath_hal *ah,
			   const struct ath9k_node_stats *stats)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	u32 phyCnt1, phyCnt2;

	/* Reset these counters regardless */
	REG_WRITE(ah, AR_FILT_OFDM, 0);
	REG_WRITE(ah, AR_FILT_CCK, 0);
	/* Only request a sleep-MIB clear when one isn't already pending. */
	if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
		REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);

	/* Clear the mib counters and save them in the stats */
	ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
	ahp->ah_stats.ast_nodestats = *stats;

	/* Nothing further to do unless ANI processing is enabled. */
	if (!DO_ANI(ah))
		return;

	/* NB: these are not reset-on-read */
	phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
	phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
	if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
	    ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) {
		struct ar5416AniState *aniState = ahp->ah_curani;
		u32 ofdmPhyErrCnt, cckPhyErrCnt;

		/* NB: only use ast_ani_*errs with AH_PRIVATE_DIAG */
		/* Convert the raw register value to an error count
		 * relative to the programmed base, and accumulate only
		 * the delta since the last event into the stats. */
		ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
		ahp->ah_stats.ast_ani_ofdmerrs +=
			ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
		aniState->ofdmPhyErrCount = ofdmPhyErrCnt;

		cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
		ahp->ah_stats.ast_ani_cckerrs +=
			cckPhyErrCnt - aniState->cckPhyErrCount;
		aniState->cckPhyErrCount = cckPhyErrCnt;

		/*
		 * NB: figure out which counter triggered. If both
		 * trigger we'll only deal with one as the processing
		 * clobbers the error counter so the trigger threshold
		 * check will never be true.
		 */
		if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
			ath9k_hw_ani_ofdm_err_trigger(ah);
		if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
			ath9k_hw_ani_cck_err_trigger(ah);
		/* NB: always restart to insure the h/w counters are reset */
		ath9k_ani_restart(ah);
	}
}
776
777void ath9k_hw_ani_setup(struct ath_hal *ah)
778{
779 struct ath_hal_5416 *ahp = AH5416(ah);
780 int i;
781
782 const int totalSizeDesired[] = { -55, -55, -55, -55, -62 };
783 const int coarseHigh[] = { -14, -14, -14, -14, -12 };
784 const int coarseLow[] = { -64, -64, -64, -64, -70 };
785 const int firpwr[] = { -78, -78, -78, -78, -80 };
786
787 for (i = 0; i < 5; i++) {
788 ahp->ah_totalSizeDesired[i] = totalSizeDesired[i];
789 ahp->ah_coarseHigh[i] = coarseHigh[i];
790 ahp->ah_coarseLow[i] = coarseLow[i];
791 ahp->ah_firpwr[i] = firpwr[i];
792 }
793}
794
/*
 * One-time ANI attach: initialize every per-channel ANI state entry
 * with its default trigger thresholds and levels, program the PHY
 * error counter base registers, and enable the MIB counters when the
 * hardware supports them. Finally arm ANI processing if enabled in
 * the driver configuration.
 */
void ath9k_hw_ani_attach(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	int i;

	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Attach ANI\n");

	ahp->ah_hasHwPhyCounters = 1;

	memset(ahp->ah_ani, 0, sizeof(ahp->ah_ani));
	for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) {
		ahp->ah_ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH;
		ahp->ah_ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW;
		ahp->ah_ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH;
		ahp->ah_ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW;
		ahp->ah_ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
		ahp->ah_ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
		ahp->ah_ani[i].ofdmWeakSigDetectOff =
			!ATH9K_ANI_USE_OFDM_WEAK_SIG;
		ahp->ah_ani[i].cckWeakSigThreshold =
			ATH9K_ANI_CCK_WEAK_SIG_THR;
		ahp->ah_ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
		ahp->ah_ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
		/* NB: ah_hasHwPhyCounters was set to 1 above, so this
		 * branch is currently always taken. */
		if (ahp->ah_hasHwPhyCounters) {
			/* Bases start the counters COUNTMAX minus the
			 * trigger threshold — presumably so they
			 * saturate (and raise a MIB event) after about
			 * "trigger" errors; see ath9k_hw_procmibevent().
			 * Confirm against the AR_PHY_ERR counter spec. */
			ahp->ah_ani[i].ofdmPhyErrBase =
				AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH;
			ahp->ah_ani[i].cckPhyErrBase =
				AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
		}
	}
	if (ahp->ah_hasHwPhyCounters) {
		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
			"Setting OfdmErrBase = 0x%08x\n",
			ahp->ah_ani[0].ofdmPhyErrBase);
		DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
			ahp->ah_ani[0].cckPhyErrBase);

		/* Program the shared error-counter registers from the
		 * first state entry (all entries use the same bases). */
		REG_WRITE(ah, AR_PHY_ERR_1, ahp->ah_ani[0].ofdmPhyErrBase);
		REG_WRITE(ah, AR_PHY_ERR_2, ahp->ah_ani[0].cckPhyErrBase);
		ath9k_enable_mib_counters(ah);
	}
	ahp->ah_aniPeriod = ATH9K_ANI_PERIOD;
	/* Actual ANI processing is gated on the driver config flag. */
	if (ah->ah_config.enable_ani)
		ahp->ah_procPhyErr |= HAL_PROCESS_ANI;
}
840
/*
 * ANI detach: stop the hardware MIB counters and zero the PHY error
 * counter registers programmed by ath9k_hw_ani_attach().
 */
void ath9k_hw_ani_detach(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Detach ANI\n");

	if (ahp->ah_hasHwPhyCounters) {
		ath9k_hw_disable_mib_counters(ah);
		REG_WRITE(ah, AR_PHY_ERR_1, 0);
		REG_WRITE(ah, AR_PHY_ERR_2, 0);
	}
}
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
index accace5f7efb..d27813502953 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -26,6 +26,7 @@
26#define AR9160_DEVID_PCI 0x0027 26#define AR9160_DEVID_PCI 0x0027
27#define AR9280_DEVID_PCI 0x0029 27#define AR9280_DEVID_PCI 0x0029
28#define AR9280_DEVID_PCIE 0x002a 28#define AR9280_DEVID_PCIE 0x002a
29#define AR9285_DEVID_PCIE 0x002b
29 30
30#define AR5416_AR9100_DEVID 0x000b 31#define AR5416_AR9100_DEVID 0x000b
31 32
@@ -138,6 +139,19 @@ struct ath_desc {
138#define ATH9K_TXDESC_NOACK 0x0002 139#define ATH9K_TXDESC_NOACK 0x0002
139#define ATH9K_TXDESC_RTSENA 0x0004 140#define ATH9K_TXDESC_RTSENA 0x0004
140#define ATH9K_TXDESC_CTSENA 0x0008 141#define ATH9K_TXDESC_CTSENA 0x0008
142/* ATH9K_TXDESC_INTREQ forces a tx interrupt to be generated for
143 * the descriptor its marked on. We take a tx interrupt to reap
144 * descriptors when the h/w hits an EOL condition or
145 * when the descriptor is specifically marked to generate
146 * an interrupt with this flag. Descriptors should be
147 * marked periodically to insure timely replenishing of the
148 * supply needed for sending frames. Defering interrupts
149 * reduces system load and potentially allows more concurrent
150 * work to be done but if done to aggressively can cause
151 * senders to backup. When the hardware queue is left too
152 * large rate control information may also be too out of
153 * date. An Alternative for this is TX interrupt mitigation
154 * but this needs more testing. */
141#define ATH9K_TXDESC_INTREQ 0x0010 155#define ATH9K_TXDESC_INTREQ 0x0010
142#define ATH9K_TXDESC_VEOL 0x0020 156#define ATH9K_TXDESC_VEOL 0x0020
143#define ATH9K_TXDESC_EXT_ONLY 0x0040 157#define ATH9K_TXDESC_EXT_ONLY 0x0040
@@ -388,22 +402,6 @@ enum ath9k_int {
388 ATH9K_INT_NOCARD = 0xffffffff 402 ATH9K_INT_NOCARD = 0xffffffff
389}; 403};
390 404
391struct ath9k_rate_table {
392 int rateCount;
393 u8 rateCodeToIndex[256];
394 struct {
395 u8 valid;
396 u8 phy;
397 u32 rateKbps;
398 u8 rateCode;
399 u8 shortPreamble;
400 u8 dot11Rate;
401 u8 controlRate;
402 u16 lpAckDuration;
403 u16 spAckDuration;
404 } info[32];
405};
406
407#define ATH9K_RATESERIES_RTS_CTS 0x0001 405#define ATH9K_RATESERIES_RTS_CTS 0x0001
408#define ATH9K_RATESERIES_2040 0x0002 406#define ATH9K_RATESERIES_2040 0x0002
409#define ATH9K_RATESERIES_HALFGI 0x0004 407#define ATH9K_RATESERIES_HALFGI 0x0004
@@ -479,12 +477,10 @@ struct ath9k_channel {
479 (((_c)->channelFlags & CHANNEL_A_HT20) == CHANNEL_A_HT20) || \ 477 (((_c)->channelFlags & CHANNEL_A_HT20) == CHANNEL_A_HT20) || \
480 (((_c)->channelFlags & CHANNEL_A_HT40PLUS) == CHANNEL_A_HT40PLUS) || \ 478 (((_c)->channelFlags & CHANNEL_A_HT40PLUS) == CHANNEL_A_HT40PLUS) || \
481 (((_c)->channelFlags & CHANNEL_A_HT40MINUS) == CHANNEL_A_HT40MINUS)) 479 (((_c)->channelFlags & CHANNEL_A_HT40MINUS) == CHANNEL_A_HT40MINUS))
482#define IS_CHAN_B(_c) (((_c)->channelFlags & CHANNEL_B) == CHANNEL_B)
483#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \ 480#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
484 (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \ 481 (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
485 (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \ 482 (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
486 (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS)) 483 (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
487#define IS_CHAN_CCK(_c) (((_c)->channelFlags & CHANNEL_CCK) != 0)
488#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0) 484#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
489#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0) 485#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
490#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0) 486#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
@@ -493,6 +489,7 @@ struct ath9k_channel {
493#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0) 489#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
494 490
495/* These macros check chanmode and not channelFlags */ 491/* These macros check chanmode and not channelFlags */
492#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
496#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \ 493#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \
497 ((_c)->chanmode == CHANNEL_G_HT20)) 494 ((_c)->chanmode == CHANNEL_G_HT20))
498#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \ 495#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \
@@ -651,13 +648,6 @@ enum ath9k_ant_setting {
651 ATH9K_ANT_FIXED_B 648 ATH9K_ANT_FIXED_B
652}; 649};
653 650
654enum ath9k_opmode {
655 ATH9K_M_STA = 1,
656 ATH9K_M_IBSS = 0,
657 ATH9K_M_HOSTAP = 6,
658 ATH9K_M_MONITOR = 8
659};
660
661#define ATH9K_SLOT_TIME_6 6 651#define ATH9K_SLOT_TIME_6 6
662#define ATH9K_SLOT_TIME_9 9 652#define ATH9K_SLOT_TIME_9 9
663#define ATH9K_SLOT_TIME_20 20 653#define ATH9K_SLOT_TIME_20 20
@@ -689,13 +679,19 @@ enum ath9k_ani_cmd {
689 ATH9K_ANI_ALL = 0xff 679 ATH9K_ANI_ALL = 0xff
690}; 680};
691 681
692enum phytype { 682enum {
693 PHY_DS, 683 WLAN_RC_PHY_OFDM,
694 PHY_FH, 684 WLAN_RC_PHY_CCK,
695 PHY_OFDM, 685 WLAN_RC_PHY_HT_20_SS,
696 PHY_HT, 686 WLAN_RC_PHY_HT_20_DS,
687 WLAN_RC_PHY_HT_40_SS,
688 WLAN_RC_PHY_HT_40_DS,
689 WLAN_RC_PHY_HT_20_SS_HGI,
690 WLAN_RC_PHY_HT_20_DS_HGI,
691 WLAN_RC_PHY_HT_40_SS_HGI,
692 WLAN_RC_PHY_HT_40_DS_HGI,
693 WLAN_RC_PHY_MAX
697}; 694};
698#define PHY_CCK PHY_DS
699 695
700enum ath9k_tp_scale { 696enum ath9k_tp_scale {
701 ATH9K_TP_SCALE_MAX = 0, 697 ATH9K_TP_SCALE_MAX = 0,
@@ -778,7 +774,8 @@ struct ath_hal {
778 774
779 void __iomem *ah_sh; 775 void __iomem *ah_sh;
780 struct ath_softc *ah_sc; 776 struct ath_softc *ah_sc;
781 enum ath9k_opmode ah_opmode; 777
778 enum nl80211_iftype ah_opmode;
782 struct ath9k_ops_config ah_config; 779 struct ath9k_ops_config ah_config;
783 struct ath9k_hw_capabilities ah_caps; 780 struct ath9k_hw_capabilities ah_caps;
784 781
@@ -815,195 +812,246 @@ struct chan_centers {
815 u16 ext_center; 812 u16 ext_center;
816}; 813};
817 814
818int ath_hal_getcapability(struct ath_hal *ah, 815struct ath_rate_table;
819 enum ath9k_capability_type type, 816
820 u32 capability, 817/* Helpers */
821 u32 *result); 818
822const struct ath9k_rate_table *ath9k_hw_getratetable(struct ath_hal *ah, 819enum wireless_mode ath9k_hw_chan2wmode(struct ath_hal *ah,
823 u32 mode); 820 const struct ath9k_channel *chan);
824void ath9k_hw_detach(struct ath_hal *ah); 821bool ath9k_hw_wait(struct ath_hal *ah, u32 reg, u32 mask, u32 val);
825struct ath_hal *ath9k_hw_attach(u16 devid, 822u32 ath9k_hw_reverse_bits(u32 val, u32 n);
826 struct ath_softc *sc, 823bool ath9k_get_channel_edges(struct ath_hal *ah,
827 void __iomem *mem, 824 u16 flags, u16 *low,
828 int *error); 825 u16 *high);
829bool ath9k_regd_init_channels(struct ath_hal *ah, 826u16 ath9k_hw_computetxtime(struct ath_hal *ah,
830 u32 maxchans, u32 *nchans, 827 struct ath_rate_table *rates,
831 u8 *regclassids, 828 u32 frameLen, u16 rateix,
832 u32 maxregids, u32 *nregids, 829 bool shortPreamble);
833 u16 cc,
834 bool enableOutdoor,
835 bool enableExtendedChannels);
836u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags); 830u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
837enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, 831void ath9k_hw_get_channel_centers(struct ath_hal *ah,
838 enum ath9k_int ints); 832 struct ath9k_channel *chan,
839bool ath9k_hw_reset(struct ath_hal *ah, 833 struct chan_centers *centers);
840 struct ath9k_channel *chan, 834
835/* Attach, Detach */
836
837const char *ath9k_hw_probe(u16 vendorid, u16 devid);
838void ath9k_hw_detach(struct ath_hal *ah);
839struct ath_hal *ath9k_hw_attach(u16 devid, struct ath_softc *sc,
840 void __iomem *mem, int *error);
841void ath9k_hw_rfdetach(struct ath_hal *ah);
842
843
844/* HW Reset */
845
846bool ath9k_hw_reset(struct ath_hal *ah, struct ath9k_channel *chan,
841 enum ath9k_ht_macmode macmode, 847 enum ath9k_ht_macmode macmode,
842 u8 txchainmask, u8 rxchainmask, 848 u8 txchainmask, u8 rxchainmask,
843 enum ath9k_ht_extprotspacing extprotspacing, 849 enum ath9k_ht_extprotspacing extprotspacing,
844 bool bChannelChange, 850 bool bChannelChange, int *status);
845 int *status); 851
846bool ath9k_hw_phy_disable(struct ath_hal *ah); 852/* Key Cache Management */
847void ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan, 853
848 bool *isCalDone);
849void ath9k_hw_ani_monitor(struct ath_hal *ah,
850 const struct ath9k_node_stats *stats,
851 struct ath9k_channel *chan);
852bool ath9k_hw_calibrate(struct ath_hal *ah,
853 struct ath9k_channel *chan,
854 u8 rxchainmask,
855 bool longcal,
856 bool *isCalDone);
857s16 ath9k_hw_getchan_noise(struct ath_hal *ah,
858 struct ath9k_channel *chan);
859void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
860 u16 assocId);
861void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits);
862void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid,
863 u16 assocId);
864bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q);
865void ath9k_hw_reset_tsf(struct ath_hal *ah);
866bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry);
867bool ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry,
868 const u8 *mac);
869bool ath9k_hw_set_keycache_entry(struct ath_hal *ah,
870 u16 entry,
871 const struct ath9k_keyval *k,
872 const u8 *mac,
873 int xorKey);
874bool ath9k_hw_set_tsfadjust(struct ath_hal *ah,
875 u32 setting);
876void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore);
877bool ath9k_hw_intrpend(struct ath_hal *ah);
878bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked);
879bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah,
880 bool bIncTrigLevel);
881void ath9k_hw_procmibevent(struct ath_hal *ah,
882 const struct ath9k_node_stats *stats);
883bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set);
884void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode);
885bool ath9k_hw_phycounters(struct ath_hal *ah);
886bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry); 854bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry);
887bool ath9k_hw_getcapability(struct ath_hal *ah, 855bool ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry, const u8 *mac);
888 enum ath9k_capability_type type, 856bool ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry,
889 u32 capability, 857 const struct ath9k_keyval *k,
890 u32 *result); 858 const u8 *mac, int xorKey);
891bool ath9k_hw_setcapability(struct ath_hal *ah, 859bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry);
892 enum ath9k_capability_type type, 860
893 u32 capability, 861/* Power Management */
894 u32 setting, 862
895 int *status);
896u32 ath9k_hw_getdefantenna(struct ath_hal *ah);
897void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac);
898void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask);
899bool ath9k_hw_setbssidmask(struct ath_hal *ah,
900 const u8 *mask);
901bool ath9k_hw_setpower(struct ath_hal *ah, 863bool ath9k_hw_setpower(struct ath_hal *ah,
902 enum ath9k_power_mode mode); 864 enum ath9k_power_mode mode);
903enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah); 865void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore);
904u64 ath9k_hw_gettsf64(struct ath_hal *ah); 866
867/* Beacon timers */
868
869void ath9k_hw_beaconinit(struct ath_hal *ah, u32 next_beacon, u32 beacon_period);
870void ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
871 const struct ath9k_beacon_state *bs);
872/* HW Capabilities */
873
874bool ath9k_hw_fill_cap_info(struct ath_hal *ah);
875bool ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type,
876 u32 capability, u32 *result);
877bool ath9k_hw_setcapability(struct ath_hal *ah, enum ath9k_capability_type type,
878 u32 capability, u32 setting, int *status);
879
880/* GPIO / RFKILL / Antennae */
881
882void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio);
883u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio);
884void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
885 u32 ah_signal_type);
886void ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, u32 val);
887#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
888void ath9k_enable_rfkill(struct ath_hal *ah);
889#endif
890int ath9k_hw_select_antconfig(struct ath_hal *ah, u32 cfg);
905u32 ath9k_hw_getdefantenna(struct ath_hal *ah); 891u32 ath9k_hw_getdefantenna(struct ath_hal *ah);
906bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us); 892void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna);
907bool ath9k_hw_setantennaswitch(struct ath_hal *ah, 893bool ath9k_hw_setantennaswitch(struct ath_hal *ah,
908 enum ath9k_ant_setting settings, 894 enum ath9k_ant_setting settings,
909 struct ath9k_channel *chan, 895 struct ath9k_channel *chan,
910 u8 *tx_chainmask, 896 u8 *tx_chainmask,
911 u8 *rx_chainmask, 897 u8 *rx_chainmask,
912 u8 *antenna_cfgd); 898 u8 *antenna_cfgd);
913void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna); 899
914int ath9k_hw_select_antconfig(struct ath_hal *ah, 900/* General Operation */
915 u32 cfg); 901
916bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q, 902u32 ath9k_hw_getrxfilter(struct ath_hal *ah);
917 u32 txdp); 903void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits);
904bool ath9k_hw_phy_disable(struct ath_hal *ah);
905bool ath9k_hw_disable(struct ath_hal *ah);
906bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit);
907void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac);
908bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac);
909void ath9k_hw_setopmode(struct ath_hal *ah);
910void ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0, u32 filter1);
911void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask);
912bool ath9k_hw_setbssidmask(struct ath_hal *ah, const u8 *mask);
913void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid, u16 assocId);
914u64 ath9k_hw_gettsf64(struct ath_hal *ah);
915void ath9k_hw_reset_tsf(struct ath_hal *ah);
916bool ath9k_hw_set_tsfadjust(struct ath_hal *ah, u32 setting);
917bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us);
918void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode);
919
920/* Regulatory */
921
922bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah);
923struct ath9k_channel* ath9k_regd_check_channel(struct ath_hal *ah,
924 const struct ath9k_channel *c);
925u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan);
926u32 ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
927 struct ath9k_channel *chan);
928bool ath9k_regd_init_channels(struct ath_hal *ah,
929 u32 maxchans, u32 *nchans, u8 *regclassids,
930 u32 maxregids, u32 *nregids, u16 cc,
931 bool enableOutdoor, bool enableExtendedChannels);
932
933/* ANI */
934
935void ath9k_ani_reset(struct ath_hal *ah);
936void ath9k_hw_ani_monitor(struct ath_hal *ah,
937 const struct ath9k_node_stats *stats,
938 struct ath9k_channel *chan);
939bool ath9k_hw_phycounters(struct ath_hal *ah);
940void ath9k_enable_mib_counters(struct ath_hal *ah);
941void ath9k_hw_disable_mib_counters(struct ath_hal *ah);
942u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
943 u32 *rxc_pcnt,
944 u32 *rxf_pcnt,
945 u32 *txf_pcnt);
946void ath9k_hw_procmibevent(struct ath_hal *ah,
947 const struct ath9k_node_stats *stats);
948void ath9k_hw_ani_setup(struct ath_hal *ah);
949void ath9k_hw_ani_attach(struct ath_hal *ah);
950void ath9k_hw_ani_detach(struct ath_hal *ah);
951
952/* Calibration */
953
954void ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan,
955 bool *isCalDone);
956void ath9k_hw_start_nfcal(struct ath_hal *ah);
957void ath9k_hw_loadnf(struct ath_hal *ah, struct ath9k_channel *chan);
958int16_t ath9k_hw_getnf(struct ath_hal *ah,
959 struct ath9k_channel *chan);
960void ath9k_init_nfcal_hist_buffer(struct ath_hal *ah);
961s16 ath9k_hw_getchan_noise(struct ath_hal *ah, struct ath9k_channel *chan);
962bool ath9k_hw_calibrate(struct ath_hal *ah, struct ath9k_channel *chan,
963 u8 rxchainmask, bool longcal,
964 bool *isCalDone);
965bool ath9k_hw_init_cal(struct ath_hal *ah,
966 struct ath9k_channel *chan);
967
968
969/* EEPROM */
970
971int ath9k_hw_set_txpower(struct ath_hal *ah,
972 struct ath9k_channel *chan,
973 u16 cfgCtl,
974 u8 twiceAntennaReduction,
975 u8 twiceMaxRegulatoryPower,
976 u8 powerLimit);
977void ath9k_hw_set_addac(struct ath_hal *ah, struct ath9k_channel *chan);
978bool ath9k_hw_set_power_per_rate_table(struct ath_hal *ah,
979 struct ath9k_channel *chan,
980 int16_t *ratesArray,
981 u16 cfgCtl,
982 u8 AntennaReduction,
983 u8 twiceMaxRegulatoryPower,
984 u8 powerLimit);
985bool ath9k_hw_set_power_cal_table(struct ath_hal *ah,
986 struct ath9k_channel *chan,
987 int16_t *pTxPowerIndexOffset);
988bool ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
989 struct ath9k_channel *chan);
990int ath9k_hw_get_eeprom_antenna_cfg(struct ath_hal *ah,
991 struct ath9k_channel *chan,
992 u8 index, u16 *config);
993u8 ath9k_hw_get_num_ant_config(struct ath_hal *ah,
994 enum ieee80211_band freq_band);
995u16 ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah, u16 i, bool is2GHz);
996int ath9k_hw_eeprom_attach(struct ath_hal *ah);
997
998/* Interrupt Handling */
999
1000bool ath9k_hw_intrpend(struct ath_hal *ah);
1001bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked);
1002enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah);
1003enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints);
1004
1005/* MAC (PCU/QCU) */
1006
1007u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q);
1008bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q, u32 txdp);
918bool ath9k_hw_txstart(struct ath_hal *ah, u32 q); 1009bool ath9k_hw_txstart(struct ath_hal *ah, u32 q);
919u16 ath9k_hw_computetxtime(struct ath_hal *ah, 1010u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q);
920 const struct ath9k_rate_table *rates, 1011bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel);
921 u32 frameLen, u16 rateix, 1012bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q);
922 bool shortPreamble); 1013bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
1014 u32 segLen, bool firstSeg,
1015 bool lastSeg, const struct ath_desc *ds0);
1016void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds);
1017int ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds);
1018void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
1019 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
1020 u32 keyIx, enum ath9k_key_type keyType, u32 flags);
923void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds, 1021void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
924 struct ath_desc *lastds, 1022 struct ath_desc *lastds,
925 u32 durUpdateEn, u32 rtsctsRate, 1023 u32 durUpdateEn, u32 rtsctsRate,
926 u32 rtsctsDuration, 1024 u32 rtsctsDuration,
927 struct ath9k_11n_rate_series series[], 1025 struct ath9k_11n_rate_series series[],
928 u32 nseries, u32 flags); 1026 u32 nseries, u32 flags);
929void ath9k_hw_set11n_burstduration(struct ath_hal *ah, 1027void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
930 struct ath_desc *ds, 1028 u32 aggrLen);
1029void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
1030 u32 numDelims);
1031void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds);
1032void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds);
1033void ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
931 u32 burstDuration); 1034 u32 burstDuration);
932void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds); 1035void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds,
933u32 ath9k_hw_reverse_bits(u32 val, u32 n); 1036 u32 vmf);
934bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q); 1037void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs);
935u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan);
936u32 ath9k_regd_get_antenna_allowed(struct ath_hal *ah,
937 struct ath9k_channel *chan);
938u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags);
939bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
940 struct ath9k_tx_queue_info *qinfo);
941bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q, 1038bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
942 const struct ath9k_tx_queue_info *qinfo); 1039 const struct ath9k_tx_queue_info *qinfo);
943struct ath9k_channel *ath9k_regd_check_channel(struct ath_hal *ah, 1040bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
944 const struct ath9k_channel *c); 1041 struct ath9k_tx_queue_info *qinfo);
945void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds, 1042int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
946 u32 pktLen, enum ath9k_pkt_type type, 1043 const struct ath9k_tx_queue_info *qinfo);
947 u32 txPower, u32 keyIx, 1044bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q);
948 enum ath9k_key_type keyType, u32 flags); 1045bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q);
949bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds, 1046int ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
950 u32 segLen, bool firstSeg, 1047 u32 pa, struct ath_desc *nds, u64 tsf);
951 bool lastSeg,
952 const struct ath_desc *ds0);
953u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah,
954 u32 *rxc_pcnt,
955 u32 *rxf_pcnt,
956 u32 *txf_pcnt);
957void ath9k_hw_dmaRegDump(struct ath_hal *ah);
958void ath9k_hw_beaconinit(struct ath_hal *ah,
959 u32 next_beacon, u32 beacon_period);
960void ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
961 const struct ath9k_beacon_state *bs);
962bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds, 1048bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
963 u32 size, u32 flags); 1049 u32 size, u32 flags);
1050bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set);
964void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp); 1051void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp);
965void ath9k_hw_rxena(struct ath_hal *ah); 1052void ath9k_hw_rxena(struct ath_hal *ah);
966void ath9k_hw_setopmode(struct ath_hal *ah);
967bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac);
968void ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0,
969 u32 filter1);
970u32 ath9k_hw_getrxfilter(struct ath_hal *ah);
971void ath9k_hw_startpcureceive(struct ath_hal *ah); 1053void ath9k_hw_startpcureceive(struct ath_hal *ah);
972void ath9k_hw_stoppcurecv(struct ath_hal *ah); 1054void ath9k_hw_stoppcurecv(struct ath_hal *ah);
973bool ath9k_hw_stopdmarecv(struct ath_hal *ah); 1055bool ath9k_hw_stopdmarecv(struct ath_hal *ah);
974int ath9k_hw_rxprocdesc(struct ath_hal *ah, 1056
975 struct ath_desc *ds, u32 pa,
976 struct ath_desc *nds, u64 tsf);
977u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q);
978int ath9k_hw_txprocdesc(struct ath_hal *ah,
979 struct ath_desc *ds);
980void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
981 u32 numDelims);
982void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
983 u32 aggrLen);
984void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds);
985bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q);
986void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs);
987void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds);
988void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah,
989 struct ath_desc *ds, u32 vmf);
990bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit);
991bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah);
992int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
993 const struct ath9k_tx_queue_info *qinfo);
994u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q);
995const char *ath9k_hw_probe(u16 vendorid, u16 devid);
996bool ath9k_hw_disable(struct ath_hal *ah);
997void ath9k_hw_rfdetach(struct ath_hal *ah);
998void ath9k_hw_get_channel_centers(struct ath_hal *ah,
999 struct ath9k_channel *chan,
1000 struct chan_centers *centers);
1001bool ath9k_get_channel_edges(struct ath_hal *ah,
1002 u16 flags, u16 *low,
1003 u16 *high);
1004void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
1005 u32 ah_signal_type);
1006void ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, u32 value);
1007u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio);
1008void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio);
1009#endif 1057#endif
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 4dd1c1bda0fb..3ab0b43aaf93 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -14,13 +14,9 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17 /* Implementation of beacon processing. */
18
19#include "core.h" 17#include "core.h"
20 18
21/* 19/*
22 * Configure parameters for the beacon queue
23 *
24 * This function will modify certain transmit queue properties depending on 20 * This function will modify certain transmit queue properties depending on
25 * the operating mode of the station (AP or AdHoc). Parameters are AIFS 21 * the operating mode of the station (AP or AdHoc). Parameters are AIFS
26 * settings and channel width min/max 22 * settings and channel width min/max
@@ -30,33 +26,38 @@ static int ath_beaconq_config(struct ath_softc *sc)
30 struct ath_hal *ah = sc->sc_ah; 26 struct ath_hal *ah = sc->sc_ah;
31 struct ath9k_tx_queue_info qi; 27 struct ath9k_tx_queue_info qi;
32 28
33 ath9k_hw_get_txq_props(ah, sc->sc_bhalq, &qi); 29 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
34 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) { 30 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP) {
35 /* Always burst out beacon and CAB traffic. */ 31 /* Always burst out beacon and CAB traffic. */
36 qi.tqi_aifs = 1; 32 qi.tqi_aifs = 1;
37 qi.tqi_cwmin = 0; 33 qi.tqi_cwmin = 0;
38 qi.tqi_cwmax = 0; 34 qi.tqi_cwmax = 0;
39 } else { 35 } else {
40 /* Adhoc mode; important thing is to use 2x cwmin. */ 36 /* Adhoc mode; important thing is to use 2x cwmin. */
41 qi.tqi_aifs = sc->sc_beacon_qi.tqi_aifs; 37 qi.tqi_aifs = sc->beacon.beacon_qi.tqi_aifs;
42 qi.tqi_cwmin = 2*sc->sc_beacon_qi.tqi_cwmin; 38 qi.tqi_cwmin = 2*sc->beacon.beacon_qi.tqi_cwmin;
43 qi.tqi_cwmax = sc->sc_beacon_qi.tqi_cwmax; 39 qi.tqi_cwmax = sc->beacon.beacon_qi.tqi_cwmax;
44 } 40 }
45 41
46 if (!ath9k_hw_set_txq_props(ah, sc->sc_bhalq, &qi)) { 42 if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) {
47 DPRINTF(sc, ATH_DBG_FATAL, 43 DPRINTF(sc, ATH_DBG_FATAL,
48 "%s: unable to update h/w beacon queue parameters\n", 44 "unable to update h/w beacon queue parameters\n");
49 __func__);
50 return 0; 45 return 0;
51 } else { 46 } else {
52 ath9k_hw_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */ 47 ath9k_hw_resettxqueue(ah, sc->beacon.beaconq); /* push to h/w */
53 return 1; 48 return 1;
54 } 49 }
55} 50}
56 51
52static void ath_bstuck_process(struct ath_softc *sc)
53{
54 DPRINTF(sc, ATH_DBG_BEACON,
55 "stuck beacon; resetting (bmiss count %u)\n",
56 sc->beacon.bmisscnt);
57 ath_reset(sc, false);
58}
59
57/* 60/*
58 * Setup the beacon frame for transmit.
59 *
60 * Associates the beacon frame buffer with a transmit descriptor. Will set 61 * Associates the beacon frame buffer with a transmit descriptor. Will set
61 * up all required antenna switch parameters, rate codes, and channel flags. 62 * up all required antenna switch parameters, rate codes, and channel flags.
62 * Beacons are always sent out at the lowest rate, and are not retried. 63 * Beacons are always sent out at the lowest rate, and are not retried.
@@ -68,21 +69,20 @@ static void ath_beacon_setup(struct ath_softc *sc,
68 struct ath_hal *ah = sc->sc_ah; 69 struct ath_hal *ah = sc->sc_ah;
69 struct ath_desc *ds; 70 struct ath_desc *ds;
70 struct ath9k_11n_rate_series series[4]; 71 struct ath9k_11n_rate_series series[4];
71 const struct ath9k_rate_table *rt; 72 struct ath_rate_table *rt;
72 int flags, antenna; 73 int flags, antenna;
73 u8 rix, rate; 74 u8 rix, rate;
74 int ctsrate = 0; 75 int ctsrate = 0;
75 int ctsduration = 0; 76 int ctsduration = 0;
76 77
77 DPRINTF(sc, ATH_DBG_BEACON, "%s: m %p len %u\n", 78 DPRINTF(sc, ATH_DBG_BEACON, "m %p len %u\n", skb, skb->len);
78 __func__, skb, skb->len);
79 79
80 /* setup descriptors */ 80 /* setup descriptors */
81 ds = bf->bf_desc; 81 ds = bf->bf_desc;
82 82
83 flags = ATH9K_TXDESC_NOACK; 83 flags = ATH9K_TXDESC_NOACK;
84 84
85 if (sc->sc_ah->ah_opmode == ATH9K_M_IBSS && 85 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC &&
86 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 86 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
87 ds->ds_link = bf->bf_daddr; /* self-linked */ 87 ds->ds_link = bf->bf_daddr; /* self-linked */
88 flags |= ATH9K_TXDESC_VEOL; 88 flags |= ATH9K_TXDESC_VEOL;
@@ -96,7 +96,7 @@ static void ath_beacon_setup(struct ath_softc *sc,
96 * SWBA's 96 * SWBA's
97 * XXX assumes two antenna 97 * XXX assumes two antenna
98 */ 98 */
99 antenna = ((sc->ast_be_xmit / sc->sc_nbcnvaps) & 1 ? 2 : 1); 99 antenna = ((sc->beacon.ast_be_xmit / sc->sc_nbcnvaps) & 1 ? 2 : 1);
100 } 100 }
101 101
102 ds->ds_data = bf->bf_buf_addr; 102 ds->ds_data = bf->bf_buf_addr;
@@ -106,15 +106,15 @@ static void ath_beacon_setup(struct ath_softc *sc,
106 * XXX everything at min xmit rate 106 * XXX everything at min xmit rate
107 */ 107 */
108 rix = 0; 108 rix = 0;
109 rt = sc->sc_currates; 109 rt = sc->cur_rate_table;
110 rate = rt->info[rix].rateCode; 110 rate = rt->info[rix].ratecode;
111 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT) 111 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
112 rate |= rt->info[rix].shortPreamble; 112 rate |= rt->info[rix].short_preamble;
113 113
114 ath9k_hw_set11n_txdesc(ah, ds, 114 ath9k_hw_set11n_txdesc(ah, ds,
115 skb->len + FCS_LEN, /* frame length */ 115 skb->len + FCS_LEN, /* frame length */
116 ATH9K_PKT_TYPE_BEACON, /* Atheros packet type */ 116 ATH9K_PKT_TYPE_BEACON, /* Atheros packet type */
117 avp->av_btxctl.txpower, /* txpower XXX */ 117 MAX_RATE_POWER, /* FIXME */
118 ATH9K_TXKEYIX_INVALID, /* no encryption */ 118 ATH9K_TXKEYIX_INVALID, /* no encryption */
119 ATH9K_KEY_TYPE_CLEAR, /* no encryption */ 119 ATH9K_KEY_TYPE_CLEAR, /* no encryption */
120 flags /* no ack, 120 flags /* no ack,
@@ -138,31 +138,26 @@ static void ath_beacon_setup(struct ath_softc *sc,
138 ctsrate, ctsduration, series, 4, 0); 138 ctsrate, ctsduration, series, 4, 0);
139} 139}
140 140
141/* 141/* Generate beacon frame and queue cab data for a vap */
142 * Generate beacon frame and queue cab data for a vap.
143 *
144 * Updates the contents of the beacon frame. It is assumed that the buffer for
145 * the beacon frame has been allocated in the ATH object, and simply needs to
146 * be filled for this cycle. Also, any CAB (crap after beacon?) traffic will
147 * be added to the beacon frame at this point.
148*/
149static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id) 142static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
150{ 143{
151 struct ath_buf *bf; 144 struct ath_buf *bf;
152 struct ath_vap *avp; 145 struct ath_vap *avp;
153 struct sk_buff *skb; 146 struct sk_buff *skb;
154 struct ath_txq *cabq; 147 struct ath_txq *cabq;
148 struct ieee80211_vif *vif;
155 struct ieee80211_tx_info *info; 149 struct ieee80211_tx_info *info;
156 int cabq_depth; 150 int cabq_depth;
157 151
158 avp = sc->sc_vaps[if_id]; 152 vif = sc->sc_vaps[if_id];
159 ASSERT(avp); 153 ASSERT(vif);
160 154
161 cabq = sc->sc_cabq; 155 avp = (void *)vif->drv_priv;
156 cabq = sc->beacon.cabq;
162 157
163 if (avp->av_bcbuf == NULL) { 158 if (avp->av_bcbuf == NULL) {
164 DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n", 159 DPRINTF(sc, ATH_DBG_BEACON, "avp=%p av_bcbuf=%p\n",
165 __func__, avp, avp->av_bcbuf); 160 avp, avp->av_bcbuf);
166 return NULL; 161 return NULL;
167 } 162 }
168 163
@@ -172,9 +167,10 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
172 pci_unmap_single(sc->pdev, bf->bf_dmacontext, 167 pci_unmap_single(sc->pdev, bf->bf_dmacontext,
173 skb->len, 168 skb->len,
174 PCI_DMA_TODEVICE); 169 PCI_DMA_TODEVICE);
170 dev_kfree_skb_any(skb);
175 } 171 }
176 172
177 skb = ieee80211_beacon_get(sc->hw, avp->av_if_data); 173 skb = ieee80211_beacon_get(sc->hw, vif);
178 bf->bf_mpdu = skb; 174 bf->bf_mpdu = skb;
179 if (skb == NULL) 175 if (skb == NULL)
180 return NULL; 176 return NULL;
@@ -186,17 +182,24 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
186 * TX frames) 182 * TX frames)
187 */ 183 */
188 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 184 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
189 sc->seq_no += 0x10; 185 sc->tx.seq_no += 0x10;
190 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 186 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
191 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no); 187 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
192 } 188 }
193 189
194 bf->bf_buf_addr = bf->bf_dmacontext = 190 bf->bf_buf_addr = bf->bf_dmacontext =
195 pci_map_single(sc->pdev, skb->data, 191 pci_map_single(sc->pdev, skb->data,
196 skb->len, 192 skb->len,
197 PCI_DMA_TODEVICE); 193 PCI_DMA_TODEVICE);
194 if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_buf_addr))) {
195 dev_kfree_skb_any(skb);
196 bf->bf_mpdu = NULL;
197 DPRINTF(sc, ATH_DBG_CONFIG,
198 "pci_dma_mapping_error() on beaconing\n");
199 return NULL;
200 }
198 201
199 skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data); 202 skb = ieee80211_get_buffered_bc(sc->hw, vif);
200 203
201 /* 204 /*
202 * if the CABQ traffic from previous DTIM is pending and the current 205 * if the CABQ traffic from previous DTIM is pending and the current
@@ -219,7 +222,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
219 if (sc->sc_nvaps > 1) { 222 if (sc->sc_nvaps > 1) {
220 ath_tx_draintxq(sc, cabq, false); 223 ath_tx_draintxq(sc, cabq, false);
221 DPRINTF(sc, ATH_DBG_BEACON, 224 DPRINTF(sc, ATH_DBG_BEACON,
222 "%s: flush previous cabq traffic\n", __func__); 225 "flush previous cabq traffic\n");
223 } 226 }
224 } 227 }
225 228
@@ -232,7 +235,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
232 */ 235 */
233 while (skb) { 236 while (skb) {
234 ath_tx_cabq(sc, skb); 237 ath_tx_cabq(sc, skb);
235 skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data); 238 skb = ieee80211_get_buffered_bc(sc->hw, vif);
236 } 239 }
237 240
238 return bf; 241 return bf;
@@ -244,17 +247,20 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
244*/ 247*/
245static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id) 248static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
246{ 249{
250 struct ieee80211_vif *vif;
247 struct ath_hal *ah = sc->sc_ah; 251 struct ath_hal *ah = sc->sc_ah;
248 struct ath_buf *bf; 252 struct ath_buf *bf;
249 struct ath_vap *avp; 253 struct ath_vap *avp;
250 struct sk_buff *skb; 254 struct sk_buff *skb;
251 255
252 avp = sc->sc_vaps[if_id]; 256 vif = sc->sc_vaps[if_id];
253 ASSERT(avp); 257 ASSERT(vif);
258
259 avp = (void *)vif->drv_priv;
254 260
255 if (avp->av_bcbuf == NULL) { 261 if (avp->av_bcbuf == NULL) {
256 DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n", 262 DPRINTF(sc, ATH_DBG_BEACON, "avp=%p av_bcbuf=%p\n",
257 __func__, avp, avp != NULL ? avp->av_bcbuf : NULL); 263 avp, avp != NULL ? avp->av_bcbuf : NULL);
258 return; 264 return;
259 } 265 }
260 bf = avp->av_bcbuf; 266 bf = avp->av_bcbuf;
@@ -264,20 +270,12 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
264 ath_beacon_setup(sc, avp, bf); 270 ath_beacon_setup(sc, avp, bf);
265 271
266 /* NB: caller is known to have already stopped tx dma */ 272 /* NB: caller is known to have already stopped tx dma */
267 ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); 273 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
268 ath9k_hw_txstart(ah, sc->sc_bhalq); 274 ath9k_hw_txstart(ah, sc->beacon.beaconq);
269 DPRINTF(sc, ATH_DBG_BEACON, "%s: TXDP%u = %llx (%p)\n", __func__, 275 DPRINTF(sc, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n",
270 sc->sc_bhalq, ito64(bf->bf_daddr), bf->bf_desc); 276 sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc);
271} 277}
272 278
273/*
274 * Setup a h/w transmit queue for beacons.
275 *
276 * This function allocates an information structure (struct ath9k_txq_info)
277 * on the stack, sets some specific parameters (zero out channel width
278 * min/max, and enable aifs). The info structure does not need to be
279 * persistant.
280*/
281int ath_beaconq_setup(struct ath_hal *ah) 279int ath_beaconq_setup(struct ath_hal *ah)
282{ 280{
283 struct ath9k_tx_queue_info qi; 281 struct ath9k_tx_queue_info qi;
@@ -290,35 +288,29 @@ int ath_beaconq_setup(struct ath_hal *ah)
290 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi); 288 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
291} 289}
292 290
293
294/*
295 * Allocate and setup an initial beacon frame.
296 *
297 * Allocate a beacon state variable for a specific VAP instance created on
298 * the ATH interface. This routine also calculates the beacon "slot" for
299 * staggared beacons in the mBSSID case.
300*/
301int ath_beacon_alloc(struct ath_softc *sc, int if_id) 291int ath_beacon_alloc(struct ath_softc *sc, int if_id)
302{ 292{
293 struct ieee80211_vif *vif;
303 struct ath_vap *avp; 294 struct ath_vap *avp;
304 struct ieee80211_hdr *hdr; 295 struct ieee80211_hdr *hdr;
305 struct ath_buf *bf; 296 struct ath_buf *bf;
306 struct sk_buff *skb; 297 struct sk_buff *skb;
307 __le64 tstamp; 298 __le64 tstamp;
308 299
309 avp = sc->sc_vaps[if_id]; 300 vif = sc->sc_vaps[if_id];
310 ASSERT(avp); 301 ASSERT(vif);
302
303 avp = (void *)vif->drv_priv;
311 304
312 /* Allocate a beacon descriptor if we haven't done so. */ 305 /* Allocate a beacon descriptor if we haven't done so. */
313 if (!avp->av_bcbuf) { 306 if (!avp->av_bcbuf) {
314 /* Allocate beacon state for hostap/ibss. We know 307 /* Allocate beacon state for hostap/ibss. We know
315 * a buffer is available. */ 308 * a buffer is available. */
316 309 avp->av_bcbuf = list_first_entry(&sc->beacon.bbuf,
317 avp->av_bcbuf = list_first_entry(&sc->sc_bbuf,
318 struct ath_buf, list); 310 struct ath_buf, list);
319 list_del(&avp->av_bcbuf->list); 311 list_del(&avp->av_bcbuf->list);
320 312
321 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP || 313 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP ||
322 !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) { 314 !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) {
323 int slot; 315 int slot;
324 /* 316 /*
@@ -327,13 +319,13 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
327 */ 319 */
328 avp->av_bslot = 0; 320 avp->av_bslot = 0;
329 for (slot = 0; slot < ATH_BCBUF; slot++) 321 for (slot = 0; slot < ATH_BCBUF; slot++)
330 if (sc->sc_bslot[slot] == ATH_IF_ID_ANY) { 322 if (sc->beacon.bslot[slot] == ATH_IF_ID_ANY) {
331 /* 323 /*
332 * XXX hack, space out slots to better 324 * XXX hack, space out slots to better
333 * deal with misses 325 * deal with misses
334 */ 326 */
335 if (slot+1 < ATH_BCBUF && 327 if (slot+1 < ATH_BCBUF &&
336 sc->sc_bslot[slot+1] == 328 sc->beacon.bslot[slot+1] ==
337 ATH_IF_ID_ANY) { 329 ATH_IF_ID_ANY) {
338 avp->av_bslot = slot+1; 330 avp->av_bslot = slot+1;
339 break; 331 break;
@@ -341,8 +333,8 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
341 avp->av_bslot = slot; 333 avp->av_bslot = slot;
342 /* NB: keep looking for a double slot */ 334 /* NB: keep looking for a double slot */
343 } 335 }
344 BUG_ON(sc->sc_bslot[avp->av_bslot] != ATH_IF_ID_ANY); 336 BUG_ON(sc->beacon.bslot[avp->av_bslot] != ATH_IF_ID_ANY);
345 sc->sc_bslot[avp->av_bslot] = if_id; 337 sc->beacon.bslot[avp->av_bslot] = if_id;
346 sc->sc_nbcnvaps++; 338 sc->sc_nbcnvaps++;
347 } 339 }
348 } 340 }
@@ -363,15 +355,14 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
363 * FIXME: Fill avp->av_btxctl.txpower and 355 * FIXME: Fill avp->av_btxctl.txpower and
364 * avp->av_btxctl.shortPreamble 356 * avp->av_btxctl.shortPreamble
365 */ 357 */
366 skb = ieee80211_beacon_get(sc->hw, avp->av_if_data); 358 skb = ieee80211_beacon_get(sc->hw, vif);
367 if (skb == NULL) { 359 if (skb == NULL) {
368 DPRINTF(sc, ATH_DBG_BEACON, "%s: cannot get skb\n", 360 DPRINTF(sc, ATH_DBG_BEACON, "cannot get skb\n");
369 __func__);
370 return -ENOMEM; 361 return -ENOMEM;
371 } 362 }
372 363
373 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 364 tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
374 sc->bc_tstamp = le64_to_cpu(tstamp); 365 sc->beacon.bc_tstamp = le64_to_cpu(tstamp);
375 366
376 /* 367 /*
377 * Calculate a TSF adjustment factor required for 368 * Calculate a TSF adjustment factor required for
@@ -402,36 +393,36 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
402 val = cpu_to_le64(tsfadjust << 10); /* TU->TSF */ 393 val = cpu_to_le64(tsfadjust << 10); /* TU->TSF */
403 394
404 DPRINTF(sc, ATH_DBG_BEACON, 395 DPRINTF(sc, ATH_DBG_BEACON,
405 "%s: %s beacons, bslot %d intval %u tsfadjust %llu\n", 396 "stagger beacons, bslot %d intval %u tsfadjust %llu\n",
406 __func__, "stagger",
407 avp->av_bslot, intval, (unsigned long long)tsfadjust); 397 avp->av_bslot, intval, (unsigned long long)tsfadjust);
408 398
409 hdr = (struct ieee80211_hdr *)skb->data; 399 hdr = (struct ieee80211_hdr *)skb->data;
410 memcpy(&hdr[1], &val, sizeof(val)); 400 memcpy(&hdr[1], &val, sizeof(val));
411 } 401 }
412 402
403 bf->bf_mpdu = skb;
413 bf->bf_buf_addr = bf->bf_dmacontext = 404 bf->bf_buf_addr = bf->bf_dmacontext =
414 pci_map_single(sc->pdev, skb->data, 405 pci_map_single(sc->pdev, skb->data,
415 skb->len, 406 skb->len,
416 PCI_DMA_TODEVICE); 407 PCI_DMA_TODEVICE);
417 bf->bf_mpdu = skb; 408 if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_buf_addr))) {
409 dev_kfree_skb_any(skb);
410 bf->bf_mpdu = NULL;
411 DPRINTF(sc, ATH_DBG_CONFIG,
412 "pci_dma_mapping_error() on beacon alloc\n");
413 return -ENOMEM;
414 }
418 415
419 return 0; 416 return 0;
420} 417}
421 418
422/*
423 * Reclaim beacon resources and return buffer to the pool.
424 *
425 * Checks the VAP to put the beacon frame buffer back to the ATH object
426 * queue, and de-allocates any skbs that were sent as CAB traffic.
427*/
428void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp) 419void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
429{ 420{
430 if (avp->av_bcbuf != NULL) { 421 if (avp->av_bcbuf != NULL) {
431 struct ath_buf *bf; 422 struct ath_buf *bf;
432 423
433 if (avp->av_bslot != -1) { 424 if (avp->av_bslot != -1) {
434 sc->sc_bslot[avp->av_bslot] = ATH_IF_ID_ANY; 425 sc->beacon.bslot[avp->av_bslot] = ATH_IF_ID_ANY;
435 sc->sc_nbcnvaps--; 426 sc->sc_nbcnvaps--;
436 } 427 }
437 428
@@ -444,19 +435,12 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
444 dev_kfree_skb_any(skb); 435 dev_kfree_skb_any(skb);
445 bf->bf_mpdu = NULL; 436 bf->bf_mpdu = NULL;
446 } 437 }
447 list_add_tail(&bf->list, &sc->sc_bbuf); 438 list_add_tail(&bf->list, &sc->beacon.bbuf);
448 439
449 avp->av_bcbuf = NULL; 440 avp->av_bcbuf = NULL;
450 } 441 }
451} 442}
452 443
453/*
454 * Tasklet for Sending Beacons
455 *
456 * Transmit one or more beacon frames at SWBA. Dynamic updates to the frame
457 * contents are done as needed and the slot time is also adjusted based on
458 * current state.
459*/
460void ath9k_beacon_tasklet(unsigned long data) 444void ath9k_beacon_tasklet(unsigned long data)
461{ 445{
462 struct ath_softc *sc = (struct ath_softc *)data; 446 struct ath_softc *sc = (struct ath_softc *)data;
@@ -473,9 +457,7 @@ void ath9k_beacon_tasklet(unsigned long data)
473 457
474 if (sc->sc_flags & SC_OP_NO_RESET) { 458 if (sc->sc_flags & SC_OP_NO_RESET) {
475 show_cycles = ath9k_hw_GetMibCycleCountsPct(ah, 459 show_cycles = ath9k_hw_GetMibCycleCountsPct(ah,
476 &rx_clear, 460 &rx_clear, &rx_frame, &tx_frame);
477 &rx_frame,
478 &tx_frame);
479 } 461 }
480 462
481 /* 463 /*
@@ -487,67 +469,65 @@ void ath9k_beacon_tasklet(unsigned long data)
487 * 469 *
488 * FIXME: Clean up this mess !! 470 * FIXME: Clean up this mess !!
489 */ 471 */
490 if (ath9k_hw_numtxpending(ah, sc->sc_bhalq) != 0) { 472 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
491 sc->sc_bmisscount++; 473 sc->beacon.bmisscnt++;
492 /* XXX: doth needs the chanchange IE countdown decremented. 474 /* XXX: doth needs the chanchange IE countdown decremented.
493 * We should consider adding a mac80211 call to indicate 475 * We should consider adding a mac80211 call to indicate
494 * a beacon miss so appropriate action could be taken 476 * a beacon miss so appropriate action could be taken
495 * (in that layer). 477 * (in that layer).
496 */ 478 */
497 if (sc->sc_bmisscount < BSTUCK_THRESH) { 479 if (sc->beacon.bmisscnt < BSTUCK_THRESH) {
498 if (sc->sc_flags & SC_OP_NO_RESET) { 480 if (sc->sc_flags & SC_OP_NO_RESET) {
499 DPRINTF(sc, ATH_DBG_BEACON, 481 DPRINTF(sc, ATH_DBG_BEACON,
500 "%s: missed %u consecutive beacons\n", 482 "missed %u consecutive beacons\n",
501 __func__, sc->sc_bmisscount); 483 sc->beacon.bmisscnt);
502 if (show_cycles) { 484 if (show_cycles) {
503 /* 485 /*
504 * Display cycle counter stats from HW 486 * Display cycle counter stats from HW
505 * to aide in debug of stickiness. 487 * to aide in debug of stickiness.
506 */ 488 */
507 DPRINTF(sc, ATH_DBG_BEACON, 489 DPRINTF(sc, ATH_DBG_BEACON,
508 "%s: busy times: rx_clear=%d, " 490 "busy times: rx_clear=%d, "
509 "rx_frame=%d, tx_frame=%d\n", 491 "rx_frame=%d, tx_frame=%d\n",
510 __func__, rx_clear, rx_frame, 492 rx_clear, rx_frame,
511 tx_frame); 493 tx_frame);
512 } else { 494 } else {
513 DPRINTF(sc, ATH_DBG_BEACON, 495 DPRINTF(sc, ATH_DBG_BEACON,
514 "%s: unable to obtain " 496 "unable to obtain "
515 "busy times\n", __func__); 497 "busy times\n");
516 } 498 }
517 } else { 499 } else {
518 DPRINTF(sc, ATH_DBG_BEACON, 500 DPRINTF(sc, ATH_DBG_BEACON,
519 "%s: missed %u consecutive beacons\n", 501 "missed %u consecutive beacons\n",
520 __func__, sc->sc_bmisscount); 502 sc->beacon.bmisscnt);
521 } 503 }
522 } else if (sc->sc_bmisscount >= BSTUCK_THRESH) { 504 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
523 if (sc->sc_flags & SC_OP_NO_RESET) { 505 if (sc->sc_flags & SC_OP_NO_RESET) {
524 if (sc->sc_bmisscount == BSTUCK_THRESH) { 506 if (sc->beacon.bmisscnt == BSTUCK_THRESH) {
525 DPRINTF(sc, ATH_DBG_BEACON, 507 DPRINTF(sc, ATH_DBG_BEACON,
526 "%s: beacon is officially " 508 "beacon is officially "
527 "stuck\n", __func__); 509 "stuck\n");
528 ath9k_hw_dmaRegDump(ah);
529 } 510 }
530 } else { 511 } else {
531 DPRINTF(sc, ATH_DBG_BEACON, 512 DPRINTF(sc, ATH_DBG_BEACON,
532 "%s: beacon is officially stuck\n", 513 "beacon is officially stuck\n");
533 __func__);
534 ath_bstuck_process(sc); 514 ath_bstuck_process(sc);
535 } 515 }
536 } 516 }
537 return; 517 return;
538 } 518 }
539 519
540 if (sc->sc_bmisscount != 0) { 520 if (sc->beacon.bmisscnt != 0) {
541 if (sc->sc_flags & SC_OP_NO_RESET) { 521 if (sc->sc_flags & SC_OP_NO_RESET) {
542 DPRINTF(sc, ATH_DBG_BEACON, 522 DPRINTF(sc, ATH_DBG_BEACON,
543 "%s: resume beacon xmit after %u misses\n", 523 "resume beacon xmit after %u misses\n",
544 __func__, sc->sc_bmisscount); 524 sc->beacon.bmisscnt);
545 } else { 525 } else {
546 DPRINTF(sc, ATH_DBG_BEACON, 526 DPRINTF(sc, ATH_DBG_BEACON,
547 "%s: resume beacon xmit after %u misses\n", 527 "resume beacon xmit after %u misses\n",
548 __func__, sc->sc_bmisscount); 528 sc->beacon.bmisscnt);
549 } 529 }
550 sc->sc_bmisscount = 0; 530 sc->beacon.bmisscnt = 0;
551 } 531 }
552 532
553 /* 533 /*
@@ -562,11 +542,11 @@ void ath9k_beacon_tasklet(unsigned long data)
562 tsf = ath9k_hw_gettsf64(ah); 542 tsf = ath9k_hw_gettsf64(ah);
563 tsftu = TSF_TO_TU(tsf>>32, tsf); 543 tsftu = TSF_TO_TU(tsf>>32, tsf);
564 slot = ((tsftu % intval) * ATH_BCBUF) / intval; 544 slot = ((tsftu % intval) * ATH_BCBUF) / intval;
565 if_id = sc->sc_bslot[(slot + 1) % ATH_BCBUF]; 545 if_id = sc->beacon.bslot[(slot + 1) % ATH_BCBUF];
566 546
567 DPRINTF(sc, ATH_DBG_BEACON, 547 DPRINTF(sc, ATH_DBG_BEACON,
568 "%s: slot %d [tsf %llu tsftu %u intval %u] if_id %d\n", 548 "slot %d [tsf %llu tsftu %u intval %u] if_id %d\n",
569 __func__, slot, (unsigned long long)tsf, tsftu, 549 slot, (unsigned long long)tsf, tsftu,
570 intval, if_id); 550 intval, if_id);
571 551
572 bfaddr = 0; 552 bfaddr = 0;
@@ -594,48 +574,34 @@ void ath9k_beacon_tasklet(unsigned long data)
594 * set to ATH_BCBUF so this check is a noop. 574 * set to ATH_BCBUF so this check is a noop.
595 */ 575 */
596 /* XXX locking */ 576 /* XXX locking */
597 if (sc->sc_updateslot == UPDATE) { 577 if (sc->beacon.updateslot == UPDATE) {
598 sc->sc_updateslot = COMMIT; /* commit next beacon */ 578 sc->beacon.updateslot = COMMIT; /* commit next beacon */
599 sc->sc_slotupdate = slot; 579 sc->beacon.slotupdate = slot;
600 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot) 580 } else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) {
601 ath_setslottime(sc); /* commit change to hardware */ 581 ath9k_hw_setslottime(sc->sc_ah, sc->beacon.slottime);
602 582 sc->beacon.updateslot = OK;
583 }
603 if (bfaddr != 0) { 584 if (bfaddr != 0) {
604 /* 585 /*
605 * Stop any current dma and put the new frame(s) on the queue. 586 * Stop any current dma and put the new frame(s) on the queue.
606 * This should never fail since we check above that no frames 587 * This should never fail since we check above that no frames
607 * are still pending on the queue. 588 * are still pending on the queue.
608 */ 589 */
609 if (!ath9k_hw_stoptxdma(ah, sc->sc_bhalq)) { 590 if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) {
610 DPRINTF(sc, ATH_DBG_FATAL, 591 DPRINTF(sc, ATH_DBG_FATAL,
611 "%s: beacon queue %u did not stop?\n", 592 "beacon queue %u did not stop?\n", sc->beacon.beaconq);
612 __func__, sc->sc_bhalq);
613 /* NB: the HAL still stops DMA, so proceed */ 593 /* NB: the HAL still stops DMA, so proceed */
614 } 594 }
615 595
616 /* NB: cabq traffic should already be queued and primed */ 596 /* NB: cabq traffic should already be queued and primed */
617 ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bfaddr); 597 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
618 ath9k_hw_txstart(ah, sc->sc_bhalq); 598 ath9k_hw_txstart(ah, sc->beacon.beaconq);
619 599
620 sc->ast_be_xmit += bc; /* XXX per-vap? */ 600 sc->beacon.ast_be_xmit += bc; /* XXX per-vap? */
621 } 601 }
622} 602}
623 603
624/* 604/*
625 * Tasklet for Beacon Stuck processing
626 *
627 * Processing for Beacon Stuck.
628 * Basically resets the chip.
629*/
630void ath_bstuck_process(struct ath_softc *sc)
631{
632 DPRINTF(sc, ATH_DBG_BEACON,
633 "%s: stuck beacon; resetting (bmiss count %u)\n",
634 __func__, sc->sc_bmisscount);
635 ath_reset(sc, false);
636}
637
638/*
639 * Configure the beacon and sleep timers. 605 * Configure the beacon and sleep timers.
640 * 606 *
641 * When operating as an AP this resets the TSF and sets 607 * When operating as an AP this resets the TSF and sets
@@ -652,15 +618,21 @@ void ath_bstuck_process(struct ath_softc *sc)
652 */ 618 */
653void ath_beacon_config(struct ath_softc *sc, int if_id) 619void ath_beacon_config(struct ath_softc *sc, int if_id)
654{ 620{
621 struct ieee80211_vif *vif;
655 struct ath_hal *ah = sc->sc_ah; 622 struct ath_hal *ah = sc->sc_ah;
656 struct ath_beacon_config conf; 623 struct ath_beacon_config conf;
657 enum ath9k_opmode av_opmode; 624 struct ath_vap *avp;
625 enum nl80211_iftype opmode;
658 u32 nexttbtt, intval; 626 u32 nexttbtt, intval;
659 627
660 if (if_id != ATH_IF_ID_ANY) 628 if (if_id != ATH_IF_ID_ANY) {
661 av_opmode = sc->sc_vaps[if_id]->av_opmode; 629 vif = sc->sc_vaps[if_id];
662 else 630 ASSERT(vif);
663 av_opmode = sc->sc_ah->ah_opmode; 631 avp = (void *)vif->drv_priv;
632 opmode = avp->av_opmode;
633 } else {
634 opmode = sc->sc_ah->ah_opmode;
635 }
664 636
665 memset(&conf, 0, sizeof(struct ath_beacon_config)); 637 memset(&conf, 0, sizeof(struct ath_beacon_config));
666 638
@@ -672,10 +644,10 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
672 conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval; 644 conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval;
673 645
674 /* extract tstamp from last beacon and convert to TU */ 646 /* extract tstamp from last beacon and convert to TU */
675 nexttbtt = TSF_TO_TU(sc->bc_tstamp >> 32, sc->bc_tstamp); 647 nexttbtt = TSF_TO_TU(sc->beacon.bc_tstamp >> 32, sc->beacon.bc_tstamp);
676 648
677 /* XXX conditionalize multi-bss support? */ 649 /* XXX conditionalize multi-bss support? */
678 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) { 650 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP) {
679 /* 651 /*
680 * For multi-bss ap support beacons are either staggered 652 * For multi-bss ap support beacons are either staggered
681 * evenly over N slots or burst together. For the former 653 * evenly over N slots or burst together. For the former
@@ -694,11 +666,11 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
694 else if (intval) /* NB: can be 0 for monitor mode */ 666 else if (intval) /* NB: can be 0 for monitor mode */
695 nexttbtt = roundup(nexttbtt, intval); 667 nexttbtt = roundup(nexttbtt, intval);
696 668
697 DPRINTF(sc, ATH_DBG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", 669 DPRINTF(sc, ATH_DBG_BEACON, "nexttbtt %u intval %u (%u)\n",
698 __func__, nexttbtt, intval, conf.beacon_interval); 670 nexttbtt, intval, conf.beacon_interval);
699 671
700 /* Check for ATH9K_M_HOSTAP and sc_nostabeacons for WDS client */ 672 /* Check for NL80211_IFTYPE_AP and sc_nostabeacons for WDS client */
701 if (sc->sc_ah->ah_opmode == ATH9K_M_STA) { 673 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION) {
702 struct ath9k_beacon_state bs; 674 struct ath9k_beacon_state bs;
703 u64 tsf; 675 u64 tsf;
704 u32 tsftu; 676 u32 tsftu;
@@ -782,7 +754,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
782 bs.bs_sleepduration = bs.bs_dtimperiod; 754 bs.bs_sleepduration = bs.bs_dtimperiod;
783 755
784 DPRINTF(sc, ATH_DBG_BEACON, 756 DPRINTF(sc, ATH_DBG_BEACON,
785 "%s: tsf %llu " 757 "tsf %llu "
786 "tsf:tu %u " 758 "tsf:tu %u "
787 "intval %u " 759 "intval %u "
788 "nexttbtt %u " 760 "nexttbtt %u "
@@ -794,7 +766,6 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
794 "maxdur %u " 766 "maxdur %u "
795 "next %u " 767 "next %u "
796 "timoffset %u\n", 768 "timoffset %u\n",
797 __func__,
798 (unsigned long long)tsf, tsftu, 769 (unsigned long long)tsf, tsftu,
799 bs.bs_intval, 770 bs.bs_intval,
800 bs.bs_nexttbtt, 771 bs.bs_nexttbtt,
@@ -818,7 +789,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
818 ath9k_hw_set_interrupts(ah, 0); 789 ath9k_hw_set_interrupts(ah, 0);
819 if (nexttbtt == intval) 790 if (nexttbtt == intval)
820 intval |= ATH9K_BEACON_RESET_TSF; 791 intval |= ATH9K_BEACON_RESET_TSF;
821 if (sc->sc_ah->ah_opmode == ATH9K_M_IBSS) { 792 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC) {
822 /* 793 /*
823 * Pull nexttbtt forward to reflect the current 794 * Pull nexttbtt forward to reflect the current
824 * TSF 795 * TSF
@@ -834,8 +805,8 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
834 } 805 }
835#undef FUDGE 806#undef FUDGE
836 DPRINTF(sc, ATH_DBG_BEACON, 807 DPRINTF(sc, ATH_DBG_BEACON,
837 "%s: IBSS nexttbtt %u intval %u (%u)\n", 808 "IBSS nexttbtt %u intval %u (%u)\n",
838 __func__, nexttbtt, 809 nexttbtt,
839 intval & ~ATH9K_BEACON_RESET_TSF, 810 intval & ~ATH9K_BEACON_RESET_TSF,
840 conf.beacon_interval); 811 conf.beacon_interval);
841 812
@@ -850,7 +821,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
850 if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) 821 if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
851 sc->sc_imask |= ATH9K_INT_SWBA; 822 sc->sc_imask |= ATH9K_INT_SWBA;
852 ath_beaconq_config(sc); 823 ath_beaconq_config(sc);
853 } else if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) { 824 } else if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP) {
854 /* 825 /*
855 * In AP mode we enable the beacon timers and 826 * In AP mode we enable the beacon timers and
856 * SWBA interrupts to prepare beacon frames. 827 * SWBA interrupts to prepare beacon frames.
@@ -860,20 +831,18 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
860 ath_beaconq_config(sc); 831 ath_beaconq_config(sc);
861 } 832 }
862 ath9k_hw_beaconinit(ah, nexttbtt, intval); 833 ath9k_hw_beaconinit(ah, nexttbtt, intval);
863 sc->sc_bmisscount = 0; 834 sc->beacon.bmisscnt = 0;
864 ath9k_hw_set_interrupts(ah, sc->sc_imask); 835 ath9k_hw_set_interrupts(ah, sc->sc_imask);
865 /* 836 /*
866 * When using a self-linked beacon descriptor in 837 * When using a self-linked beacon descriptor in
867 * ibss mode load it once here. 838 * ibss mode load it once here.
868 */ 839 */
869 if (sc->sc_ah->ah_opmode == ATH9K_M_IBSS && 840 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC &&
870 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) 841 (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL))
871 ath_beacon_start_adhoc(sc, 0); 842 ath_beacon_start_adhoc(sc, 0);
872 } 843 }
873} 844}
874 845
875/* Function to collect beacon rssi data and resync beacon if necessary */
876
877void ath_beacon_sync(struct ath_softc *sc, int if_id) 846void ath_beacon_sync(struct ath_softc *sc, int if_id)
878{ 847{
879 /* 848 /*
diff --git a/drivers/net/wireless/ath9k/calib.c b/drivers/net/wireless/ath9k/calib.c
new file mode 100644
index 000000000000..3c7454fc51bd
--- /dev/null
+++ b/drivers/net/wireless/ath9k/calib.c
@@ -0,0 +1,1021 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "hw.h"
19#include "reg.h"
20#include "phy.h"
21
22static const int16_t NOISE_FLOOR[] = { -96, -93, -98, -96, -93, -96 };
23
24/* We can tune this as we go by monitoring really low values */
25#define ATH9K_NF_TOO_LOW -60
26
27/* AR5416 may return very high value (like -31 dBm), in those cases the nf
28 * is incorrect and we should use the static NF value. Later we can try to
29 * find out why they are reporting these values */
30
31static bool ath9k_hw_nf_in_range(struct ath_hal *ah, s16 nf)
32{
33 if (nf > ATH9K_NF_TOO_LOW) {
34 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
35 "noise floor value detected (%d) is "
36 "lower than what we think is a "
37 "reasonable value (%d)\n",
38 nf, ATH9K_NF_TOO_LOW);
39 return false;
40 }
41 return true;
42}
43
44static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
45{
46 int16_t nfval;
47 int16_t sort[ATH9K_NF_CAL_HIST_MAX];
48 int i, j;
49
50 for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++)
51 sort[i] = nfCalBuffer[i];
52
53 for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
54 for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
55 if (sort[j] > sort[j - 1]) {
56 nfval = sort[j];
57 sort[j] = sort[j - 1];
58 sort[j - 1] = nfval;
59 }
60 }
61 }
62 nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
63
64 return nfval;
65}
66
67static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
68 int16_t *nfarray)
69{
70 int i;
71
72 for (i = 0; i < NUM_NF_READINGS; i++) {
73 h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
74
75 if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
76 h[i].currIndex = 0;
77
78 if (h[i].invalidNFcount > 0) {
79 if (nfarray[i] < AR_PHY_CCA_MIN_BAD_VALUE ||
80 nfarray[i] > AR_PHY_CCA_MAX_HIGH_VALUE) {
81 h[i].invalidNFcount = ATH9K_NF_CAL_HIST_MAX;
82 } else {
83 h[i].invalidNFcount--;
84 h[i].privNF = nfarray[i];
85 }
86 } else {
87 h[i].privNF =
88 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
89 }
90 }
91 return;
92}
93
94static void ath9k_hw_do_getnf(struct ath_hal *ah,
95 int16_t nfarray[NUM_NF_READINGS])
96{
97 int16_t nf;
98
99 if (AR_SREV_9280_10_OR_LATER(ah))
100 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
101 else
102 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
103
104 if (nf & 0x100)
105 nf = 0 - ((nf ^ 0x1ff) + 1);
106 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
107 "NF calibrated [ctl] [chain 0] is %d\n", nf);
108 nfarray[0] = nf;
109
110 if (AR_SREV_9280_10_OR_LATER(ah))
111 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
112 AR9280_PHY_CH1_MINCCA_PWR);
113 else
114 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
115 AR_PHY_CH1_MINCCA_PWR);
116
117 if (nf & 0x100)
118 nf = 0 - ((nf ^ 0x1ff) + 1);
119 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
120 "NF calibrated [ctl] [chain 1] is %d\n", nf);
121 nfarray[1] = nf;
122
123 if (!AR_SREV_9280(ah)) {
124 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
125 AR_PHY_CH2_MINCCA_PWR);
126 if (nf & 0x100)
127 nf = 0 - ((nf ^ 0x1ff) + 1);
128 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
129 "NF calibrated [ctl] [chain 2] is %d\n", nf);
130 nfarray[2] = nf;
131 }
132
133 if (AR_SREV_9280_10_OR_LATER(ah))
134 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
135 AR9280_PHY_EXT_MINCCA_PWR);
136 else
137 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
138 AR_PHY_EXT_MINCCA_PWR);
139
140 if (nf & 0x100)
141 nf = 0 - ((nf ^ 0x1ff) + 1);
142 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
143 "NF calibrated [ext] [chain 0] is %d\n", nf);
144 nfarray[3] = nf;
145
146 if (AR_SREV_9280_10_OR_LATER(ah))
147 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
148 AR9280_PHY_CH1_EXT_MINCCA_PWR);
149 else
150 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
151 AR_PHY_CH1_EXT_MINCCA_PWR);
152
153 if (nf & 0x100)
154 nf = 0 - ((nf ^ 0x1ff) + 1);
155 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
156 "NF calibrated [ext] [chain 1] is %d\n", nf);
157 nfarray[4] = nf;
158
159 if (!AR_SREV_9280(ah)) {
160 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
161 AR_PHY_CH2_EXT_MINCCA_PWR);
162 if (nf & 0x100)
163 nf = 0 - ((nf ^ 0x1ff) + 1);
164 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
165 "NF calibrated [ext] [chain 2] is %d\n", nf);
166 nfarray[5] = nf;
167 }
168}
169
170static bool getNoiseFloorThresh(struct ath_hal *ah,
171 const struct ath9k_channel *chan,
172 int16_t *nft)
173{
174 switch (chan->chanmode) {
175 case CHANNEL_A:
176 case CHANNEL_A_HT20:
177 case CHANNEL_A_HT40PLUS:
178 case CHANNEL_A_HT40MINUS:
179 *nft = (int8_t)ath9k_hw_get_eeprom(ah, EEP_NFTHRESH_5);
180 break;
181 case CHANNEL_B:
182 case CHANNEL_G:
183 case CHANNEL_G_HT20:
184 case CHANNEL_G_HT40PLUS:
185 case CHANNEL_G_HT40MINUS:
186 *nft = (int8_t)ath9k_hw_get_eeprom(ah, EEP_NFTHRESH_2);
187 break;
188 default:
189 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
190 "invalid channel flags 0x%x\n", chan->channelFlags);
191 return false;
192 }
193
194 return true;
195}
196
197static void ath9k_hw_setup_calibration(struct ath_hal *ah,
198 struct hal_cal_list *currCal)
199{
200 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
201 AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
202 currCal->calData->calCountMax);
203
204 switch (currCal->calData->calType) {
205 case IQ_MISMATCH_CAL:
206 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
207 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
208 "starting IQ Mismatch Calibration\n");
209 break;
210 case ADC_GAIN_CAL:
211 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
212 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
213 "starting ADC Gain Calibration\n");
214 break;
215 case ADC_DC_CAL:
216 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
217 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
218 "starting ADC DC Calibration\n");
219 break;
220 case ADC_DC_INIT_CAL:
221 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
222 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
223 "starting Init ADC DC Calibration\n");
224 break;
225 }
226
227 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
228 AR_PHY_TIMING_CTRL4_DO_CAL);
229}
230
231static void ath9k_hw_reset_calibration(struct ath_hal *ah,
232 struct hal_cal_list *currCal)
233{
234 struct ath_hal_5416 *ahp = AH5416(ah);
235 int i;
236
237 ath9k_hw_setup_calibration(ah, currCal);
238
239 currCal->calState = CAL_RUNNING;
240
241 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
242 ahp->ah_Meas0.sign[i] = 0;
243 ahp->ah_Meas1.sign[i] = 0;
244 ahp->ah_Meas2.sign[i] = 0;
245 ahp->ah_Meas3.sign[i] = 0;
246 }
247
248 ahp->ah_CalSamples = 0;
249}
250
251static void ath9k_hw_per_calibration(struct ath_hal *ah,
252 struct ath9k_channel *ichan,
253 u8 rxchainmask,
254 struct hal_cal_list *currCal,
255 bool *isCalDone)
256{
257 struct ath_hal_5416 *ahp = AH5416(ah);
258
259 *isCalDone = false;
260
261 if (currCal->calState == CAL_RUNNING) {
262 if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
263 AR_PHY_TIMING_CTRL4_DO_CAL)) {
264
265 currCal->calData->calCollect(ah);
266 ahp->ah_CalSamples++;
267
268 if (ahp->ah_CalSamples >= currCal->calData->calNumSamples) {
269 int i, numChains = 0;
270 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
271 if (rxchainmask & (1 << i))
272 numChains++;
273 }
274
275 currCal->calData->calPostProc(ah, numChains);
276 ichan->CalValid |= currCal->calData->calType;
277 currCal->calState = CAL_DONE;
278 *isCalDone = true;
279 } else {
280 ath9k_hw_setup_calibration(ah, currCal);
281 }
282 }
283 } else if (!(ichan->CalValid & currCal->calData->calType)) {
284 ath9k_hw_reset_calibration(ah, currCal);
285 }
286}
287
288static bool ath9k_hw_iscal_supported(struct ath_hal *ah,
289 struct ath9k_channel *chan,
290 enum hal_cal_types calType)
291{
292 struct ath_hal_5416 *ahp = AH5416(ah);
293 bool retval = false;
294
295 switch (calType & ahp->ah_suppCals) {
296 case IQ_MISMATCH_CAL:
297 if (!IS_CHAN_B(chan))
298 retval = true;
299 break;
300 case ADC_GAIN_CAL:
301 case ADC_DC_CAL:
302 if (!IS_CHAN_B(chan)
303 && !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
304 retval = true;
305 break;
306 }
307
308 return retval;
309}
310
311static void ath9k_hw_iqcal_collect(struct ath_hal *ah)
312{
313 struct ath_hal_5416 *ahp = AH5416(ah);
314 int i;
315
316 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
317 ahp->ah_totalPowerMeasI[i] +=
318 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
319 ahp->ah_totalPowerMeasQ[i] +=
320 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
321 ahp->ah_totalIqCorrMeas[i] +=
322 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
323 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
324 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
325 ahp->ah_CalSamples, i, ahp->ah_totalPowerMeasI[i],
326 ahp->ah_totalPowerMeasQ[i],
327 ahp->ah_totalIqCorrMeas[i]);
328 }
329}
330
331static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah)
332{
333 struct ath_hal_5416 *ahp = AH5416(ah);
334 int i;
335
336 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
337 ahp->ah_totalAdcIOddPhase[i] +=
338 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
339 ahp->ah_totalAdcIEvenPhase[i] +=
340 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
341 ahp->ah_totalAdcQOddPhase[i] +=
342 REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
343 ahp->ah_totalAdcQEvenPhase[i] +=
344 REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
345
346 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
347 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
348 "oddq=0x%08x; evenq=0x%08x;\n",
349 ahp->ah_CalSamples, i,
350 ahp->ah_totalAdcIOddPhase[i],
351 ahp->ah_totalAdcIEvenPhase[i],
352 ahp->ah_totalAdcQOddPhase[i],
353 ahp->ah_totalAdcQEvenPhase[i]);
354 }
355}
356
357static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah)
358{
359 struct ath_hal_5416 *ahp = AH5416(ah);
360 int i;
361
362 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
363 ahp->ah_totalAdcDcOffsetIOddPhase[i] +=
364 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
365 ahp->ah_totalAdcDcOffsetIEvenPhase[i] +=
366 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
367 ahp->ah_totalAdcDcOffsetQOddPhase[i] +=
368 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
369 ahp->ah_totalAdcDcOffsetQEvenPhase[i] +=
370 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
371
372 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
373 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
374 "oddq=0x%08x; evenq=0x%08x;\n",
375 ahp->ah_CalSamples, i,
376 ahp->ah_totalAdcDcOffsetIOddPhase[i],
377 ahp->ah_totalAdcDcOffsetIEvenPhase[i],
378 ahp->ah_totalAdcDcOffsetQOddPhase[i],
379 ahp->ah_totalAdcDcOffsetQEvenPhase[i]);
380 }
381}
382
383static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains)
384{
385 struct ath_hal_5416 *ahp = AH5416(ah);
386 u32 powerMeasQ, powerMeasI, iqCorrMeas;
387 u32 qCoffDenom, iCoffDenom;
388 int32_t qCoff, iCoff;
389 int iqCorrNeg, i;
390
391 for (i = 0; i < numChains; i++) {
392 powerMeasI = ahp->ah_totalPowerMeasI[i];
393 powerMeasQ = ahp->ah_totalPowerMeasQ[i];
394 iqCorrMeas = ahp->ah_totalIqCorrMeas[i];
395
396 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
397 "Starting IQ Cal and Correction for Chain %d\n",
398 i);
399
400 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
401 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
402 i, ahp->ah_totalIqCorrMeas[i]);
403
404 iqCorrNeg = 0;
405
406 if (iqCorrMeas > 0x80000000) {
407 iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
408 iqCorrNeg = 1;
409 }
410
411 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
412 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
413 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
414 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
415 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
416 iqCorrNeg);
417
418 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
419 qCoffDenom = powerMeasQ / 64;
420
421 if (powerMeasQ != 0) {
422 iCoff = iqCorrMeas / iCoffDenom;
423 qCoff = powerMeasI / qCoffDenom - 64;
424 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
425 "Chn %d iCoff = 0x%08x\n", i, iCoff);
426 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
427 "Chn %d qCoff = 0x%08x\n", i, qCoff);
428
429 iCoff = iCoff & 0x3f;
430 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
431 "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
432 if (iqCorrNeg == 0x0)
433 iCoff = 0x40 - iCoff;
434
435 if (qCoff > 15)
436 qCoff = 15;
437 else if (qCoff <= -16)
438 qCoff = 16;
439
440 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
441 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
442 i, iCoff, qCoff);
443
444 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
445 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
446 iCoff);
447 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
448 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
449 qCoff);
450 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
451 "IQ Cal and Correction done for Chain %d\n",
452 i);
453 }
454 }
455
456 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
457 AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
458}
459
460static void ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah, u8 numChains)
461{
462 struct ath_hal_5416 *ahp = AH5416(ah);
463 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
464 u32 qGainMismatch, iGainMismatch, val, i;
465
466 for (i = 0; i < numChains; i++) {
467 iOddMeasOffset = ahp->ah_totalAdcIOddPhase[i];
468 iEvenMeasOffset = ahp->ah_totalAdcIEvenPhase[i];
469 qOddMeasOffset = ahp->ah_totalAdcQOddPhase[i];
470 qEvenMeasOffset = ahp->ah_totalAdcQEvenPhase[i];
471
472 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
473 "Starting ADC Gain Cal for Chain %d\n", i);
474
475 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
476 "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
477 iOddMeasOffset);
478 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
479 "Chn %d pwr_meas_even_i = 0x%08x\n", i,
480 iEvenMeasOffset);
481 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
482 "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
483 qOddMeasOffset);
484 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
485 "Chn %d pwr_meas_even_q = 0x%08x\n", i,
486 qEvenMeasOffset);
487
488 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
489 iGainMismatch =
490 ((iEvenMeasOffset * 32) /
491 iOddMeasOffset) & 0x3f;
492 qGainMismatch =
493 ((qOddMeasOffset * 32) /
494 qEvenMeasOffset) & 0x3f;
495
496 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
497 "Chn %d gain_mismatch_i = 0x%08x\n", i,
498 iGainMismatch);
499 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
500 "Chn %d gain_mismatch_q = 0x%08x\n", i,
501 qGainMismatch);
502
503 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
504 val &= 0xfffff000;
505 val |= (qGainMismatch) | (iGainMismatch << 6);
506 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
507
508 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
509 "ADC Gain Cal done for Chain %d\n", i);
510 }
511 }
512
513 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
514 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
515 AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
516}
517
518static void ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah, u8 numChains)
519{
520 struct ath_hal_5416 *ahp = AH5416(ah);
521 u32 iOddMeasOffset, iEvenMeasOffset, val, i;
522 int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
523 const struct hal_percal_data *calData =
524 ahp->ah_cal_list_curr->calData;
525 u32 numSamples =
526 (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
527
528 for (i = 0; i < numChains; i++) {
529 iOddMeasOffset = ahp->ah_totalAdcDcOffsetIOddPhase[i];
530 iEvenMeasOffset = ahp->ah_totalAdcDcOffsetIEvenPhase[i];
531 qOddMeasOffset = ahp->ah_totalAdcDcOffsetQOddPhase[i];
532 qEvenMeasOffset = ahp->ah_totalAdcDcOffsetQEvenPhase[i];
533
534 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
535 "Starting ADC DC Offset Cal for Chain %d\n", i);
536
537 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
538 "Chn %d pwr_meas_odd_i = %d\n", i,
539 iOddMeasOffset);
540 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
541 "Chn %d pwr_meas_even_i = %d\n", i,
542 iEvenMeasOffset);
543 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
544 "Chn %d pwr_meas_odd_q = %d\n", i,
545 qOddMeasOffset);
546 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
547 "Chn %d pwr_meas_even_q = %d\n", i,
548 qEvenMeasOffset);
549
550 iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
551 numSamples) & 0x1ff;
552 qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
553 numSamples) & 0x1ff;
554
555 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
556 "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
557 iDcMismatch);
558 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
559 "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
560 qDcMismatch);
561
562 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
563 val &= 0xc0000fff;
564 val |= (qDcMismatch << 12) | (iDcMismatch << 21);
565 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
566
567 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
568 "ADC DC Offset Cal done for Chain %d\n", i);
569 }
570
571 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
572 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
573 AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
574}
575
576void ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan,
577 bool *isCalDone)
578{
579 struct ath_hal_5416 *ahp = AH5416(ah);
580 struct ath9k_channel *ichan =
581 ath9k_regd_check_channel(ah, chan);
582 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
583
584 *isCalDone = true;
585
586 if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
587 return;
588
589 if (currCal == NULL)
590 return;
591
592 if (ichan == NULL) {
593 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
594 "invalid channel %u/0x%x; no mapping\n",
595 chan->channel, chan->channelFlags);
596 return;
597 }
598
599
600 if (currCal->calState != CAL_DONE) {
601 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
602 "Calibration state incorrect, %d\n",
603 currCal->calState);
604 return;
605 }
606
607
608 if (!ath9k_hw_iscal_supported(ah, chan, currCal->calData->calType))
609 return;
610
611 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
612 "Resetting Cal %d state for channel %u/0x%x\n",
613 currCal->calData->calType, chan->channel,
614 chan->channelFlags);
615
616 ichan->CalValid &= ~currCal->calData->calType;
617 currCal->calState = CAL_WAITING;
618
619 *isCalDone = false;
620}
621
622void ath9k_hw_start_nfcal(struct ath_hal *ah)
623{
624 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
625 AR_PHY_AGC_CONTROL_ENABLE_NF);
626 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
627 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
628 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
629}
630
631void ath9k_hw_loadnf(struct ath_hal *ah, struct ath9k_channel *chan)
632{
633 struct ath9k_nfcal_hist *h;
634 int i, j;
635 int32_t val;
636 const u32 ar5416_cca_regs[6] = {
637 AR_PHY_CCA,
638 AR_PHY_CH1_CCA,
639 AR_PHY_CH2_CCA,
640 AR_PHY_EXT_CCA,
641 AR_PHY_CH1_EXT_CCA,
642 AR_PHY_CH2_EXT_CCA
643 };
644 u8 chainmask;
645
646 if (AR_SREV_9280(ah))
647 chainmask = 0x1B;
648 else
649 chainmask = 0x3F;
650
651#ifdef ATH_NF_PER_CHAN
652 h = chan->nfCalHist;
653#else
654 h = ah->nfCalHist;
655#endif
656
657 for (i = 0; i < NUM_NF_READINGS; i++) {
658 if (chainmask & (1 << i)) {
659 val = REG_READ(ah, ar5416_cca_regs[i]);
660 val &= 0xFFFFFE00;
661 val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
662 REG_WRITE(ah, ar5416_cca_regs[i], val);
663 }
664 }
665
666 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
667 AR_PHY_AGC_CONTROL_ENABLE_NF);
668 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
669 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
670 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
671
672 for (j = 0; j < 1000; j++) {
673 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
674 AR_PHY_AGC_CONTROL_NF) == 0)
675 break;
676 udelay(10);
677 }
678
679 for (i = 0; i < NUM_NF_READINGS; i++) {
680 if (chainmask & (1 << i)) {
681 val = REG_READ(ah, ar5416_cca_regs[i]);
682 val &= 0xFFFFFE00;
683 val |= (((u32) (-50) << 1) & 0x1ff);
684 REG_WRITE(ah, ar5416_cca_regs[i], val);
685 }
686 }
687}
688
689int16_t ath9k_hw_getnf(struct ath_hal *ah,
690 struct ath9k_channel *chan)
691{
692 int16_t nf, nfThresh;
693 int16_t nfarray[NUM_NF_READINGS] = { 0 };
694 struct ath9k_nfcal_hist *h;
695 u8 chainmask;
696
697 if (AR_SREV_9280(ah))
698 chainmask = 0x1B;
699 else
700 chainmask = 0x3F;
701
702 chan->channelFlags &= (~CHANNEL_CW_INT);
703 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
704 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
705 "NF did not complete in calibration window\n");
706 nf = 0;
707 chan->rawNoiseFloor = nf;
708 return chan->rawNoiseFloor;
709 } else {
710 ath9k_hw_do_getnf(ah, nfarray);
711 nf = nfarray[0];
712 if (getNoiseFloorThresh(ah, chan, &nfThresh)
713 && nf > nfThresh) {
714 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
715 "noise floor failed detected; "
716 "detected %d, threshold %d\n",
717 nf, nfThresh);
718 chan->channelFlags |= CHANNEL_CW_INT;
719 }
720 }
721
722#ifdef ATH_NF_PER_CHAN
723 h = chan->nfCalHist;
724#else
725 h = ah->nfCalHist;
726#endif
727
728 ath9k_hw_update_nfcal_hist_buffer(h, nfarray);
729 chan->rawNoiseFloor = h[0].privNF;
730
731 return chan->rawNoiseFloor;
732}
733
734void ath9k_init_nfcal_hist_buffer(struct ath_hal *ah)
735{
736 int i, j;
737
738 for (i = 0; i < NUM_NF_READINGS; i++) {
739 ah->nfCalHist[i].currIndex = 0;
740 ah->nfCalHist[i].privNF = AR_PHY_CCA_MAX_GOOD_VALUE;
741 ah->nfCalHist[i].invalidNFcount =
742 AR_PHY_CCA_FILTERWINDOW_LENGTH;
743 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
744 ah->nfCalHist[i].nfCalBuffer[j] =
745 AR_PHY_CCA_MAX_GOOD_VALUE;
746 }
747 }
748 return;
749}
750
751s16 ath9k_hw_getchan_noise(struct ath_hal *ah, struct ath9k_channel *chan)
752{
753 struct ath9k_channel *ichan;
754 s16 nf;
755
756 ichan = ath9k_regd_check_channel(ah, chan);
757 if (ichan == NULL) {
758 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
759 "invalid channel %u/0x%x; no mapping\n",
760 chan->channel, chan->channelFlags);
761 return ATH_DEFAULT_NOISE_FLOOR;
762 }
763 if (ichan->rawNoiseFloor == 0) {
764 enum wireless_mode mode = ath9k_hw_chan2wmode(ah, chan);
765 nf = NOISE_FLOOR[mode];
766 } else
767 nf = ichan->rawNoiseFloor;
768
769 if (!ath9k_hw_nf_in_range(ah, nf))
770 nf = ATH_DEFAULT_NOISE_FLOOR;
771
772 return nf;
773}
774
775bool ath9k_hw_calibrate(struct ath_hal *ah, struct ath9k_channel *chan,
776 u8 rxchainmask, bool longcal,
777 bool *isCalDone)
778{
779 struct ath_hal_5416 *ahp = AH5416(ah);
780 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
781 struct ath9k_channel *ichan = ath9k_regd_check_channel(ah, chan);
782
783 *isCalDone = true;
784
785 if (ichan == NULL) {
786 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
787 "invalid channel %u/0x%x; no mapping\n",
788 chan->channel, chan->channelFlags);
789 return false;
790 }
791
792 if (currCal &&
793 (currCal->calState == CAL_RUNNING ||
794 currCal->calState == CAL_WAITING)) {
795 ath9k_hw_per_calibration(ah, ichan, rxchainmask, currCal,
796 isCalDone);
797 if (*isCalDone) {
798 ahp->ah_cal_list_curr = currCal = currCal->calNext;
799
800 if (currCal->calState == CAL_WAITING) {
801 *isCalDone = false;
802 ath9k_hw_reset_calibration(ah, currCal);
803 }
804 }
805 }
806
807 if (longcal) {
808 ath9k_hw_getnf(ah, ichan);
809 ath9k_hw_loadnf(ah, ah->ah_curchan);
810 ath9k_hw_start_nfcal(ah);
811
812 if ((ichan->channelFlags & CHANNEL_CW_INT) != 0) {
813 chan->channelFlags |= CHANNEL_CW_INT;
814 ichan->channelFlags &= ~CHANNEL_CW_INT;
815 }
816 }
817
818 return true;
819}
820
821static inline void ath9k_hw_9285_pa_cal(struct ath_hal *ah)
822{
823
824 u32 regVal;
825 int i, offset, offs_6_1, offs_0;
826 u32 ccomp_org, reg_field;
827 u32 regList[][2] = {
828 { 0x786c, 0 },
829 { 0x7854, 0 },
830 { 0x7820, 0 },
831 { 0x7824, 0 },
832 { 0x7868, 0 },
833 { 0x783c, 0 },
834 { 0x7838, 0 },
835 };
836
837 if (AR_SREV_9285_11(ah)) {
838 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
839 udelay(10);
840 }
841
842 for (i = 0; i < ARRAY_SIZE(regList); i++)
843 regList[i][1] = REG_READ(ah, regList[i][0]);
844
845 regVal = REG_READ(ah, 0x7834);
846 regVal &= (~(0x1));
847 REG_WRITE(ah, 0x7834, regVal);
848 regVal = REG_READ(ah, 0x9808);
849 regVal |= (0x1 << 27);
850 REG_WRITE(ah, 0x9808, regVal);
851
852 REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
853 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
854 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
855 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
856 REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
857 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
858 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
859 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 1);
860 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
861 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
862 REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
863 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
864 ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
865 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 7);
866
867 REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
868 udelay(30);
869 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
870 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
871
872 for (i = 6; i > 0; i--) {
873 regVal = REG_READ(ah, 0x7834);
874 regVal |= (1 << (19 + i));
875 REG_WRITE(ah, 0x7834, regVal);
876 udelay(1);
877 regVal = REG_READ(ah, 0x7834);
878 regVal &= (~(0x1 << (19 + i)));
879 reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
880 regVal |= (reg_field << (19 + i));
881 REG_WRITE(ah, 0x7834, regVal);
882 }
883
884 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
885 udelay(1);
886 reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
887 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
888 offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
889 offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
890
891 offset = (offs_6_1<<1) | offs_0;
892 offset = offset - 0;
893 offs_6_1 = offset>>1;
894 offs_0 = offset & 1;
895
896 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
897 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
898
899 regVal = REG_READ(ah, 0x7834);
900 regVal |= 0x1;
901 REG_WRITE(ah, 0x7834, regVal);
902 regVal = REG_READ(ah, 0x9808);
903 regVal &= (~(0x1 << 27));
904 REG_WRITE(ah, 0x9808, regVal);
905
906 for (i = 0; i < ARRAY_SIZE(regList); i++)
907 REG_WRITE(ah, regList[i][0], regList[i][1]);
908
909 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
910
911 if (AR_SREV_9285_11(ah))
912 REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
913
914}
915
916bool ath9k_hw_init_cal(struct ath_hal *ah,
917 struct ath9k_channel *chan)
918{
919 struct ath_hal_5416 *ahp = AH5416(ah);
920 struct ath9k_channel *ichan = ath9k_regd_check_channel(ah, chan);
921
922 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
923 REG_READ(ah, AR_PHY_AGC_CONTROL) |
924 AR_PHY_AGC_CONTROL_CAL);
925
926 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0)) {
927 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
928 "offset calibration failed to complete in 1ms; "
929 "noisy environment?\n");
930 return false;
931 }
932
933 if (AR_SREV_9285(ah) && AR_SREV_9285_11_OR_LATER(ah))
934 ath9k_hw_9285_pa_cal(ah);
935
936 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
937 REG_READ(ah, AR_PHY_AGC_CONTROL) |
938 AR_PHY_AGC_CONTROL_NF);
939
940 ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr = NULL;
941
942 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
943 if (ath9k_hw_iscal_supported(ah, chan, ADC_GAIN_CAL)) {
944 INIT_CAL(&ahp->ah_adcGainCalData);
945 INSERT_CAL(ahp, &ahp->ah_adcGainCalData);
946 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
947 "enabling ADC Gain Calibration.\n");
948 }
949 if (ath9k_hw_iscal_supported(ah, chan, ADC_DC_CAL)) {
950 INIT_CAL(&ahp->ah_adcDcCalData);
951 INSERT_CAL(ahp, &ahp->ah_adcDcCalData);
952 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
953 "enabling ADC DC Calibration.\n");
954 }
955 if (ath9k_hw_iscal_supported(ah, chan, IQ_MISMATCH_CAL)) {
956 INIT_CAL(&ahp->ah_iqCalData);
957 INSERT_CAL(ahp, &ahp->ah_iqCalData);
958 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
959 "enabling IQ Calibration.\n");
960 }
961
962 ahp->ah_cal_list_curr = ahp->ah_cal_list;
963
964 if (ahp->ah_cal_list_curr)
965 ath9k_hw_reset_calibration(ah, ahp->ah_cal_list_curr);
966 }
967
968 ichan->CalValid = 0;
969
970 return true;
971}
972
973const struct hal_percal_data iq_cal_multi_sample = {
974 IQ_MISMATCH_CAL,
975 MAX_CAL_SAMPLES,
976 PER_MIN_LOG_COUNT,
977 ath9k_hw_iqcal_collect,
978 ath9k_hw_iqcalibrate
979};
980const struct hal_percal_data iq_cal_single_sample = {
981 IQ_MISMATCH_CAL,
982 MIN_CAL_SAMPLES,
983 PER_MAX_LOG_COUNT,
984 ath9k_hw_iqcal_collect,
985 ath9k_hw_iqcalibrate
986};
987const struct hal_percal_data adc_gain_cal_multi_sample = {
988 ADC_GAIN_CAL,
989 MAX_CAL_SAMPLES,
990 PER_MIN_LOG_COUNT,
991 ath9k_hw_adc_gaincal_collect,
992 ath9k_hw_adc_gaincal_calibrate
993};
994const struct hal_percal_data adc_gain_cal_single_sample = {
995 ADC_GAIN_CAL,
996 MIN_CAL_SAMPLES,
997 PER_MAX_LOG_COUNT,
998 ath9k_hw_adc_gaincal_collect,
999 ath9k_hw_adc_gaincal_calibrate
1000};
1001const struct hal_percal_data adc_dc_cal_multi_sample = {
1002 ADC_DC_CAL,
1003 MAX_CAL_SAMPLES,
1004 PER_MIN_LOG_COUNT,
1005 ath9k_hw_adc_dccal_collect,
1006 ath9k_hw_adc_dccal_calibrate
1007};
1008const struct hal_percal_data adc_dc_cal_single_sample = {
1009 ADC_DC_CAL,
1010 MIN_CAL_SAMPLES,
1011 PER_MAX_LOG_COUNT,
1012 ath9k_hw_adc_dccal_collect,
1013 ath9k_hw_adc_dccal_calibrate
1014};
1015const struct hal_percal_data adc_init_dc_cal = {
1016 ADC_DC_INIT_CAL,
1017 MIN_CAL_SAMPLES,
1018 INIT_LOG_COUNT,
1019 ath9k_hw_adc_dccal_collect,
1020 ath9k_hw_adc_dccal_calibrate
1021};
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
deleted file mode 100644
index c5033f6f42ac..000000000000
--- a/drivers/net/wireless/ath9k/core.c
+++ /dev/null
@@ -1,1886 +0,0 @@
1/*
2 * Copyright (c) 2008, Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 /* Implementation of the main "ATH" layer. */
18
19#include "core.h"
20#include "regd.h"
21
static int ath_outdoor;         /* enable outdoor use */

/*
 * RSSI thresholds and hold-off period governing the 2x2 <-> 3x3 tx
 * chainmask auto-switch logic in ath_chainmask_sel_logic().
 */
static u32 ath_chainmask_sel_up_rssi_thres =
        ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
        ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
        ATH_CHAINMASK_SEL_TIMEOUT;
30
31/* return bus cachesize in 4B word units */
32
33static void bus_read_cachesize(struct ath_softc *sc, int *csz)
34{
35 u8 u8tmp;
36
37 pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
38 *csz = (int)u8tmp;
39
40 /*
41 * This check was put in to avoid "unplesant" consequences if
42 * the bootrom has not fully initialized all PCI devices.
43 * Sometimes the cache line size register is not set
44 */
45
46 if (*csz == 0)
47 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
48}
49
/*
 * Set current operating mode
 *
 * This function initializes and fills the rate table in the ATH object based
 * on the operating mode.  It rebuilds both the rate-code -> rate-index map
 * (sc_rixmap) and the per-rate-code h/w map (sc_hwmap), then records the
 * current rate table, mode and protection rate index on the softc.
 */
static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
{
        const struct ath9k_rate_table *rt;
        int i;

        /* Invalidate the old rate-code map before refilling it (0xff = unused). */
        memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
        rt = ath9k_hw_getratetable(sc->sc_ah, mode);
        BUG_ON(!rt);

        for (i = 0; i < rt->rateCount; i++)
                sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;

        /* Rebuild the h/w map for every possible 8-bit rate code. */
        memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
        for (i = 0; i < 256; i++) {
                u8 ix = rt->rateCodeToIndex[i];

                if (ix == 0xff)
                        continue;

                sc->sc_hwmap[i].ieeerate =
                        rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
                sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;

                if (rt->info[ix].shortPreamble ||
                    rt->info[ix].phy == PHY_OFDM) {
                        /* XXX: Handle this */
                }

                /* NB: this uses the last entry if the rate isn't found */
                /* XXX beware of overlow */
        }
        sc->sc_currates = rt;
        sc->sc_curmode = mode;
        /*
         * All protection frames are transmited at 2Mb/s for
         * 11g, otherwise at 1Mb/s.
         * XXX select protection rate index from rate table.
         */
        sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
96
97/*
98 * Set up rate table (legacy rates)
99 */
100static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
101{
102 struct ath_hal *ah = sc->sc_ah;
103 const struct ath9k_rate_table *rt = NULL;
104 struct ieee80211_supported_band *sband;
105 struct ieee80211_rate *rate;
106 int i, maxrates;
107
108 switch (band) {
109 case IEEE80211_BAND_2GHZ:
110 rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
111 break;
112 case IEEE80211_BAND_5GHZ:
113 rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
114 break;
115 default:
116 break;
117 }
118
119 if (rt == NULL)
120 return;
121
122 sband = &sc->sbands[band];
123 rate = sc->rates[band];
124
125 if (rt->rateCount > ATH_RATE_MAX)
126 maxrates = ATH_RATE_MAX;
127 else
128 maxrates = rt->rateCount;
129
130 for (i = 0; i < maxrates; i++) {
131 rate[i].bitrate = rt->info[i].rateKbps / 100;
132 rate[i].hw_value = rt->info[i].rateCode;
133 sband->n_bitrates++;
134 DPRINTF(sc, ATH_DBG_CONFIG,
135 "%s: Rate: %2dMbps, ratecode: %2d\n",
136 __func__,
137 rate[i].bitrate / 10,
138 rate[i].hw_value);
139 }
140}
141
142/*
143 * Set up channel list
144 */
145static int ath_setup_channels(struct ath_softc *sc)
146{
147 struct ath_hal *ah = sc->sc_ah;
148 int nchan, i, a = 0, b = 0;
149 u8 regclassids[ATH_REGCLASSIDS_MAX];
150 u32 nregclass = 0;
151 struct ieee80211_supported_band *band_2ghz;
152 struct ieee80211_supported_band *band_5ghz;
153 struct ieee80211_channel *chan_2ghz;
154 struct ieee80211_channel *chan_5ghz;
155 struct ath9k_channel *c;
156
157 /* Fill in ah->ah_channels */
158 if (!ath9k_regd_init_channels(ah,
159 ATH_CHAN_MAX,
160 (u32 *)&nchan,
161 regclassids,
162 ATH_REGCLASSIDS_MAX,
163 &nregclass,
164 CTRY_DEFAULT,
165 false,
166 1)) {
167 u32 rd = ah->ah_currentRD;
168
169 DPRINTF(sc, ATH_DBG_FATAL,
170 "%s: unable to collect channel list; "
171 "regdomain likely %u country code %u\n",
172 __func__, rd, CTRY_DEFAULT);
173 return -EINVAL;
174 }
175
176 band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
177 band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
178 chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
179 chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];
180
181 for (i = 0; i < nchan; i++) {
182 c = &ah->ah_channels[i];
183 if (IS_CHAN_2GHZ(c)) {
184 chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
185 chan_2ghz[a].center_freq = c->channel;
186 chan_2ghz[a].max_power = c->maxTxPower;
187
188 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
189 chan_2ghz[a].flags |=
190 IEEE80211_CHAN_NO_IBSS;
191 if (c->channelFlags & CHANNEL_PASSIVE)
192 chan_2ghz[a].flags |=
193 IEEE80211_CHAN_PASSIVE_SCAN;
194
195 band_2ghz->n_channels = ++a;
196
197 DPRINTF(sc, ATH_DBG_CONFIG,
198 "%s: 2MHz channel: %d, "
199 "channelFlags: 0x%x\n",
200 __func__,
201 c->channel,
202 c->channelFlags);
203 } else if (IS_CHAN_5GHZ(c)) {
204 chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
205 chan_5ghz[b].center_freq = c->channel;
206 chan_5ghz[b].max_power = c->maxTxPower;
207
208 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
209 chan_5ghz[b].flags |=
210 IEEE80211_CHAN_NO_IBSS;
211 if (c->channelFlags & CHANNEL_PASSIVE)
212 chan_5ghz[b].flags |=
213 IEEE80211_CHAN_PASSIVE_SCAN;
214
215 band_5ghz->n_channels = ++b;
216
217 DPRINTF(sc, ATH_DBG_CONFIG,
218 "%s: 5MHz channel: %d, "
219 "channelFlags: 0x%x\n",
220 __func__,
221 c->channel,
222 c->channelFlags);
223 }
224 }
225
226 return 0;
227}
228
229/*
230 * Determine mode from channel flags
231 *
232 * This routine will provide the enumerated WIRELESSS_MODE value based
233 * on the settings of the channel flags. If no valid set of flags
234 * exist, the lowest mode (11b) is selected.
235*/
236
237static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
238{
239 if (chan->chanmode == CHANNEL_A)
240 return ATH9K_MODE_11A;
241 else if (chan->chanmode == CHANNEL_G)
242 return ATH9K_MODE_11G;
243 else if (chan->chanmode == CHANNEL_B)
244 return ATH9K_MODE_11B;
245 else if (chan->chanmode == CHANNEL_A_HT20)
246 return ATH9K_MODE_11NA_HT20;
247 else if (chan->chanmode == CHANNEL_G_HT20)
248 return ATH9K_MODE_11NG_HT20;
249 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
250 return ATH9K_MODE_11NA_HT40PLUS;
251 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
252 return ATH9K_MODE_11NA_HT40MINUS;
253 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
254 return ATH9K_MODE_11NG_HT40PLUS;
255 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
256 return ATH9K_MODE_11NG_HT40MINUS;
257
258 WARN_ON(1); /* should not get here */
259
260 return ATH9K_MODE_11B;
261}
262
/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 *
 * Always returns 0.
 */
static int ath_stop(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;

        DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %ld\n",
                __func__, sc->sc_flags & SC_OP_INVALID);

        /*
         * Shutdown the hardware and driver:
         *    stop output from above
         *    turn off timers
         *    disable interrupts
         *    clear transmit machinery
         *    clear receive machinery
         *    turn off the radio
         *    reclaim beacon resources
         *
         * Note that some of this work is not possible if the
         * hardware is gone (invalid).
         */

        /* Drain tx first; if the h/w is gone (SC_OP_INVALID) the rx
         * engine must not be touched, so only the s/w rx link is cleared. */
        ath_draintxq(sc, false);
        if (!(sc->sc_flags & SC_OP_INVALID)) {
                ath_stoprecv(sc);
                ath9k_hw_phy_disable(ah);
        } else
                sc->sc_rxlink = NULL;

        return 0;
}
300
/*
 * Set the current channel
 *
 * Set/change channels. If the channel is really being changed, it's done
 * by reseting the chip. To accomplish this we must first cleanup any pending
 * DMA, then restart stuff after a la ath_init.
 *
 * Returns 0 on success, -EIO if the device is invalid or the h/w reset or
 * rx restart fails.
 */
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
        struct ath_hal *ah = sc->sc_ah;
        bool fastcc = true, stopped;

        if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
                return -EIO;

        DPRINTF(sc, ATH_DBG_CONFIG,
                "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
                __func__,
                ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
                                  sc->sc_ah->ah_curchan->channelFlags),
                sc->sc_ah->ah_curchan->channel,
                ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
                hchan->channel, hchan->channelFlags);

        /* Only reset when the channel actually changed, or when a
         * chainmask update / full reset was explicitly requested. */
        if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
            hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
            (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
            (sc->sc_flags & SC_OP_FULL_RESET)) {
                int status;
                /*
                 * This is only performed if the channel settings have
                 * actually changed.
                 *
                 * To switch channels clear any pending DMA operations;
                 * wait long enough for the RX fifo to drain, reset the
                 * hardware at the new frequency, and then re-enable
                 * the relevant bits of the h/w.
                 */
                ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
                ath_draintxq(sc, false);        /* clear pending tx frames */
                stopped = ath_stoprecv(sc);     /* turn off frame recv */

                /* XXX: do not flush receive queue here. We don't want
                 * to flush data frames already in queue because of
                 * changing channel. */

                /* Fast channel change is only safe when rx stopped cleanly
                 * and no full reset was requested. */
                if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
                        fastcc = false;

                spin_lock_bh(&sc->sc_resetlock);
                if (!ath9k_hw_reset(ah, hchan,
                                    sc->sc_ht_info.tx_chan_width,
                                    sc->sc_tx_chainmask,
                                    sc->sc_rx_chainmask,
                                    sc->sc_ht_extprotspacing,
                                    fastcc, &status)) {
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: unable to reset channel %u (%uMhz) "
                                "flags 0x%x hal status %u\n", __func__,
                                ath9k_hw_mhz2ieee(ah, hchan->channel,
                                                  hchan->channelFlags),
                                hchan->channel, hchan->channelFlags, status);
                        spin_unlock_bh(&sc->sc_resetlock);
                        return -EIO;
                }
                spin_unlock_bh(&sc->sc_resetlock);

                sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
                sc->sc_flags &= ~SC_OP_FULL_RESET;

                /* Re-enable rx framework */
                if (ath_startrecv(sc) != 0) {
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: unable to restart recv logic\n", __func__);
                        return -EIO;
                }
                /*
                 * Change channels and update the h/w rate map
                 * if we're switching; e.g. 11a to 11b/g.
                 */
                ath_setcurmode(sc, ath_chan2mode(hchan));

                ath_update_txpow(sc);   /* update tx power state */
                /*
                 * Re-enable interrupts.
                 */
                ath9k_hw_set_interrupts(ah, sc->sc_imask);
        }
        return 0;
}
391
392/**********************/
393/* Chainmask Handling */
394/**********************/
395
396static void ath_chainmask_sel_timertimeout(unsigned long data)
397{
398 struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
399 cm->switch_allowed = 1;
400}
401
402/* Start chainmask select timer */
403static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
404{
405 cm->switch_allowed = 0;
406 mod_timer(&cm->timer, ath_chainmask_sel_period);
407}
408
409/* Stop chainmask select timer */
410static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
411{
412 cm->switch_allowed = 0;
413 del_timer_sync(&cm->timer);
414}
415
416static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
417{
418 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
419
420 memset(cm, 0, sizeof(struct ath_chainmask_sel));
421
422 cm->cur_tx_mask = sc->sc_tx_chainmask;
423 cm->cur_rx_mask = sc->sc_rx_chainmask;
424 cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
425 setup_timer(&cm->timer,
426 ath_chainmask_sel_timertimeout, (unsigned long) cm);
427}
428
/*
 * Decide the tx chainmask for a node based on its average tx RSSI.
 *
 * Switches down from 3x3 to the configured mask when RSSI is high enough
 * (strong link, fewer chains suffice) and back up to 3x3 when RSSI drops,
 * with a hold-off timer between switches to avoid flapping.
 *
 * Returns the chainmask to use for this node.
 */
int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

        /*
         * Disable auto-swtiching in one of the following if conditions.
         * sc_chainmask_auto_sel is used for internal global auto-switching
         * enabled/disabled setting
         */
        if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
                cm->cur_tx_mask = sc->sc_tx_chainmask;
                return cm->cur_tx_mask;
        }

        /* No RSSI samples collected yet — keep the current mask. */
        if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
                return cm->cur_tx_mask;

        if (cm->switch_allowed) {
                /* Switch down from tx 3 to tx 2. */
                if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
                    ATH_RSSI_OUT(cm->tx_avgrssi) >=
                    ath_chainmask_sel_down_rssi_thres) {
                        cm->cur_tx_mask = sc->sc_tx_chainmask;

                        /* Don't let another switch happen until
                         * this timer expires */
                        ath_chainmask_sel_timerstart(cm);
                }
                /* Switch up from tx 2 to 3. */
                else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
                         ATH_RSSI_OUT(cm->tx_avgrssi) <=
                         ath_chainmask_sel_up_rssi_thres) {
                        cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;

                        /* Don't let another switch happen
                         * until this timer expires */
                        ath_chainmask_sel_timerstart(cm);
                }
        }

        return cm->cur_tx_mask;
}
471
472/*
473 * Update tx/rx chainmask. For legacy association,
474 * hard code chainmask to 1x1, for 11n association, use
475 * the chainmask configuration.
476 */
477
478void ath_update_chainmask(struct ath_softc *sc, int is_ht)
479{
480 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
481 if (is_ht) {
482 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
483 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
484 } else {
485 sc->sc_tx_chainmask = 1;
486 sc->sc_rx_chainmask = 1;
487 }
488
489 DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
490 __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
491}
492
493/*******/
494/* ANI */
495/*******/
496
/*
 * This routine performs the periodic noise floor calibration function
 * that is used to adjust and optimize the chip performance.  This
 * takes environmental changes (location, temperature) into account.
 * When the task is complete, it reschedules itself depending on the
 * appropriate interval that was calculated.
 *
 * Runs as a timer callback; 'data' is the struct ath_softc pointer.
 * All interval bookkeeping is done in milliseconds relative to the
 * per-category timestamps stored in sc->sc_ani.
 */
static void ath_ani_calibrate(unsigned long data)
{
        struct ath_softc *sc;
        struct ath_hal *ah;
        bool longcal = false;
        bool shortcal = false;
        bool aniflag = false;
        unsigned int timestamp = jiffies_to_msecs(jiffies);
        u32 cal_interval;

        sc = (struct ath_softc *)data;
        ah = sc->sc_ah;

        /*
         * don't calibrate when we're scanning.
         * we are most likely not on our home channel.
         */
        if (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)
                return;

        /* Long calibration runs independently of short calibration. */
        if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
                longcal = true;
                DPRINTF(sc, ATH_DBG_ANI, "%s: longcal @%lu\n",
                        __func__, jiffies);
                sc->sc_ani.sc_longcal_timer = timestamp;
        }

        /* Short calibration applies only while sc_caldone is false */
        if (!sc->sc_ani.sc_caldone) {
                if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
                    ATH_SHORT_CALINTERVAL) {
                        shortcal = true;
                        DPRINTF(sc, ATH_DBG_ANI, "%s: shortcal @%lu\n",
                                __func__, jiffies);
                        sc->sc_ani.sc_shortcal_timer = timestamp;
                        sc->sc_ani.sc_resetcal_timer = timestamp;
                }
        } else {
                /* Calibration finished: periodically re-check whether the
                 * stored calibration is still valid for the channel. */
                if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
                    ATH_RESTART_CALINTERVAL) {
                        ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
                                                &sc->sc_ani.sc_caldone);
                        if (sc->sc_ani.sc_caldone)
                                sc->sc_ani.sc_resetcal_timer = timestamp;
                }
        }

        /* Verify whether we must check ANI */
        if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
            ATH_ANI_POLLINTERVAL) {
                aniflag = true;
                sc->sc_ani.sc_checkani_timer = timestamp;
        }

        /* Skip all processing if there's nothing to do. */
        if (longcal || shortcal || aniflag) {
                /* Call ANI routine if necessary */
                if (aniflag)
                        ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
                                             ah->ah_curchan);

                /* Perform calibration if necessary */
                if (longcal || shortcal) {
                        bool iscaldone = false;

                        if (ath9k_hw_calibrate(ah, ah->ah_curchan,
                                               sc->sc_rx_chainmask, longcal,
                                               &iscaldone)) {
                                if (longcal)
                                        sc->sc_ani.sc_noise_floor =
                                                ath9k_hw_getchan_noise(ah,
                                                               ah->ah_curchan);

                                DPRINTF(sc, ATH_DBG_ANI,
                                        "%s: calibrate chan %u/%x nf: %d\n",
                                        __func__,
                                        ah->ah_curchan->channel,
                                        ah->ah_curchan->channelFlags,
                                        sc->sc_ani.sc_noise_floor);
                        } else {
                                DPRINTF(sc, ATH_DBG_ANY,
                                        "%s: calibrate chan %u/%x failed\n",
                                        __func__,
                                        ah->ah_curchan->channel,
                                        ah->ah_curchan->channelFlags);
                        }
                        sc->sc_ani.sc_caldone = iscaldone;
                }
        }

        /*
         * Set timer interval based on previous results.
         * The interval must be the shortest necessary to satisfy ANI,
         * short calibration and long calibration.
         */

        cal_interval = ATH_ANI_POLLINTERVAL;
        if (!sc->sc_ani.sc_caldone)
                cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);

        mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval));
}
608
609/******************/
610/* VAP management */
611/******************/
612
613int ath_vap_attach(struct ath_softc *sc,
614 int if_id,
615 struct ieee80211_vif *if_data,
616 enum ath9k_opmode opmode)
617{
618 struct ath_vap *avp;
619
620 if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
621 DPRINTF(sc, ATH_DBG_FATAL,
622 "%s: Invalid interface id = %u\n", __func__, if_id);
623 return -EINVAL;
624 }
625
626 switch (opmode) {
627 case ATH9K_M_STA:
628 case ATH9K_M_IBSS:
629 case ATH9K_M_MONITOR:
630 break;
631 case ATH9K_M_HOSTAP:
632 /* XXX not right, beacon buffer is allocated on RUN trans */
633 if (list_empty(&sc->sc_bbuf))
634 return -ENOMEM;
635 break;
636 default:
637 return -EINVAL;
638 }
639
640 /* create ath_vap */
641 avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
642 if (avp == NULL)
643 return -ENOMEM;
644
645 memset(avp, 0, sizeof(struct ath_vap));
646 avp->av_if_data = if_data;
647 /* Set the VAP opmode */
648 avp->av_opmode = opmode;
649 avp->av_bslot = -1;
650
651 if (opmode == ATH9K_M_HOSTAP)
652 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
653
654 sc->sc_vaps[if_id] = avp;
655 sc->sc_nvaps++;
656 /* Set the device opmode */
657 sc->sc_ah->ah_opmode = opmode;
658
659 /* default VAP configuration */
660 avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
661 avp->av_config.av_fixed_retryset = 0x03030303;
662
663 return 0;
664}
665
/*
 * Detach the VAP at the given interface slot.
 *
 * Quiesces the hardware (interrupts off, tx drained, rx stopped and
 * flushed) so no frame can still reference the VAP, then frees it.
 *
 * Returns 0 on success, -EINVAL if no VAP is attached at if_id.
 */
int ath_vap_detach(struct ath_softc *sc, int if_id)
{
        struct ath_hal *ah = sc->sc_ah;
        struct ath_vap *avp;

        avp = sc->sc_vaps[if_id];
        if (avp == NULL) {
                DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
                        __func__, if_id);
                return -EINVAL;
        }

        /*
         * Quiesce the hardware while we remove the vap. In
         * particular we need to reclaim all references to the
         * vap state by any frames pending on the tx queues.
         *
         * XXX can we do this w/o affecting other vap's?
         */
        ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
        ath_draintxq(sc, false);        /* stop xmit side */
        ath_stoprecv(sc);               /* stop recv side */
        ath_flushrecv(sc);              /* flush recv queue */

        kfree(avp);
        sc->sc_vaps[if_id] = NULL;
        sc->sc_nvaps--;

        return 0;
}
696
697int ath_vap_config(struct ath_softc *sc,
698 int if_id, struct ath_vap_config *if_config)
699{
700 struct ath_vap *avp;
701
702 if (if_id >= ATH_BCBUF) {
703 DPRINTF(sc, ATH_DBG_FATAL,
704 "%s: Invalid interface id = %u\n", __func__, if_id);
705 return -EINVAL;
706 }
707
708 avp = sc->sc_vaps[if_id];
709 ASSERT(avp != NULL);
710
711 if (avp)
712 memcpy(&avp->av_config, if_config, sizeof(avp->av_config));
713
714 return 0;
715}
716
717/********/
718/* Core */
719/********/
720
/*
 * Bring the device up on the given initial channel.
 *
 * Stops any previous state, resets the hardware, restarts the rx engine,
 * builds the interrupt mask and sets the current mode.  Interrupts are
 * deliberately NOT enabled here (see comment near the end).
 *
 * Returns 0 on success, -EIO if the h/w reset or rx start fails.
 */
int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
        struct ath_hal *ah = sc->sc_ah;
        int status;
        int error = 0;

        DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
                __func__, sc->sc_ah->ah_opmode);

        /*
         * Stop anything previously setup. This is safe
         * whether this is the first time through or not.
         */
        ath_stop(sc);

        /* Initialize chanmask selection */
        sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
        sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

        /* Reset SERDES registers */
        ath9k_hw_configpcipowersave(ah, 0);

        /*
         * The basic interface to setting the hardware in a good
         * state is ``reset''. On return the hardware is known to
         * be powered up and with interrupts disabled. This must
         * be followed by initialization of the appropriate bits
         * and then setup of the interrupt mask.
         */

        spin_lock_bh(&sc->sc_resetlock);
        if (!ath9k_hw_reset(ah, initial_chan,
                            sc->sc_ht_info.tx_chan_width,
                            sc->sc_tx_chainmask, sc->sc_rx_chainmask,
                            sc->sc_ht_extprotspacing, false, &status)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to reset hardware; hal status %u "
                        "(freq %u flags 0x%x)\n", __func__, status,
                        initial_chan->channel, initial_chan->channelFlags);
                error = -EIO;
                spin_unlock_bh(&sc->sc_resetlock);
                goto done;
        }
        spin_unlock_bh(&sc->sc_resetlock);
        /*
         * This is needed only to setup initial state
         * but it's best done after a reset.
         */
        ath_update_txpow(sc);

        /*
         * Setup the hardware after reset:
         * The receive engine is set going.
         * Frame transmit is handled entirely
         * in the frame output path; there's nothing to do
         * here except setup the interrupt mask.
         */
        if (ath_startrecv(sc) != 0) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to start recv logic\n", __func__);
                error = -EIO;
                goto done;
        }
        /* Setup our intr mask. */
        sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
                | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
                | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

        if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
                sc->sc_imask |= ATH9K_INT_GTT;

        if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
                sc->sc_imask |= ATH9K_INT_CST;

        /*
         * Enable MIB interrupts when there are hardware phy counters.
         * Note we only do this (at the moment) for station mode.
         */
        if (ath9k_hw_phycounters(ah) &&
            ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
             (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
                sc->sc_imask |= ATH9K_INT_MIB;
        /*
         * Some hardware processes the TIM IE and fires an
         * interrupt when the TIM bit is set. For hardware
         * that does, if not overridden by configuration,
         * enable the TIM interrupt when operating as station.
         */
        if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
            (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
            !sc->sc_config.swBeaconProcess)
                sc->sc_imask |= ATH9K_INT_TIM;
        /*
         * Don't enable interrupts here as we've not yet built our
         * vap and node data structures, which will be needed as soon
         * as we start receiving.
         */
        ath_setcurmode(sc, ath_chan2mode(initial_chan));

        /* XXX: we must make sure h/w is ready and clear invalid flag
         * before turning on interrupt. */
        sc->sc_flags &= ~SC_OP_INVALID;
done:
        return error;
}
826
/*
 * Full chip reset: quiesce tx/rx, reset the hardware on the current
 * channel, then restart rx, beacons and interrupts.  When retry_tx is
 * true, pending tx queues are rescheduled afterwards instead of being
 * discarded.
 *
 * Returns 0 on success, -EIO if the h/w reset fails (the function still
 * attempts to bring rx/interrupts back up in that case).
 */
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hal *ah = sc->sc_ah;
        int status;
        int error = 0;

        ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
        ath_draintxq(sc, retry_tx);     /* stop xmit */
        ath_stoprecv(sc);               /* stop recv */
        ath_flushrecv(sc);              /* flush recv queue */

        /* Reset chip */
        spin_lock_bh(&sc->sc_resetlock);
        if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
                            sc->sc_ht_info.tx_chan_width,
                            sc->sc_tx_chainmask, sc->sc_rx_chainmask,
                            sc->sc_ht_extprotspacing, false, &status)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to reset hardware; hal status %u\n",
                        __func__, status);
                error = -EIO;
        }
        spin_unlock_bh(&sc->sc_resetlock);

        if (ath_startrecv(sc) != 0)     /* restart recv */
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to start recv logic\n", __func__);

        /*
         * We may be doing a reset in response to a request
         * that changes the channel so update any state that
         * might change as a result.
         */
        ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));

        ath_update_txpow(sc);

        if (sc->sc_flags & SC_OP_BEACONS)
                ath_beacon_config(sc, ATH_IF_ID_ANY);   /* restart beacons */

        ath9k_hw_set_interrupts(ah, sc->sc_imask);

        /* Restart the txq */
        if (retry_tx) {
                int i;
                for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                        if (ATH_TXQ_SETUP(sc, i)) {
                                spin_lock_bh(&sc->sc_txq[i].axq_lock);
                                ath_txq_schedule(sc, &sc->sc_txq[i]);
                                spin_unlock_bh(&sc->sc_txq[i].axq_lock);
                        }
                }
        }

        return error;
}
883
/*
 * Put the device to sleep for suspend: disable interrupts, mark the
 * softc invalid, disable the HAL and enable PCI power save.
 *
 * The ordering matters: interrupts are shut off BEFORE SC_OP_INVALID is
 * set so that no ISR observes the flag while the h/w can still raise
 * interrupts.
 *
 * Returns 0 on success, -EIO if the device was already invalid/removed.
 */
int ath_suspend(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;

        /* No I/O if device has been surprise removed */
        if (sc->sc_flags & SC_OP_INVALID)
                return -EIO;

        /* Shut off the interrupt before setting sc->sc_invalid to '1' */
        ath9k_hw_set_interrupts(ah, 0);

        /* XXX: we must make sure h/w will not generate any interrupt
         * before setting the invalid flag. */
        sc->sc_flags |= SC_OP_INVALID;

        /* disable HAL and put h/w to sleep */
        ath9k_hw_disable(sc->sc_ah);

        ath9k_hw_configpcipowersave(sc->sc_ah, 1);

        return 0;
}
906
/*
 * Interrupt handler. Most of the actual processing is deferred to the
 * intr_tq tasklet (or bcon_tasklet for beacons).  It's the caller's
 * responsibility to ensure the chip is awake.
 *
 * Returns IRQ_NONE when the interrupt is not ours (shared IRQ line or
 * invalid hardware), IRQ_HANDLED otherwise.
 */
irqreturn_t ath_isr(int irq, void *dev)
{
        struct ath_softc *sc = dev;
        struct ath_hal *ah = sc->sc_ah;
        enum ath9k_int status;
        bool sched = false;

        /* do/while(0) allows 'break'-free early exits via return. */
        do {
                if (sc->sc_flags & SC_OP_INVALID) {
                        /*
                         * The hardware is not ready/present, don't
                         * touch anything. Note this can happen early
                         * on if the IRQ is shared.
                         */
                        return IRQ_NONE;
                }
                if (!ath9k_hw_intrpend(ah)) {   /* shared irq, not for us */
                        return IRQ_NONE;
                }

                /*
                 * Figure out the reason(s) for the interrupt. Note
                 * that the hal returns a pseudo-ISR that may include
                 * bits we haven't explicitly enabled so we mask the
                 * value to insure we only process bits we requested.
                 */
                ath9k_hw_getisr(ah, &status);   /* NB: clears ISR too */

                status &= sc->sc_imask; /* discard unasked-for bits */

                /*
                 * If there are no status bits set, then this interrupt was not
                 * for me (should have been caught above).
                 */

                if (!status)
                        return IRQ_NONE;

                sc->sc_intrstatus = status;

                if (status & ATH9K_INT_FATAL) {
                        /* need a chip reset */
                        sched = true;
                } else if (status & ATH9K_INT_RXORN) {
                        /* need a chip reset */
                        sched = true;
                } else {
                        if (status & ATH9K_INT_SWBA) {
                                /* schedule a tasklet for beacon handling */
                                tasklet_schedule(&sc->bcon_tasklet);
                        }
                        if (status & ATH9K_INT_RXEOL) {
                                /*
                                 * NB: the hardware should re-read the link when
                                 *     RXE bit is written, but it doesn't work
                                 *     at least on older hardware revs.
                                 */
                                sched = true;
                        }

                        if (status & ATH9K_INT_TXURN)
                                /* bump tx trigger level */
                                ath9k_hw_updatetxtriglevel(ah, true);
                        /* XXX: optimize this */
                        if (status & ATH9K_INT_RX)
                                sched = true;
                        if (status & ATH9K_INT_TX)
                                sched = true;
                        if (status & ATH9K_INT_BMISS)
                                sched = true;
                        /* carrier sense timeout */
                        if (status & ATH9K_INT_CST)
                                sched = true;
                        if (status & ATH9K_INT_MIB) {
                                /*
                                 * Disable interrupts until we service the MIB
                                 * interrupt; otherwise it will continue to
                                 * fire.
                                 */
                                ath9k_hw_set_interrupts(ah, 0);
                                /*
                                 * Let the hal handle the event. We assume
                                 * it will clear whatever condition caused
                                 * the interrupt.
                                 */
                                ath9k_hw_procmibevent(ah, &sc->sc_halstats);
                                ath9k_hw_set_interrupts(ah, sc->sc_imask);
                        }
                        if (status & ATH9K_INT_TIM_TIMER) {
                                if (!(ah->ah_caps.hw_caps &
                                      ATH9K_HW_CAP_AUTOSLEEP)) {
                                        /* Clear RxAbort bit so that we can
                                         * receive frames */
                                        ath9k_hw_setrxabort(ah, 0);
                                        sched = true;
                                }
                        }
                }
        } while (0);

        if (sched) {
                /* turn off every interrupt except SWBA */
                ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
                tasklet_schedule(&sc->intr_tq);
        }

        return IRQ_HANDLED;
}
1018
1019/* Deferred interrupt processing */
1020
1021static void ath9k_tasklet(unsigned long data)
1022{
1023 struct ath_softc *sc = (struct ath_softc *)data;
1024 u32 status = sc->sc_intrstatus;
1025
1026 if (status & ATH9K_INT_FATAL) {
1027 /* need a chip reset */
1028 ath_reset(sc, false);
1029 return;
1030 } else {
1031
1032 if (status &
1033 (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
1034 /* XXX: fill me in */
1035 /*
1036 if (status & ATH9K_INT_RXORN) {
1037 }
1038 if (status & ATH9K_INT_RXEOL) {
1039 }
1040 */
1041 spin_lock_bh(&sc->sc_rxflushlock);
1042 ath_rx_tasklet(sc, 0);
1043 spin_unlock_bh(&sc->sc_rxflushlock);
1044 }
1045 /* XXX: optimize this */
1046 if (status & ATH9K_INT_TX)
1047 ath_tx_tasklet(sc);
1048 /* XXX: fill me in */
1049 /*
1050 if (status & ATH9K_INT_BMISS) {
1051 }
1052 if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
1053 if (status & ATH9K_INT_TIM) {
1054 }
1055 if (status & ATH9K_INT_DTIMSYNC) {
1056 }
1057 }
1058 */
1059 }
1060
1061 /* re-enable hardware interrupt */
1062 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
1063}
1064
/*
 * ath_init - one-time software/hardware attach for a device.
 *
 * Attaches the HAL, resets and reserves key-cache slots, builds the
 * channel and rate tables, creates the beacon/CAB/data tx queues,
 * attaches the rate-control module, and records hardware capabilities
 * (11n aggregation, chainmasks, MAC address / BSSID mask) in @sc.
 *
 * @devid: device id used to select the HAL attach path.
 * @sc: driver-private state being initialized.
 *
 * Returns 0 on success or a negative errno; on failure every tx queue
 * and the HAL attached so far are torn down before returning.
 */
int ath_init(u16 devid, struct ath_softc *sc)
{
	struct ath_hal *ah = NULL;
	int status;
	int error = 0, i;
	int csz = 0;

	/* XXX: hardware will not be ready until ath_open() being called */
	sc->sc_flags |= SC_OP_INVALID;

	sc->sc_debug = DBG_DEFAULT;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);

	/* Initialize tasklet */
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	bus_read_cachesize(sc, &csz);
	/* XXX assert csz is non-zero */
	sc->sc_cachelsz = csz << 2;	/* convert to bytes */

	spin_lock_init(&sc->sc_resetlock);

	ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
	if (ah == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to attach hardware; HAL status %u\n",
			__func__, status);
		error = -ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;

	/* Initializes the noise floor to a reasonable default value.
	 * Later on this will be updated during ANI processing. */
	sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;

	/* Get the hardware key cache size. */
	sc->sc_keymax = ah->ah_caps.keycache_size;
	if (sc->sc_keymax > ATH_KEYMAX) {
		DPRINTF(sc, ATH_DBG_KEYCACHE,
			"%s: Warning, using only %u entries in %u key cache\n",
			__func__, ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use. If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		set_bit(i, sc->sc_keymap);
		set_bit(i + 32, sc->sc_keymap);
		set_bit(i + 64, sc->sc_keymap);
		set_bit(i + 32 + 64, sc->sc_keymap);
	}
	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels. The 802.11 layer
	 * is responsible for filtering this list based on settings
	 * like the phy mode.
	 */
	error = ath_setup_channels(sc);
	if (error)
		goto bad;

	/* NOTE(review): the comment here used to say "default to STA mode"
	 * but the code sets monitor mode — confirm which is intended. */
	sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;

	/* Setup rate tables */

	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, ATH9K_MODE_11A);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority. Note that the hal handles reseting
	 * these queues at the needed time.
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == -1) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup a beacon xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup CAB xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	/* No WME access category mapped to a h/w queue yet. */
	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
		sc->sc_haltype2q[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BK traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BE traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VI traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VO traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->sc_rc = ath_rate_attach(ah);
	if (sc->sc_rc == NULL) {
		error = -EIO;
		goto bad2;
	}

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		sc->sc_splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
					     1, NULL);

	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
	sc->sc_config.txpowlimit_override = 0;

	/* 11n Capabilities */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->sc_defant = ath9k_hw_getdefantenna(ah);

	ath9k_hw_getmac(ah, sc->sc_myaddr);
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
	}
	sc->sc_slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
		sc->sc_bslot[i] = ATH_IF_ID_ANY;

	/* save MISC configurations */
	sc->sc_config.swBeaconProcess = 1;

#ifdef CONFIG_SLOW_ANT_DIV
	/* range is 40 - 255, we use something in the middle
	 * NOTE(review): 0x127 is 295, above the stated 40-255 range; this
	 * relies on ath_slow_ant_div_init() clamping — confirm intended. */
	ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
#endif

	return 0;
bad2:
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
bad:
	if (ah)
		ath9k_hw_detach(ah);
	return error;
}
1299
/*
 * ath_deinit - detach-time teardown, mirroring ath_init().
 *
 * Kills deferred work (tasklets) before stopping the device, wakes the
 * chip if it was still valid, then detaches rate control, frees every
 * tx queue that was set up, and finally detaches the HAL.  Order
 * matters: no tasklet may run once the hardware is being torn down.
 */
void ath_deinit(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int i;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);

	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);
	ath_stop(sc);
	/* Only force the chip awake if it was ever brought up. */
	if (!(sc->sc_flags & SC_OP_INVALID))
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
	ath_rate_detach(sc->sc_rc);
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
	ath9k_hw_detach(ah);
}
1319
1320/*******************/
1321/* Node Management */
1322/*******************/
1323
1324struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
1325{
1326 struct ath_vap *avp;
1327 struct ath_node *an;
1328 DECLARE_MAC_BUF(mac);
1329
1330 avp = sc->sc_vaps[if_id];
1331 ASSERT(avp != NULL);
1332
1333 /* mac80211 sta_notify callback is from an IRQ context, so no sleep */
1334 an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
1335 if (an == NULL)
1336 return NULL;
1337 memset(an, 0, sizeof(*an));
1338
1339 an->an_sc = sc;
1340 memcpy(an->an_addr, addr, ETH_ALEN);
1341 atomic_set(&an->an_refcnt, 1);
1342
1343 /* set up per-node tx/rx state */
1344 ath_tx_node_init(sc, an);
1345 ath_rx_node_init(sc, an);
1346
1347 ath_chainmask_sel_init(sc, an);
1348 ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
1349 list_add(&an->list, &sc->node_list);
1350
1351 return an;
1352}
1353
1354void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
1355{
1356 unsigned long flags;
1357
1358 DECLARE_MAC_BUF(mac);
1359
1360 ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
1361 an->an_flags |= ATH_NODE_CLEAN;
1362 ath_tx_node_cleanup(sc, an, bh_flag);
1363 ath_rx_node_cleanup(sc, an);
1364
1365 ath_tx_node_free(sc, an);
1366 ath_rx_node_free(sc, an);
1367
1368 spin_lock_irqsave(&sc->node_lock, flags);
1369
1370 list_del(&an->list);
1371
1372 spin_unlock_irqrestore(&sc->node_lock, flags);
1373
1374 kfree(an);
1375}
1376
1377/* Finds a node and increases the refcnt if found */
1378
1379struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
1380{
1381 struct ath_node *an = NULL, *an_found = NULL;
1382
1383 if (list_empty(&sc->node_list)) /* FIXME */
1384 goto out;
1385 list_for_each_entry(an, &sc->node_list, list) {
1386 if (!compare_ether_addr(an->an_addr, addr)) {
1387 atomic_inc(&an->an_refcnt);
1388 an_found = an;
1389 break;
1390 }
1391 }
1392out:
1393 return an_found;
1394}
1395
1396/* Decrements the refcnt and if it drops to zero, detach the node */
1397
1398void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
1399{
1400 if (atomic_dec_and_test(&an->an_refcnt))
1401 ath_node_detach(sc, an, bh_flag);
1402}
1403
1404/* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */
1405struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
1406{
1407 struct ath_node *an = NULL, *an_found = NULL;
1408
1409 if (list_empty(&sc->node_list))
1410 return NULL;
1411
1412 list_for_each_entry(an, &sc->node_list, list)
1413 if (!compare_ether_addr(an->an_addr, addr)) {
1414 an_found = an;
1415 break;
1416 }
1417
1418 return an_found;
1419}
1420
1421/*
1422 * Set up New Node
1423 *
1424 * Setup driver-specific state for a newly associated node. This routine
1425 * really only applies if compression or XR are enabled, there is no code
1426 * covering any other cases.
1427*/
1428
1429void ath_newassoc(struct ath_softc *sc,
1430 struct ath_node *an, int isnew, int isuapsd)
1431{
1432 int tidno;
1433
1434 /* if station reassociates, tear down the aggregation state. */
1435 if (!isnew) {
1436 for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
1437 if (sc->sc_flags & SC_OP_TXAGGR)
1438 ath_tx_aggr_teardown(sc, an, tidno);
1439 if (sc->sc_flags & SC_OP_RXAGGR)
1440 ath_rx_aggr_teardown(sc, an, tidno);
1441 }
1442 }
1443 an->an_flags = 0;
1444}
1445
1446/**************/
1447/* Encryption */
1448/**************/
1449
1450void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
1451{
1452 ath9k_hw_keyreset(sc->sc_ah, keyix);
1453 if (freeslot)
1454 clear_bit(keyix, sc->sc_keymap);
1455}
1456
1457int ath_keyset(struct ath_softc *sc,
1458 u16 keyix,
1459 struct ath9k_keyval *hk,
1460 const u8 mac[ETH_ALEN])
1461{
1462 bool status;
1463
1464 status = ath9k_hw_set_keycache_entry(sc->sc_ah,
1465 keyix, hk, mac, false);
1466
1467 return status != false;
1468}
1469
1470/***********************/
1471/* TX Power/Regulatory */
1472/***********************/
1473
1474/*
1475 * Set Transmit power in HAL
1476 *
1477 * This routine makes the actual HAL calls to set the new transmit power
1478 * limit.
1479*/
1480
1481void ath_update_txpow(struct ath_softc *sc)
1482{
1483 struct ath_hal *ah = sc->sc_ah;
1484 u32 txpow;
1485
1486 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
1487 ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
1488 /* read back in case value is clamped */
1489 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
1490 sc->sc_curtxpow = txpow;
1491 }
1492}
1493
1494/* Return the current country and domain information */
1495void ath_get_currentCountry(struct ath_softc *sc,
1496 struct ath9k_country_entry *ctry)
1497{
1498 ath9k_regd_get_current_country(sc->sc_ah, ctry);
1499
1500 /* If HAL not specific yet, since it is band dependent,
1501 * use the one we passed in. */
1502 if (ctry->countryCode == CTRY_DEFAULT) {
1503 ctry->iso[0] = 0;
1504 ctry->iso[1] = 0;
1505 } else if (ctry->iso[0] && ctry->iso[1]) {
1506 if (!ctry->iso[2]) {
1507 if (ath_outdoor)
1508 ctry->iso[2] = 'O';
1509 else
1510 ctry->iso[2] = 'I';
1511 }
1512 }
1513}
1514
1515/**************************/
1516/* Slow Antenna Diversity */
1517/**************************/
1518
1519void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
1520 struct ath_softc *sc,
1521 int32_t rssitrig)
1522{
1523 int trig;
1524
1525 /* antdivf_rssitrig can range from 40 - 0xff */
1526 trig = (rssitrig > 0xff) ? 0xff : rssitrig;
1527 trig = (rssitrig < 40) ? 40 : rssitrig;
1528
1529 antdiv->antdiv_sc = sc;
1530 antdiv->antdivf_rssitrig = trig;
1531}
1532
1533void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
1534 u8 num_antcfg,
1535 const u8 *bssid)
1536{
1537 antdiv->antdiv_num_antcfg =
1538 num_antcfg < ATH_ANT_DIV_MAX_CFG ?
1539 num_antcfg : ATH_ANT_DIV_MAX_CFG;
1540 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1541 antdiv->antdiv_curcfg = 0;
1542 antdiv->antdiv_bestcfg = 0;
1543 antdiv->antdiv_laststatetsf = 0;
1544
1545 memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));
1546
1547 antdiv->antdiv_start = 1;
1548}
1549
/* Disable slow antenna-diversity processing; ath_slow_ant_div()
 * becomes a no-op until ath_slow_ant_div_start() is called again. */
void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
	antdiv->antdiv_start = 0;
}
1554
1555static int32_t ath_find_max_val(int32_t *val,
1556 u8 num_val, u8 *max_index)
1557{
1558 u32 MaxVal = *val++;
1559 u32 cur_index = 0;
1560
1561 *max_index = 0;
1562 while (++cur_index < num_val) {
1563 if (*val > MaxVal) {
1564 MaxVal = *val;
1565 *max_index = cur_index;
1566 }
1567
1568 val++;
1569 }
1570
1571 return MaxVal;
1572}
1573
/*
 * ath_slow_ant_div - slow antenna-diversity state machine.
 *
 * Fed one received frame at a time; only beacons from the associated
 * BSSID (addr3 match) advance the machine, and only while diversity is
 * armed.  In IDLE, when beacon RSSI on the current antenna config
 * drops below the trigger and enough time has passed, the next config
 * is tried and the machine enters SCAN.  In SCAN, each config gets a
 * minimum dwell time; once the scan wraps back to the starting config,
 * the config with the best observed beacon RSSI is selected and the
 * machine returns to IDLE.
 */
void ath_slow_ant_div(struct ath_antdiv *antdiv,
		      struct ieee80211_hdr *hdr,
		      struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = antdiv->antdiv_sc;
	struct ath_hal *ah = sc->sc_ah;
	u64 curtsf = 0;
	u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
	__le16 fc = hdr->frame_control;

	/* Record RSSI/TSF only for our BSS's beacons while armed. */
	if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
	    && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
		antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
		antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
		curtsf = antdiv->antdiv_lastbtsf[curcfg];
	} else {
		return;
	}

	switch (antdiv->antdiv_state) {
	case ATH_ANT_DIV_IDLE:
		/* Weak beacon RSSI and minimum idle time elapsed:
		 * try the next antenna config and start scanning. */
		if ((antdiv->antdiv_lastbrssi[curcfg] <
		     antdiv->antdivf_rssitrig)
		    && ((curtsf - antdiv->antdiv_laststatetsf) >
			ATH_ANT_DIV_MIN_IDLE_US)) {

			curcfg++;
			if (curcfg == antdiv->antdiv_num_antcfg)
				curcfg = 0;

			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;

	case ATH_ANT_DIV_SCAN:
		/* Dwell on the current config for the minimum scan time. */
		if ((curtsf - antdiv->antdiv_laststatetsf) <
		    ATH_ANT_DIV_MIN_SCAN_US)
			break;

		curcfg++;
		if (curcfg == antdiv->antdiv_num_antcfg)
			curcfg = 0;

		/* Wrapped around: settle on the best config observed,
		 * otherwise keep scanning the next config. */
		if (curcfg == antdiv->antdiv_bestcfg) {
			ath_find_max_val(antdiv->antdiv_lastbrssi,
					 antdiv->antdiv_num_antcfg, &bestcfg);
			if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
				antdiv->antdiv_bestcfg = bestcfg;
				antdiv->antdiv_curcfg = bestcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
			}
		} else {
			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}

		break;
	}
}
1642
1643/***********************/
1644/* Descriptor Handling */
1645/***********************/
1646
1647/*
1648 * Set up DMA descriptors
1649 *
1650 * This function will allocate both the DMA descriptor structure, and the
1651 * buffers it contains. These are used to contain the descriptors used
1652 * by the system.
1653*/
1654
1655int ath_descdma_setup(struct ath_softc *sc,
1656 struct ath_descdma *dd,
1657 struct list_head *head,
1658 const char *name,
1659 int nbuf,
1660 int ndesc)
1661{
1662#define DS2PHYS(_dd, _ds) \
1663 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1664#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1665#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1666
1667 struct ath_desc *ds;
1668 struct ath_buf *bf;
1669 int i, bsize, error;
1670
1671 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
1672 __func__, name, nbuf, ndesc);
1673
1674 /* ath_desc must be a multiple of DWORDs */
1675 if ((sizeof(struct ath_desc) % 4) != 0) {
1676 DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
1677 __func__);
1678 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1679 error = -ENOMEM;
1680 goto fail;
1681 }
1682
1683 dd->dd_name = name;
1684 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1685
1686 /*
1687 * Need additional DMA memory because we can't use
1688 * descriptors that cross the 4K page boundary. Assume
1689 * one skipped descriptor per 4K page.
1690 */
1691 if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1692 u32 ndesc_skipped =
1693 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1694 u32 dma_len;
1695
1696 while (ndesc_skipped) {
1697 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1698 dd->dd_desc_len += dma_len;
1699
1700 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1701 };
1702 }
1703
1704 /* allocate descriptors */
1705 dd->dd_desc = pci_alloc_consistent(sc->pdev,
1706 dd->dd_desc_len,
1707 &dd->dd_desc_paddr);
1708 if (dd->dd_desc == NULL) {
1709 error = -ENOMEM;
1710 goto fail;
1711 }
1712 ds = dd->dd_desc;
1713 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
1714 __func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
1715 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1716
1717 /* allocate buffers */
1718 bsize = sizeof(struct ath_buf) * nbuf;
1719 bf = kmalloc(bsize, GFP_KERNEL);
1720 if (bf == NULL) {
1721 error = -ENOMEM;
1722 goto fail2;
1723 }
1724 memset(bf, 0, bsize);
1725 dd->dd_bufptr = bf;
1726
1727 INIT_LIST_HEAD(head);
1728 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1729 bf->bf_desc = ds;
1730 bf->bf_daddr = DS2PHYS(dd, ds);
1731
1732 if (!(sc->sc_ah->ah_caps.hw_caps &
1733 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1734 /*
1735 * Skip descriptor addresses which can cause 4KB
1736 * boundary crossing (addr + length) with a 32 dword
1737 * descriptor fetch.
1738 */
1739 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1740 ASSERT((caddr_t) bf->bf_desc <
1741 ((caddr_t) dd->dd_desc +
1742 dd->dd_desc_len));
1743
1744 ds += ndesc;
1745 bf->bf_desc = ds;
1746 bf->bf_daddr = DS2PHYS(dd, ds);
1747 }
1748 }
1749 list_add_tail(&bf->list, head);
1750 }
1751 return 0;
1752fail2:
1753 pci_free_consistent(sc->pdev,
1754 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1755fail:
1756 memset(dd, 0, sizeof(*dd));
1757 return error;
1758#undef ATH_DESC_4KB_BOUND_CHECK
1759#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1760#undef DS2PHYS
1761}
1762
1763/*
1764 * Cleanup DMA descriptors
1765 *
1766 * This function will free the DMA block that was allocated for the descriptor
1767 * pool. Since this was allocated as one "chunk", it is freed in the same
1768 * manner.
1769*/
1770
1771void ath_descdma_cleanup(struct ath_softc *sc,
1772 struct ath_descdma *dd,
1773 struct list_head *head)
1774{
1775 /* Free memory associated with descriptors */
1776 pci_free_consistent(sc->pdev,
1777 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1778
1779 INIT_LIST_HEAD(head);
1780 kfree(dd->dd_bufptr);
1781 memset(dd, 0, sizeof(*dd));
1782}
1783
1784/*************/
1785/* Utilities */
1786/*************/
1787
1788int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1789{
1790 int qnum;
1791
1792 switch (queue) {
1793 case 0:
1794 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1795 break;
1796 case 1:
1797 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1798 break;
1799 case 2:
1800 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1801 break;
1802 case 3:
1803 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1804 break;
1805 default:
1806 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1807 break;
1808 }
1809
1810 return qnum;
1811}
1812
1813int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1814{
1815 int qnum;
1816
1817 switch (queue) {
1818 case ATH9K_WME_AC_VO:
1819 qnum = 0;
1820 break;
1821 case ATH9K_WME_AC_VI:
1822 qnum = 1;
1823 break;
1824 case ATH9K_WME_AC_BE:
1825 qnum = 2;
1826 break;
1827 case ATH9K_WME_AC_BK:
1828 qnum = 3;
1829 break;
1830 default:
1831 qnum = -1;
1832 break;
1833 }
1834
1835 return qnum;
1836}
1837
1838
1839/*
1840 * Expand time stamp to TSF
1841 *
1842 * Extend 15-bit time stamp from rx descriptor to
1843 * a full 64-bit TSF using the current h/w TSF.
1844*/
1845
1846u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
1847{
1848 u64 tsf;
1849
1850 tsf = ath9k_hw_gettsf64(sc->sc_ah);
1851 if ((tsf & 0x7fff) < rstamp)
1852 tsf -= 0x8000;
1853 return (tsf & ~0x7fff) | rstamp;
1854}
1855
1856/*
1857 * Set Default Antenna
1858 *
1859 * Call into the HAL to set the default antenna to use. Not really valid for
1860 * MIMO technology.
1861*/
1862
1863void ath_setdefantenna(void *context, u32 antenna)
1864{
1865 struct ath_softc *sc = (struct ath_softc *)context;
1866 struct ath_hal *ah = sc->sc_ah;
1867
1868 /* XXX block beacon interrupts */
1869 ath9k_hw_setantenna(ah, antenna);
1870 sc->sc_defant = antenna;
1871 sc->sc_rxotherant = 0;
1872}
1873
/*
 * Program the hardware slot time from sc->sc_slottime (which the
 * caller has already set) and mark the pending slot-time update as
 * complete.
 */
void ath_setslottime(struct ath_softc *sc)
{
	ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
	sc->sc_updateslot = OK;
}
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index cb3e61e57c4d..4ca2aed236e0 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -17,27 +17,8 @@
17#ifndef CORE_H 17#ifndef CORE_H
18#define CORE_H 18#define CORE_H
19 19
20#include <linux/version.h>
21#include <linux/autoconf.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/spinlock.h>
25#include <linux/errno.h>
26#include <linux/skbuff.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
29#include <linux/ip.h>
30#include <linux/tcp.h>
31#include <linux/in.h>
32#include <linux/delay.h>
33#include <linux/wait.h>
34#include <linux/pci.h> 21#include <linux/pci.h>
35#include <linux/interrupt.h>
36#include <linux/sched.h>
37#include <linux/list.h>
38#include <asm/byteorder.h>
39#include <linux/scatterlist.h>
40#include <asm/page.h>
41#include <net/mac80211.h> 22#include <net/mac80211.h>
42#include <linux/leds.h> 23#include <linux/leds.h>
43#include <linux/rfkill.h> 24#include <linux/rfkill.h>
@@ -47,10 +28,6 @@
47 28
48struct ath_node; 29struct ath_node;
49 30
50/******************/
51/* Utility macros */
52/******************/
53
54/* Macro to expand scalars to 64-bit objects */ 31/* Macro to expand scalars to 64-bit objects */
55 32
56#define ito64(x) (sizeof(x) == 8) ? \ 33#define ito64(x) (sizeof(x) == 8) ? \
@@ -84,94 +61,125 @@ struct ath_node;
84#define TSF_TO_TU(_h,_l) \ 61#define TSF_TO_TU(_h,_l) \
85 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) 62 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
86 63
87#define ATH9K_BH_STATUS_INTACT 0 64#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
88#define ATH9K_BH_STATUS_CHANGE 1
89
90#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i))
91
92static inline unsigned long get_timestamp(void)
93{
94 return ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ);
95}
96 65
97static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 66static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
98 67
99/*************/
100/* Debugging */
101/*************/
102
103enum ATH_DEBUG { 68enum ATH_DEBUG {
104 ATH_DBG_RESET = 0x00000001, 69 ATH_DBG_RESET = 0x00000001,
105 ATH_DBG_PHY_IO = 0x00000002, 70 ATH_DBG_REG_IO = 0x00000002,
106 ATH_DBG_REG_IO = 0x00000004, 71 ATH_DBG_QUEUE = 0x00000004,
107 ATH_DBG_QUEUE = 0x00000008, 72 ATH_DBG_EEPROM = 0x00000008,
108 ATH_DBG_EEPROM = 0x00000010, 73 ATH_DBG_CALIBRATE = 0x00000010,
109 ATH_DBG_NF_CAL = 0x00000020, 74 ATH_DBG_CHANNEL = 0x00000020,
110 ATH_DBG_CALIBRATE = 0x00000040, 75 ATH_DBG_INTERRUPT = 0x00000040,
111 ATH_DBG_CHANNEL = 0x00000080, 76 ATH_DBG_REGULATORY = 0x00000080,
112 ATH_DBG_INTERRUPT = 0x00000100, 77 ATH_DBG_ANI = 0x00000100,
113 ATH_DBG_REGULATORY = 0x00000200, 78 ATH_DBG_POWER_MGMT = 0x00000200,
114 ATH_DBG_ANI = 0x00000400, 79 ATH_DBG_XMIT = 0x00000400,
115 ATH_DBG_POWER_MGMT = 0x00000800, 80 ATH_DBG_BEACON = 0x00001000,
116 ATH_DBG_XMIT = 0x00001000, 81 ATH_DBG_CONFIG = 0x00002000,
117 ATH_DBG_BEACON = 0x00002000, 82 ATH_DBG_KEYCACHE = 0x00004000,
118 ATH_DBG_RATE = 0x00004000, 83 ATH_DBG_FATAL = 0x00008000,
119 ATH_DBG_CONFIG = 0x00008000,
120 ATH_DBG_KEYCACHE = 0x00010000,
121 ATH_DBG_AGGR = 0x00020000,
122 ATH_DBG_FATAL = 0x00040000,
123 ATH_DBG_ANY = 0xffffffff 84 ATH_DBG_ANY = 0xffffffff
124}; 85};
125 86
126#define DBG_DEFAULT (ATH_DBG_FATAL) 87#define DBG_DEFAULT (ATH_DBG_FATAL)
127 88
128#define DPRINTF(sc, _m, _fmt, ...) do { \ 89#ifdef CONFIG_ATH9K_DEBUG
129 if (sc->sc_debug & (_m)) \ 90
130 printk(_fmt , ##__VA_ARGS__); \ 91/**
131 } while (0) 92 * struct ath_interrupt_stats - Contains statistics about interrupts
93 * @total: Total no. of interrupts generated so far
94 * @rxok: RX with no errors
95 * @rxeol: RX with no more RXDESC available
96 * @rxorn: RX FIFO overrun
97 * @txok: TX completed at the requested rate
98 * @txurn: TX FIFO underrun
99 * @mib: MIB regs reaching its threshold
100 * @rxphyerr: RX with phy errors
101 * @rx_keycache_miss: RX with key cache misses
102 * @swba: Software Beacon Alert
103 * @bmiss: Beacon Miss
104 * @bnr: Beacon Not Ready
105 * @cst: Carrier Sense TImeout
106 * @gtt: Global TX Timeout
107 * @tim: RX beacon TIM occurrence
108 * @cabend: RX End of CAB traffic
109 * @dtimsync: DTIM sync lossage
110 * @dtim: RX Beacon with DTIM
111 */
112struct ath_interrupt_stats {
113 u32 total;
114 u32 rxok;
115 u32 rxeol;
116 u32 rxorn;
117 u32 txok;
118 u32 txeol;
119 u32 txurn;
120 u32 mib;
121 u32 rxphyerr;
122 u32 rx_keycache_miss;
123 u32 swba;
124 u32 bmiss;
125 u32 bnr;
126 u32 cst;
127 u32 gtt;
128 u32 tim;
129 u32 cabend;
130 u32 dtimsync;
131 u32 dtim;
132};
133
134struct ath_stats {
135 struct ath_interrupt_stats istats;
136};
137
138struct ath9k_debug {
139 int debug_mask;
140 struct dentry *debugfs_root;
141 struct dentry *debugfs_phy;
142 struct dentry *debugfs_dma;
143 struct dentry *debugfs_interrupt;
144 struct ath_stats stats;
145};
146
147void DPRINTF(struct ath_softc *sc, int dbg_mask, const char *fmt, ...);
148int ath9k_init_debug(struct ath_softc *sc);
149void ath9k_exit_debug(struct ath_softc *sc);
150void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
151
152#else
153
154static inline void DPRINTF(struct ath_softc *sc, int dbg_mask,
155 const char *fmt, ...)
156{
157}
158
159static inline int ath9k_init_debug(struct ath_softc *sc)
160{
161 return 0;
162}
132 163
133/***************************/ 164static inline void ath9k_exit_debug(struct ath_softc *sc)
134/* Load-time Configuration */ 165{
135/***************************/ 166}
167
168static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
169 enum ath9k_int status)
170{
171}
172
173#endif /* CONFIG_ATH9K_DEBUG */
136 174
137/* Per-instance load-time (note: NOT run-time) configurations
138 * for Atheros Device */
139struct ath_config { 175struct ath_config {
140 u32 ath_aggr_prot; 176 u32 ath_aggr_prot;
141 u16 txpowlimit; 177 u16 txpowlimit;
142 u16 txpowlimit_override; 178 u16 txpowlimit_override;
143 u8 cabqReadytime; /* Cabq Readytime % */ 179 u8 cabqReadytime;
144 u8 swBeaconProcess; /* Process received beacons in SW (vs HW) */ 180 u8 swBeaconProcess;
145};
146
147/***********************/
148/* Chainmask Selection */
149/***********************/
150
151#define ATH_CHAINMASK_SEL_TIMEOUT 6000
152/* Default - Number of last RSSI values that is used for
153 * chainmask selection */
154#define ATH_CHAINMASK_SEL_RSSI_CNT 10
155/* Means use 3x3 chainmask instead of configured chainmask */
156#define ATH_CHAINMASK_SEL_3X3 7
157/* Default - Rssi threshold below which we have to switch to 3x3 */
158#define ATH_CHAINMASK_SEL_UP_RSSI_THRES 20
159/* Default - Rssi threshold above which we have to switch to
160 * user configured values */
161#define ATH_CHAINMASK_SEL_DOWN_RSSI_THRES 35
162/* Struct to store the chainmask select related info */
163struct ath_chainmask_sel {
164 struct timer_list timer;
165 int cur_tx_mask; /* user configured or 3x3 */
166 int cur_rx_mask; /* user configured or 3x3 */
167 int tx_avgrssi;
168 u8 switch_allowed:1, /* timer will set this */
169 cm_sel_enabled : 1;
170}; 181};
171 182
172int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an);
173void ath_update_chainmask(struct ath_softc *sc, int is_ht);
174
175/*************************/ 183/*************************/
176/* Descriptor Management */ 184/* Descriptor Management */
177/*************************/ 185/*************************/
@@ -200,15 +208,14 @@ enum buffer_type {
200}; 208};
201 209
202struct ath_buf_state { 210struct ath_buf_state {
203 int bfs_nframes; /* # frames in aggregate */ 211 int bfs_nframes; /* # frames in aggregate */
204 u16 bfs_al; /* length of aggregate */ 212 u16 bfs_al; /* length of aggregate */
205 u16 bfs_frmlen; /* length of frame */ 213 u16 bfs_frmlen; /* length of frame */
206 int bfs_seqno; /* sequence number */ 214 int bfs_seqno; /* sequence number */
207 int bfs_tidno; /* tid of this frame */ 215 int bfs_tidno; /* tid of this frame */
208 int bfs_retries; /* current retries */ 216 int bfs_retries; /* current retries */
209 struct ath_rc_series bfs_rcs[4]; /* rate series */ 217 u32 bf_type; /* BUF_* (enum buffer_type) */
210 u32 bf_type; /* BUF_* (enum buffer_type) */ 218 u32 bfs_keyix;
211 /* key type use to encrypt this frame */
212 enum ath9k_key_type bfs_keytype; 219 enum ath9k_key_type bfs_keytype;
213}; 220};
214 221
@@ -219,6 +226,7 @@ struct ath_buf_state {
219#define bf_seqno bf_state.bfs_seqno 226#define bf_seqno bf_state.bfs_seqno
220#define bf_tidno bf_state.bfs_tidno 227#define bf_tidno bf_state.bfs_tidno
221#define bf_rcs bf_state.bfs_rcs 228#define bf_rcs bf_state.bfs_rcs
229#define bf_keyix bf_state.bfs_keyix
222#define bf_keytype bf_state.bfs_keytype 230#define bf_keytype bf_state.bfs_keytype
223#define bf_isdata(bf) (bf->bf_state.bf_type & BUF_DATA) 231#define bf_isdata(bf) (bf->bf_state.bf_type & BUF_DATA)
224#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR) 232#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
@@ -242,9 +250,7 @@ struct ath_buf {
242 an aggregate) */ 250 an aggregate) */
243 struct ath_buf *bf_lastfrm; /* last buf of this frame */ 251 struct ath_buf *bf_lastfrm; /* last buf of this frame */
244 struct ath_buf *bf_next; /* next subframe in the aggregate */ 252 struct ath_buf *bf_next; /* next subframe in the aggregate */
245 struct ath_buf *bf_rifslast; /* last buf for RIFS burst */
246 void *bf_mpdu; /* enclosing frame structure */ 253 void *bf_mpdu; /* enclosing frame structure */
247 void *bf_node; /* pointer to the node */
248 struct ath_desc *bf_desc; /* virtual addr of desc */ 254 struct ath_desc *bf_desc; /* virtual addr of desc */
249 dma_addr_t bf_daddr; /* physical addr of desc */ 255 dma_addr_t bf_daddr; /* physical addr of desc */
250 dma_addr_t bf_buf_addr; /* physical addr of data buffer */ 256 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
@@ -254,13 +260,6 @@ struct ath_buf {
254 dma_addr_t bf_dmacontext; 260 dma_addr_t bf_dmacontext;
255}; 261};
256 262
257/*
258 * reset the rx buffer.
259 * any new fields added to the athbuf and require
260 * reset need to be added to this macro.
261 * currently bf_status is the only one requires that
262 * requires reset.
263 */
264#define ATH_RXBUF_RESET(_bf) ((_bf)->bf_status = 0) 263#define ATH_RXBUF_RESET(_bf) ((_bf)->bf_status = 0)
265 264
266/* hw processing complete, desc processed by hal */ 265/* hw processing complete, desc processed by hal */
@@ -281,159 +280,81 @@ struct ath_descdma {
281 dma_addr_t dd_dmacontext; 280 dma_addr_t dd_dmacontext;
282}; 281};
283 282
284/* Abstraction of a received RX MPDU/MMPDU, or a RX fragment */ 283int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
285 284 struct list_head *head, const char *name,
286struct ath_rx_context { 285 int nbuf, int ndesc);
287 struct ath_buf *ctx_rxbuf; /* associated ath_buf for rx */ 286void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
288};
289#define ATH_RX_CONTEXT(skb) ((struct ath_rx_context *)skb->cb)
290
291int ath_descdma_setup(struct ath_softc *sc,
292 struct ath_descdma *dd,
293 struct list_head *head,
294 const char *name,
295 int nbuf,
296 int ndesc);
297int ath_desc_alloc(struct ath_softc *sc);
298void ath_desc_free(struct ath_softc *sc);
299void ath_descdma_cleanup(struct ath_softc *sc,
300 struct ath_descdma *dd,
301 struct list_head *head); 287 struct list_head *head);
302 288
303/******/ 289/***********/
304/* RX */ 290/* RX / TX */
305/******/ 291/***********/
306
307#define ATH_MAX_ANTENNA 3
308#define ATH_RXBUF 512
309#define ATH_RX_TIMEOUT 40 /* 40 milliseconds */
310#define WME_NUM_TID 16
311#define IEEE80211_BAR_CTL_TID_M 0xF000 /* tid mask */
312#define IEEE80211_BAR_CTL_TID_S 12 /* tid shift */
313
314enum ATH_RX_TYPE {
315 ATH_RX_NON_CONSUMED = 0,
316 ATH_RX_CONSUMED
317};
318
319/* per frame rx status block */
320struct ath_recv_status {
321 u64 tsf; /* mac tsf */
322 int8_t rssi; /* RSSI (noise floor ajusted) */
323 int8_t rssictl[ATH_MAX_ANTENNA]; /* RSSI (noise floor ajusted) */
324 int8_t rssiextn[ATH_MAX_ANTENNA]; /* RSSI (noise floor ajusted) */
325 int8_t abs_rssi; /* absolute RSSI */
326 u8 rateieee; /* data rate received (IEEE rate code) */
327 u8 ratecode; /* phy rate code */
328 int rateKbps; /* data rate received (Kbps) */
329 int antenna; /* rx antenna */
330 int flags; /* status of associated skb */
331#define ATH_RX_FCS_ERROR 0x01
332#define ATH_RX_MIC_ERROR 0x02
333#define ATH_RX_DECRYPT_ERROR 0x04
334#define ATH_RX_RSSI_VALID 0x08
335/* if any of ctl,extn chainrssis are valid */
336#define ATH_RX_CHAIN_RSSI_VALID 0x10
337/* if extn chain rssis are valid */
338#define ATH_RX_RSSI_EXTN_VALID 0x20
339/* set if 40Mhz, clear if 20Mhz */
340#define ATH_RX_40MHZ 0x40
341/* set if short GI, clear if full GI */
342#define ATH_RX_SHORT_GI 0x80
343};
344
345struct ath_rxbuf {
346 struct sk_buff *rx_wbuf;
347 unsigned long rx_time; /* system time when received */
348 struct ath_recv_status rx_status; /* cached rx status */
349};
350
351/* Per-TID aggregate receiver state for a node */
352struct ath_arx_tid {
353 struct ath_node *an;
354 struct ath_rxbuf *rxbuf; /* re-ordering buffer */
355 struct timer_list timer;
356 spinlock_t tidlock;
357 int baw_head; /* seq_next at head */
358 int baw_tail; /* tail of block-ack window */
359 int seq_reset; /* need to reset start sequence */
360 int addba_exchangecomplete;
361 u16 seq_next; /* next expected sequence */
362 u16 baw_size; /* block-ack window size */
363};
364
365/* Per-node receiver aggregate state */
366struct ath_arx {
367 struct ath_arx_tid tid[WME_NUM_TID];
368};
369
370int ath_startrecv(struct ath_softc *sc);
371bool ath_stoprecv(struct ath_softc *sc);
372void ath_flushrecv(struct ath_softc *sc);
373u32 ath_calcrxfilter(struct ath_softc *sc);
374void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an);
375void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an);
376void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
377void ath_handle_rx_intr(struct ath_softc *sc);
378int ath_rx_init(struct ath_softc *sc, int nbufs);
379void ath_rx_cleanup(struct ath_softc *sc);
380int ath_rx_tasklet(struct ath_softc *sc, int flush);
381int ath_rx_input(struct ath_softc *sc,
382 struct ath_node *node,
383 int is_ampdu,
384 struct sk_buff *skb,
385 struct ath_recv_status *rx_status,
386 enum ATH_RX_TYPE *status);
387int _ath_rx_indicate(struct ath_softc *sc,
388 struct sk_buff *skb,
389 struct ath_recv_status *status,
390 u16 keyix);
391int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb,
392 struct ath_recv_status *status);
393
394/******/
395/* TX */
396/******/
397 292
293#define ATH_MAX_ANTENNA 3
294#define ATH_RXBUF 512
295#define WME_NUM_TID 16
398#define ATH_TXBUF 512 296#define ATH_TXBUF 512
399/* max number of transmit attempts (tries) */
400#define ATH_TXMAXTRY 13 297#define ATH_TXMAXTRY 13
401/* max number of 11n transmit attempts (tries) */
402#define ATH_11N_TXMAXTRY 10 298#define ATH_11N_TXMAXTRY 10
403/* max number of tries for management and control frames */
404#define ATH_MGT_TXMAXTRY 4 299#define ATH_MGT_TXMAXTRY 4
405#define WME_BA_BMP_SIZE 64 300#define WME_BA_BMP_SIZE 64
406#define WME_MAX_BA WME_BA_BMP_SIZE 301#define WME_MAX_BA WME_BA_BMP_SIZE
407#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA) 302#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
303
408#define TID_TO_WME_AC(_tid) \ 304#define TID_TO_WME_AC(_tid) \
409 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ 305 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
410 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ 306 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
411 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ 307 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
412 WME_AC_VO) 308 WME_AC_VO)
413 309
310#define WME_AC_BE 0
311#define WME_AC_BK 1
312#define WME_AC_VI 2
313#define WME_AC_VO 3
314#define WME_NUM_AC 4
315
316#define ADDBA_EXCHANGE_ATTEMPTS 10
317#define ATH_AGGR_DELIM_SZ 4
318#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
319/* number of delimiters for encryption padding */
320#define ATH_AGGR_ENCRYPTDELIM 10
321/* minimum h/w qdepth to be sustained to maximize aggregation */
322#define ATH_AGGR_MIN_QDEPTH 2
323#define ATH_AMPDU_SUBFRAME_DEFAULT 32
324#define IEEE80211_SEQ_SEQ_SHIFT 4
325#define IEEE80211_SEQ_MAX 4096
326#define IEEE80211_MIN_AMPDU_BUF 0x8
327#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13
328
329/* return whether a bit at index _n in bitmap _bm is set
330 * _sz is the size of the bitmap */
331#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \
332 ((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
333
334/* return block-ack bitmap index given sequence and starting sequence */
335#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
336
337/* returns delimiter padding required given the packet length */
338#define ATH_AGGR_GET_NDELIM(_len) \
339 (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
340 (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
341
342#define BAW_WITHIN(_start, _bawsz, _seqno) \
343 ((((_seqno) - (_start)) & 4095) < (_bawsz))
414 344
415/* Wireless Multimedia Extension Defines */ 345#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
416#define WME_AC_BE 0 /* best effort */ 346#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
417#define WME_AC_BK 1 /* background */ 347#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
418#define WME_AC_VI 2 /* video */ 348#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
419#define WME_AC_VO 3 /* voice */
420#define WME_NUM_AC 4
421 349
422enum ATH_SM_PWRSAV{ 350enum ATH_AGGR_STATUS {
423 ATH_SM_ENABLE, 351 ATH_AGGR_DONE,
424 ATH_SM_PWRSAV_STATIC, 352 ATH_AGGR_BAW_CLOSED,
425 ATH_SM_PWRSAV_DYNAMIC, 353 ATH_AGGR_LIMITED,
354 ATH_AGGR_SHORTPKT,
355 ATH_AGGR_8K_LIMITED,
426}; 356};
427 357
428/*
429 * Data transmit queue state. One of these exists for each
430 * hardware transmit queue. Packets sent to us from above
431 * are assigned to queues based on their priority. Not all
432 * devices support a complete set of hardware transmit queues.
433 * For those devices the array sc_ac2q will map multiple
434 * priorities to fewer hardware queues (typically all to one
435 * hardware queue).
436 */
437struct ath_txq { 358struct ath_txq {
438 u32 axq_qnum; /* hardware q number */ 359 u32 axq_qnum; /* hardware q number */
439 u32 *axq_link; /* link ptr in last TX desc */ 360 u32 *axq_link; /* link ptr in last TX desc */
@@ -443,10 +364,6 @@ struct ath_txq {
443 u32 axq_depth; /* queue depth */ 364 u32 axq_depth; /* queue depth */
444 u8 axq_aggr_depth; /* aggregates queued */ 365 u8 axq_aggr_depth; /* aggregates queued */
445 u32 axq_totalqueued; /* total ever queued */ 366 u32 axq_totalqueued; /* total ever queued */
446
447 /* count to determine if descriptor should generate int on this txq. */
448 u32 axq_intrcnt;
449
450 bool stopped; /* Is mac80211 queue stopped ? */ 367 bool stopped; /* Is mac80211 queue stopped ? */
451 struct ath_buf *axq_linkbuf; /* virtual addr of last buffer*/ 368 struct ath_buf *axq_linkbuf; /* virtual addr of last buffer*/
452 369
@@ -460,6 +377,10 @@ struct ath_txq {
460 struct list_head axq_acq; 377 struct list_head axq_acq;
461}; 378};
462 379
380#define AGGR_CLEANUP BIT(1)
381#define AGGR_ADDBA_COMPLETE BIT(2)
382#define AGGR_ADDBA_PROGRESS BIT(3)
383
463/* per TID aggregate tx state for a destination */ 384/* per TID aggregate tx state for a destination */
464struct ath_atx_tid { 385struct ath_atx_tid {
465 struct list_head list; /* round-robin tid entry */ 386 struct list_head list; /* round-robin tid entry */
@@ -475,9 +396,7 @@ struct ath_atx_tid {
475 int baw_tail; /* next unused tx buffer slot */ 396 int baw_tail; /* next unused tx buffer slot */
476 int sched; 397 int sched;
477 int paused; 398 int paused;
478 int cleanup_inprogress; 399 u8 state;
479 u32 addba_exchangecomplete:1;
480 int32_t addba_exchangeinprogress;
481 int addba_exchangeattempts; 400 int addba_exchangeattempts;
482}; 401};
483 402
@@ -490,32 +409,10 @@ struct ath_atx_ac {
490 struct list_head tid_q; /* queue of TIDs with buffers */ 409 struct list_head tid_q; /* queue of TIDs with buffers */
491}; 410};
492 411
493/* per dest tx state */
494struct ath_atx {
495 struct ath_atx_tid tid[WME_NUM_TID];
496 struct ath_atx_ac ac[WME_NUM_AC];
497};
498
499/* per-frame tx control block */ 412/* per-frame tx control block */
500struct ath_tx_control { 413struct ath_tx_control {
501 struct ath_node *an; 414 struct ath_txq *txq;
502 int if_id; 415 int if_id;
503 int qnum;
504 u32 ht:1;
505 u32 ps:1;
506 u32 use_minrate:1;
507 enum ath9k_pkt_type atype;
508 enum ath9k_key_type keytype;
509 u32 flags;
510 u16 seqno;
511 u16 tidno;
512 u16 txpower;
513 u16 frmlen;
514 u32 keyix;
515 int min_rate;
516 int mcast_rate;
517 struct ath_softc *dev;
518 dma_addr_t dmacontext;
519}; 416};
520 417
521/* per frame tx status block */ 418/* per frame tx status block */
@@ -528,21 +425,63 @@ struct ath_xmit_status {
528#define ATH_TX_BAR 0x04 425#define ATH_TX_BAR 0x04
529}; 426};
530 427
428/* All RSSI values are noise floor adjusted */
531struct ath_tx_stat { 429struct ath_tx_stat {
532 int rssi; /* RSSI (noise floor ajusted) */ 430 int rssi;
533 int rssictl[ATH_MAX_ANTENNA]; /* RSSI (noise floor ajusted) */ 431 int rssictl[ATH_MAX_ANTENNA];
534 int rssiextn[ATH_MAX_ANTENNA]; /* RSSI (noise floor ajusted) */ 432 int rssiextn[ATH_MAX_ANTENNA];
535 int rateieee; /* data rate xmitted (IEEE rate code) */ 433 int rateieee;
536 int rateKbps; /* data rate xmitted (Kbps) */ 434 int rateKbps;
537 int ratecode; /* phy rate code */ 435 int ratecode;
538 int flags; /* validity flags */ 436 int flags;
539/* if any of ctl,extn chain rssis are valid */
540#define ATH_TX_CHAIN_RSSI_VALID 0x01
541/* if extn chain rssis are valid */
542#define ATH_TX_RSSI_EXTN_VALID 0x02
543 u32 airtime; /* time on air per final tx rate */ 437 u32 airtime; /* time on air per final tx rate */
544}; 438};
545 439
440struct aggr_rifs_param {
441 int param_max_frames;
442 int param_max_len;
443 int param_rl;
444 int param_al;
445 struct ath_rc_series *param_rcs;
446};
447
448struct ath_node {
449 struct ath_softc *an_sc;
450 struct ath_atx_tid tid[WME_NUM_TID];
451 struct ath_atx_ac ac[WME_NUM_AC];
452 u16 maxampdu;
453 u8 mpdudensity;
454};
455
456struct ath_tx {
457 u16 seq_no;
458 u32 txqsetup;
459 int hwq_map[ATH9K_WME_AC_VO+1];
460 spinlock_t txbuflock;
461 struct list_head txbuf;
462 struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
463 struct ath_descdma txdma;
464};
465
466struct ath_rx {
467 u8 defant;
468 u8 rxotherant;
469 u32 *rxlink;
470 int bufsize;
471 unsigned int rxfilter;
472 spinlock_t rxflushlock;
473 spinlock_t rxbuflock;
474 struct list_head rxbuf;
475 struct ath_descdma rxdma;
476};
477
478int ath_startrecv(struct ath_softc *sc);
479bool ath_stoprecv(struct ath_softc *sc);
480void ath_flushrecv(struct ath_softc *sc);
481u32 ath_calcrxfilter(struct ath_softc *sc);
482int ath_rx_init(struct ath_softc *sc, int nbufs);
483void ath_rx_cleanup(struct ath_softc *sc);
484int ath_rx_tasklet(struct ath_softc *sc, int flush);
546struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); 485struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
547void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); 486void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
548int ath_tx_setup(struct ath_softc *sc, int haltype); 487int ath_tx_setup(struct ath_softc *sc, int haltype);
@@ -550,139 +489,51 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx);
550void ath_tx_draintxq(struct ath_softc *sc, 489void ath_tx_draintxq(struct ath_softc *sc,
551 struct ath_txq *txq, bool retry_tx); 490 struct ath_txq *txq, bool retry_tx);
552void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an); 491void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
553void ath_tx_node_cleanup(struct ath_softc *sc, 492void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
554 struct ath_node *an, bool bh_flag);
555void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an); 493void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an);
556void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq); 494void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
557int ath_tx_init(struct ath_softc *sc, int nbufs); 495int ath_tx_init(struct ath_softc *sc, int nbufs);
558int ath_tx_cleanup(struct ath_softc *sc); 496int ath_tx_cleanup(struct ath_softc *sc);
559int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype); 497int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
498struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb);
560int ath_txq_update(struct ath_softc *sc, int qnum, 499int ath_txq_update(struct ath_softc *sc, int qnum,
561 struct ath9k_tx_queue_info *q); 500 struct ath9k_tx_queue_info *q);
562int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb); 501int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
502 struct ath_tx_control *txctl);
563void ath_tx_tasklet(struct ath_softc *sc); 503void ath_tx_tasklet(struct ath_softc *sc);
564u32 ath_txq_depth(struct ath_softc *sc, int qnum); 504u32 ath_txq_depth(struct ath_softc *sc, int qnum);
565u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum); 505u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum);
566void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth);
567void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
568 struct ath_xmit_status *tx_status, struct ath_node *an);
569void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb); 506void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb);
507void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid);
508bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
509void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tidno);
510int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
511 u16 tid, u16 *ssn);
512int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
513void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
570 514
571/**********************/ 515/********/
572/* Node / Aggregation */ 516/* VAPs */
573/**********************/ 517/********/
574
575/* indicates the node is clened up */
576#define ATH_NODE_CLEAN 0x1
577/* indicates the node is 80211 power save */
578#define ATH_NODE_PWRSAVE 0x2
579
580#define ADDBA_EXCHANGE_ATTEMPTS 10
581#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */
582#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
583/* number of delimiters for encryption padding */
584#define ATH_AGGR_ENCRYPTDELIM 10
585/* minimum h/w qdepth to be sustained to maximize aggregation */
586#define ATH_AGGR_MIN_QDEPTH 2
587#define ATH_AMPDU_SUBFRAME_DEFAULT 32
588#define IEEE80211_SEQ_SEQ_SHIFT 4
589#define IEEE80211_SEQ_MAX 4096
590#define IEEE80211_MIN_AMPDU_BUF 0x8
591
592/* return whether a bit at index _n in bitmap _bm is set
593 * _sz is the size of the bitmap */
594#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \
595 ((_bm)[(_n) >> 5] & (1 << ((_n) & 31))))
596
597/* return block-ack bitmap index given sequence and starting sequence */
598#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
599
600/* returns delimiter padding required given the packet length */
601#define ATH_AGGR_GET_NDELIM(_len) \
602 (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \
603 (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2)
604
605#define BAW_WITHIN(_start, _bawsz, _seqno) \
606 ((((_seqno) - (_start)) & 4095) < (_bawsz))
607
608#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
609#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
610#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
611#define ATH_AN_2_TID(_an, _tidno) (&(_an)->an_aggr.tx.tid[(_tidno)])
612
613enum ATH_AGGR_STATUS {
614 ATH_AGGR_DONE,
615 ATH_AGGR_BAW_CLOSED,
616 ATH_AGGR_LIMITED,
617 ATH_AGGR_SHORTPKT,
618 ATH_AGGR_8K_LIMITED,
619};
620
621enum ATH_AGGR_CHECK {
622 AGGR_NOT_REQUIRED,
623 AGGR_REQUIRED,
624 AGGR_CLEANUP_PROGRESS,
625 AGGR_EXCHANGE_PROGRESS,
626 AGGR_EXCHANGE_DONE
627};
628 518
629struct aggr_rifs_param { 519/*
630 int param_max_frames; 520 * Define the scheme that we select MAC address for multiple
631 int param_max_len; 521 * BSS on the same radio. The very first VAP will just use the MAC
632 int param_rl; 522 * address from the EEPROM. For the next 3 VAPs, we set the
633 int param_al; 523 * U/L bit (bit 1) in MAC address, and use the next two bits as the
634 struct ath_rc_series *param_rcs; 524 * index of the VAP.
635}; 525 */
636 526
637/* Per-node aggregation state */ 527#define ATH_SET_VAP_BSSID_MASK(bssid_mask) \
638struct ath_node_aggr { 528 ((bssid_mask)[0] &= ~(((ATH_BCBUF-1)<<2)|0x02))
639 struct ath_atx tx; /* node transmit state */
640 struct ath_arx rx; /* node receive state */
641};
642 529
643/* driver-specific node state */ 530struct ath_vap {
644struct ath_node { 531 int av_bslot;
645 struct list_head list; 532 enum nl80211_iftype av_opmode;
646 struct ath_softc *an_sc; 533 struct ath_buf *av_bcbuf;
647 atomic_t an_refcnt; 534 struct ath_tx_control av_btxctl;
648 struct ath_chainmask_sel an_chainmask_sel;
649 struct ath_node_aggr an_aggr;
650 u8 an_smmode; /* SM Power save mode */
651 u8 an_flags;
652 u8 an_addr[ETH_ALEN];
653}; 535};
654 536
655void ath_tx_resume_tid(struct ath_softc *sc,
656 struct ath_atx_tid *tid);
657enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
658 struct ath_node *an, u8 tidno);
659void ath_tx_aggr_teardown(struct ath_softc *sc,
660 struct ath_node *an, u8 tidno);
661void ath_rx_aggr_teardown(struct ath_softc *sc,
662 struct ath_node *an, u8 tidno);
663int ath_rx_aggr_start(struct ath_softc *sc,
664 const u8 *addr,
665 u16 tid,
666 u16 *ssn);
667int ath_rx_aggr_stop(struct ath_softc *sc,
668 const u8 *addr,
669 u16 tid);
670int ath_tx_aggr_start(struct ath_softc *sc,
671 const u8 *addr,
672 u16 tid,
673 u16 *ssn);
674int ath_tx_aggr_stop(struct ath_softc *sc,
675 const u8 *addr,
676 u16 tid);
677void ath_newassoc(struct ath_softc *sc,
678 struct ath_node *node, int isnew, int isuapsd);
679struct ath_node *ath_node_attach(struct ath_softc *sc,
680 u8 addr[ETH_ALEN], int if_id);
681void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag);
682struct ath_node *ath_node_get(struct ath_softc *sc, u8 addr[ETH_ALEN]);
683void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag);
684struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr);
685
686/*******************/ 537/*******************/
687/* Beacon Handling */ 538/* Beacon Handling */
688/*******************/ 539/*******************/
@@ -693,12 +544,11 @@ struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr);
693 * number of beacon intervals, the game's up. 544 * number of beacon intervals, the game's up.
694 */ 545 */
695#define BSTUCK_THRESH (9 * ATH_BCBUF) 546#define BSTUCK_THRESH (9 * ATH_BCBUF)
696#define ATH_BCBUF 4 /* number of beacon buffers */ 547#define ATH_BCBUF 1
697#define ATH_DEFAULT_BINTVAL 100 /* default beacon interval in TU */ 548#define ATH_DEFAULT_BINTVAL 100 /* TU */
698#define ATH_DEFAULT_BMISS_LIMIT 10 549#define ATH_DEFAULT_BMISS_LIMIT 10
699#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) 550#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
700 551
701/* beacon configuration */
702struct ath_beacon_config { 552struct ath_beacon_config {
703 u16 beacon_interval; 553 u16 beacon_interval;
704 u16 listen_interval; 554 u16 listen_interval;
@@ -712,93 +562,32 @@ struct ath_beacon_config {
712 } u; /* last received beacon/probe response timestamp of this BSS. */ 562 } u; /* last received beacon/probe response timestamp of this BSS. */
713}; 563};
714 564
565struct ath_beacon {
566 enum {
567 OK, /* no change needed */
568 UPDATE, /* update pending */
569 COMMIT /* beacon sent, commit change */
570 } updateslot; /* slot time update fsm */
571
572 u32 beaconq;
573 u32 bmisscnt;
574 u32 ast_be_xmit;
575 u64 bc_tstamp;
576 int bslot[ATH_BCBUF];
577 int slottime;
578 int slotupdate;
579 struct ath9k_tx_queue_info beacon_qi;
580 struct ath_descdma bdma;
581 struct ath_txq *cabq;
582 struct list_head bbuf;
583};
584
715void ath9k_beacon_tasklet(unsigned long data); 585void ath9k_beacon_tasklet(unsigned long data);
716void ath_beacon_config(struct ath_softc *sc, int if_id); 586void ath_beacon_config(struct ath_softc *sc, int if_id);
717int ath_beaconq_setup(struct ath_hal *ah); 587int ath_beaconq_setup(struct ath_hal *ah);
718int ath_beacon_alloc(struct ath_softc *sc, int if_id); 588int ath_beacon_alloc(struct ath_softc *sc, int if_id);
719void ath_bstuck_process(struct ath_softc *sc);
720void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp); 589void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp);
721void ath_beacon_sync(struct ath_softc *sc, int if_id); 590void ath_beacon_sync(struct ath_softc *sc, int if_id);
722void ath_get_beaconconfig(struct ath_softc *sc,
723 int if_id,
724 struct ath_beacon_config *conf);
725/********/
726/* VAPs */
727/********/
728
729/*
730 * Define the scheme that we select MAC address for multiple
731 * BSS on the same radio. The very first VAP will just use the MAC
732 * address from the EEPROM. For the next 3 VAPs, we set the
733 * U/L bit (bit 1) in MAC address, and use the next two bits as the
734 * index of the VAP.
735 */
736
737#define ATH_SET_VAP_BSSID_MASK(bssid_mask) \
738 ((bssid_mask)[0] &= ~(((ATH_BCBUF-1)<<2)|0x02))
739
740/* VAP configuration (from protocol layer) */
741struct ath_vap_config {
742 u32 av_fixed_rateset;
743 u32 av_fixed_retryset;
744};
745
746/* driver-specific vap state */
747struct ath_vap {
748 struct ieee80211_vif *av_if_data;
749 enum ath9k_opmode av_opmode; /* VAP operational mode */
750 struct ath_buf *av_bcbuf; /* beacon buffer */
751 struct ath_tx_control av_btxctl; /* txctl information for beacon */
752 int av_bslot; /* beacon slot index */
753 struct ath_vap_config av_config;/* vap configuration parameters*/
754 struct ath_rate_node *rc_node;
755};
756
757int ath_vap_attach(struct ath_softc *sc,
758 int if_id,
759 struct ieee80211_vif *if_data,
760 enum ath9k_opmode opmode);
761int ath_vap_detach(struct ath_softc *sc, int if_id);
762int ath_vap_config(struct ath_softc *sc,
763 int if_id, struct ath_vap_config *if_config);
764
765/*********************/
766/* Antenna diversity */
767/*********************/
768
769#define ATH_ANT_DIV_MAX_CFG 2
770#define ATH_ANT_DIV_MIN_IDLE_US 1000000 /* us */
771#define ATH_ANT_DIV_MIN_SCAN_US 50000 /* us */
772
773enum ATH_ANT_DIV_STATE{
774 ATH_ANT_DIV_IDLE,
775 ATH_ANT_DIV_SCAN, /* evaluating antenna */
776};
777
778struct ath_antdiv {
779 struct ath_softc *antdiv_sc;
780 u8 antdiv_start;
781 enum ATH_ANT_DIV_STATE antdiv_state;
782 u8 antdiv_num_antcfg;
783 u8 antdiv_curcfg;
784 u8 antdiv_bestcfg;
785 int32_t antdivf_rssitrig;
786 int32_t antdiv_lastbrssi[ATH_ANT_DIV_MAX_CFG];
787 u64 antdiv_lastbtsf[ATH_ANT_DIV_MAX_CFG];
788 u64 antdiv_laststatetsf;
789 u8 antdiv_bssid[ETH_ALEN];
790};
791
792void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
793 struct ath_softc *sc, int32_t rssitrig);
794void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
795 u8 num_antcfg,
796 const u8 *bssid);
797void ath_slow_ant_div_stop(struct ath_antdiv *antdiv);
798void ath_slow_ant_div(struct ath_antdiv *antdiv,
799 struct ieee80211_hdr *wh,
800 struct ath_rx_status *rx_stats);
801void ath_setdefantenna(void *sc, u32 antenna);
802 591
803/*******/ 592/*******/
804/* ANI */ 593/* ANI */
@@ -863,7 +652,7 @@ struct ath_rfkill {
863#define DEFAULT_CACHELINE 32 652#define DEFAULT_CACHELINE 32
864#define ATH_DEFAULT_NOISE_FLOOR -95 653#define ATH_DEFAULT_NOISE_FLOOR -95
865#define ATH_REGCLASSIDS_MAX 10 654#define ATH_REGCLASSIDS_MAX 10
866#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ 655#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
867#define ATH_MAX_SW_RETRIES 10 656#define ATH_MAX_SW_RETRIES 10
868#define ATH_CHAN_MAX 255 657#define ATH_CHAN_MAX 255
869#define IEEE80211_WEP_NKID 4 /* number of key ids */ 658#define IEEE80211_WEP_NKID 4 /* number of key ids */
@@ -876,34 +665,12 @@ struct ath_rfkill {
876 * Different parts have different size key caches. We handle 665 * Different parts have different size key caches. We handle
877 * up to ATH_KEYMAX entries (could dynamically allocate state). 666 * up to ATH_KEYMAX entries (could dynamically allocate state).
878 */ 667 */
879#define ATH_KEYMAX 128 /* max key cache size we handle */ 668#define ATH_KEYMAX 128 /* max key cache size we handle */
880 669
881#define ATH_IF_ID_ANY 0xff 670#define ATH_IF_ID_ANY 0xff
882#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 671#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
883 672#define ATH_RSSI_DUMMY_MARKER 0x127
884#define RSSI_LPF_THRESHOLD -20 673#define ATH_RATE_DUMMY_MARKER 0
885#define ATH_RSSI_EP_MULTIPLIER (1<<7) /* pow2 to optimize out * and / */
886#define ATH_RATE_DUMMY_MARKER 0
887#define ATH_RSSI_LPF_LEN 10
888#define ATH_RSSI_DUMMY_MARKER 0x127
889
890#define ATH_EP_MUL(x, mul) ((x) * (mul))
891#define ATH_EP_RND(x, mul) \
892 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
893#define ATH_RSSI_OUT(x) \
894 (((x) != ATH_RSSI_DUMMY_MARKER) ? \
895 (ATH_EP_RND((x), ATH_RSSI_EP_MULTIPLIER)) : ATH_RSSI_DUMMY_MARKER)
896#define ATH_RSSI_IN(x) \
897 (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
898#define ATH_LPF_RSSI(x, y, len) \
899 ((x != ATH_RSSI_DUMMY_MARKER) ? \
900 (((x) * ((len) - 1) + (y)) / (len)) : (y))
901#define ATH_RSSI_LPF(x, y) do { \
902 if ((y) >= RSSI_LPF_THRESHOLD) \
903 x = ATH_LPF_RSSI((x), \
904 ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
905 } while (0)
906
907 674
908enum PROT_MODE { 675enum PROT_MODE {
909 PROT_M_NONE = 0, 676 PROT_M_NONE = 0,
@@ -911,19 +678,6 @@ enum PROT_MODE {
911 PROT_M_CTSONLY 678 PROT_M_CTSONLY
912}; 679};
913 680
914enum RATE_TYPE {
915 NORMAL_RATE = 0,
916 HALF_RATE,
917 QUARTER_RATE
918};
919
920struct ath_ht_info {
921 enum ath9k_ht_macmode tx_chan_width;
922 u16 maxampdu;
923 u8 mpdudensity;
924 u8 ext_chan_offset;
925};
926
927#define SC_OP_INVALID BIT(0) 681#define SC_OP_INVALID BIT(0)
928#define SC_OP_BEACONS BIT(1) 682#define SC_OP_BEACONS BIT(1)
929#define SC_OP_RXAGGR BIT(2) 683#define SC_OP_RXAGGR BIT(2)
@@ -944,141 +698,57 @@ struct ath_softc {
944 struct pci_dev *pdev; 698 struct pci_dev *pdev;
945 struct tasklet_struct intr_tq; 699 struct tasklet_struct intr_tq;
946 struct tasklet_struct bcon_tasklet; 700 struct tasklet_struct bcon_tasklet;
947 struct ath_config sc_config;
948 struct ath_hal *sc_ah; 701 struct ath_hal *sc_ah;
949 struct ath_rate_softc *sc_rc;
950 void __iomem *mem; 702 void __iomem *mem;
703 spinlock_t sc_resetlock;
704 struct mutex mutex;
951 705
952 u8 sc_curbssid[ETH_ALEN]; 706 u8 sc_curbssid[ETH_ALEN];
953 u8 sc_myaddr[ETH_ALEN]; 707 u8 sc_myaddr[ETH_ALEN];
954 u8 sc_bssidmask[ETH_ALEN]; 708 u8 sc_bssidmask[ETH_ALEN];
955
956 int sc_debug;
957 u32 sc_intrstatus; 709 u32 sc_intrstatus;
958 u32 sc_flags; /* SC_OP_* */ 710 u32 sc_flags; /* SC_OP_* */
959 unsigned int rx_filter;
960 u16 sc_curtxpow; 711 u16 sc_curtxpow;
961 u16 sc_curaid; 712 u16 sc_curaid;
962 u16 sc_cachelsz; 713 u16 sc_cachelsz;
963 int sc_slotupdate; /* slot to next advance fsm */ 714 u8 sc_nbcnvaps;
964 int sc_slottime; 715 u16 sc_nvaps;
965 int sc_bslot[ATH_BCBUF];
966 u8 sc_tx_chainmask; 716 u8 sc_tx_chainmask;
967 u8 sc_rx_chainmask; 717 u8 sc_rx_chainmask;
718 u32 sc_keymax;
719 DECLARE_BITMAP(sc_keymap, ATH_KEYMAX);
720 u8 sc_splitmic;
721 u8 sc_protrix;
968 enum ath9k_int sc_imask; 722 enum ath9k_int sc_imask;
969 enum wireless_mode sc_curmode; /* current phy mode */
970 enum PROT_MODE sc_protmode; 723 enum PROT_MODE sc_protmode;
971
972 u8 sc_nbcnvaps; /* # of vaps sending beacons */
973 u16 sc_nvaps; /* # of active virtual ap's */
974 struct ath_vap *sc_vaps[ATH_BCBUF];
975
976 u8 sc_mcastantenna;
977 u8 sc_defant; /* current default antenna */
978 u8 sc_rxotherant; /* rx's on non-default antenna */
979
980 struct ath9k_node_stats sc_halstats; /* station-mode rssi stats */
981 struct list_head node_list;
982 struct ath_ht_info sc_ht_info;
983 enum ath9k_ht_extprotspacing sc_ht_extprotspacing; 724 enum ath9k_ht_extprotspacing sc_ht_extprotspacing;
725 enum ath9k_ht_macmode tx_chan_width;
984 726
985#ifdef CONFIG_SLOW_ANT_DIV 727 struct ath_config sc_config;
986 struct ath_antdiv sc_antdiv; 728 struct ath_rx rx;
987#endif 729 struct ath_tx tx;
988 enum { 730 struct ath_beacon beacon;
989 OK, /* no change needed */ 731 struct ieee80211_vif *sc_vaps[ATH_BCBUF];
990 UPDATE, /* update pending */
991 COMMIT /* beacon sent, commit change */
992 } sc_updateslot; /* slot time update fsm */
993
994 /* Crypto */
995 u32 sc_keymax; /* size of key cache */
996 DECLARE_BITMAP(sc_keymap, ATH_KEYMAX); /* key use bit map */
997 u8 sc_splitmic; /* split TKIP MIC keys */
998
999 /* RX */
1000 struct list_head sc_rxbuf;
1001 struct ath_descdma sc_rxdma;
1002 int sc_rxbufsize; /* rx size based on mtu */
1003 u32 *sc_rxlink; /* link ptr in last RX desc */
1004
1005 /* TX */
1006 struct list_head sc_txbuf;
1007 struct ath_txq sc_txq[ATH9K_NUM_TX_QUEUES];
1008 struct ath_descdma sc_txdma;
1009 u32 sc_txqsetup;
1010 u32 sc_txintrperiod; /* tx interrupt batching */
1011 int sc_haltype2q[ATH9K_WME_AC_VO+1]; /* HAL WME AC -> h/w qnum */
1012 u16 seq_no; /* TX sequence number */
1013
1014 /* Beacon */
1015 struct ath9k_tx_queue_info sc_beacon_qi;
1016 struct ath_descdma sc_bdma;
1017 struct ath_txq *sc_cabq;
1018 struct list_head sc_bbuf;
1019 u32 sc_bhalq;
1020 u32 sc_bmisscount;
1021 u32 ast_be_xmit; /* beacons transmitted */
1022 u64 bc_tstamp;
1023
1024 /* Rate */
1025 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX]; 732 struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
1026 const struct ath9k_rate_table *sc_currates; 733 struct ath_rate_table *hw_rate_table[ATH9K_MODE_MAX];
1027 u8 sc_rixmap[256]; /* IEEE to h/w rate table ix */ 734 struct ath_rate_table *cur_rate_table;
1028 u8 sc_protrix; /* protection rate index */
1029 struct {
1030 u32 rateKbps; /* transfer rate in kbs */
1031 u8 ieeerate; /* IEEE rate */
1032 } sc_hwmap[256]; /* h/w rate ix mappings */
1033
1034 /* Channel, Band */
1035 struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX]; 735 struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX];
1036 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 736 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
1037
1038 /* Locks */
1039 spinlock_t sc_rxflushlock;
1040 spinlock_t sc_rxbuflock;
1041 spinlock_t sc_txbuflock;
1042 spinlock_t sc_resetlock;
1043 spinlock_t node_lock;
1044
1045 /* LEDs */
1046 struct ath_led radio_led; 737 struct ath_led radio_led;
1047 struct ath_led assoc_led; 738 struct ath_led assoc_led;
1048 struct ath_led tx_led; 739 struct ath_led tx_led;
1049 struct ath_led rx_led; 740 struct ath_led rx_led;
1050
1051 /* Rfkill */
1052 struct ath_rfkill rf_kill; 741 struct ath_rfkill rf_kill;
1053
1054 /* ANI */
1055 struct ath_ani sc_ani; 742 struct ath_ani sc_ani;
743 struct ath9k_node_stats sc_halstats;
744#ifdef CONFIG_ATH9K_DEBUG
745 struct ath9k_debug sc_debug;
746#endif
1056}; 747};
1057 748
1058int ath_init(u16 devid, struct ath_softc *sc);
1059void ath_deinit(struct ath_softc *sc);
1060int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan);
1061int ath_suspend(struct ath_softc *sc);
1062irqreturn_t ath_isr(int irq, void *dev);
1063int ath_reset(struct ath_softc *sc, bool retry_tx); 749int ath_reset(struct ath_softc *sc, bool retry_tx);
1064int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan);
1065
1066/*********************/
1067/* Utility Functions */
1068/*********************/
1069
1070void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot);
1071int ath_keyset(struct ath_softc *sc,
1072 u16 keyix,
1073 struct ath9k_keyval *hk,
1074 const u8 mac[ETH_ALEN]);
1075int ath_get_hal_qnum(u16 queue, struct ath_softc *sc); 750int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
1076int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc); 751int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
1077void ath_setslottime(struct ath_softc *sc);
1078void ath_update_txpow(struct ath_softc *sc);
1079int ath_cabq_update(struct ath_softc *); 752int ath_cabq_update(struct ath_softc *);
1080void ath_get_currentCountry(struct ath_softc *sc,
1081 struct ath9k_country_entry *ctry);
1082u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp);
1083 753
1084#endif /* CORE_H */ 754#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath9k/debug.c b/drivers/net/wireless/ath9k/debug.c
new file mode 100644
index 000000000000..a80ed576830f
--- /dev/null
+++ b/drivers/net/wireless/ath9k/debug.c
@@ -0,0 +1,262 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "reg.h"
19#include "hw.h"
20
21static unsigned int ath9k_debug = DBG_DEFAULT;
22module_param_named(debug, ath9k_debug, uint, 0);
23
24void DPRINTF(struct ath_softc *sc, int dbg_mask, const char *fmt, ...)
25{
26 if (!sc)
27 return;
28
29 if (sc->sc_debug.debug_mask & dbg_mask) {
30 va_list args;
31
32 va_start(args, fmt);
33 printk(KERN_DEBUG "ath9k: ");
34 vprintk(fmt, args);
35 va_end(args);
36 }
37}
38
/*
 * Common debugfs open handler: stash the ath_softc pointer (stored in
 * the inode at debugfs_create_file() time) so read handlers can reach it
 * via file->private_data.
 */
static int ath9k_debugfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
44
45static ssize_t read_file_dma(struct file *file, char __user *user_buf,
46 size_t count, loff_t *ppos)
47{
48 struct ath_softc *sc = file->private_data;
49 struct ath_hal *ah = sc->sc_ah;
50 char buf[1024];
51 unsigned int len = 0;
52 u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
53 int i, qcuOffset = 0, dcuOffset = 0;
54 u32 *qcuBase = &val[0], *dcuBase = &val[4];
55
56 REG_WRITE(ah, AR_MACMISC,
57 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
58 (AR_MACMISC_MISC_OBS_BUS_1 <<
59 AR_MACMISC_MISC_OBS_BUS_MSB_S)));
60
61 len += snprintf(buf + len, sizeof(buf) - len,
62 "Raw DMA Debug values:\n");
63
64 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
65 if (i % 4 == 0)
66 len += snprintf(buf + len, sizeof(buf) - len, "\n");
67
68 val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u32)));
69 len += snprintf(buf + len, sizeof(buf) - len, "%d: %08x ",
70 i, val[i]);
71 }
72
73 len += snprintf(buf + len, sizeof(buf) - len, "\n\n");
74 len += snprintf(buf + len, sizeof(buf) - len,
75 "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
76
77 for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
78 if (i == 8) {
79 qcuOffset = 0;
80 qcuBase++;
81 }
82
83 if (i == 6) {
84 dcuOffset = 0;
85 dcuBase++;
86 }
87
88 len += snprintf(buf + len, sizeof(buf) - len,
89 "%2d %2x %1x %2x %2x\n",
90 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
91 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
92 val[2] & (0x7 << (i * 3)) >> (i * 3),
93 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
94 }
95
96 len += snprintf(buf + len, sizeof(buf) - len, "\n");
97
98 len += snprintf(buf + len, sizeof(buf) - len,
99 "qcu_stitch state: %2x qcu_fetch state: %2x\n",
100 (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
101 len += snprintf(buf + len, sizeof(buf) - len,
102 "qcu_complete state: %2x dcu_complete state: %2x\n",
103 (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
104 len += snprintf(buf + len, sizeof(buf) - len,
105 "dcu_arb state: %2x dcu_fp state: %2x\n",
106 (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
107 len += snprintf(buf + len, sizeof(buf) - len,
108 "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
109 (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
110 len += snprintf(buf + len, sizeof(buf) - len,
111 "txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
112 (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
113 len += snprintf(buf + len, sizeof(buf) - len,
114 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
115 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
116
117 len += snprintf(buf + len, sizeof(buf) - len, "pcu observe: 0x%x \n",
118 REG_READ(ah, AR_OBS_BUS_1));
119 len += snprintf(buf + len, sizeof(buf) - len,
120 "AR_CR: 0x%x \n", REG_READ(ah, AR_CR));
121
122 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
123}
124
/* File operations for the read-only debugfs "dma" entry. */
static const struct file_operations fops_dma = {
	.read = read_file_dma,
	.open = ath9k_debugfs_open,
	.owner = THIS_MODULE
};
130
131
132void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
133{
134 if (status)
135 sc->sc_debug.stats.istats.total++;
136 if (status & ATH9K_INT_RX)
137 sc->sc_debug.stats.istats.rxok++;
138 if (status & ATH9K_INT_RXEOL)
139 sc->sc_debug.stats.istats.rxeol++;
140 if (status & ATH9K_INT_RXORN)
141 sc->sc_debug.stats.istats.rxorn++;
142 if (status & ATH9K_INT_TX)
143 sc->sc_debug.stats.istats.txok++;
144 if (status & ATH9K_INT_TXURN)
145 sc->sc_debug.stats.istats.txurn++;
146 if (status & ATH9K_INT_MIB)
147 sc->sc_debug.stats.istats.mib++;
148 if (status & ATH9K_INT_RXPHY)
149 sc->sc_debug.stats.istats.rxphyerr++;
150 if (status & ATH9K_INT_RXKCM)
151 sc->sc_debug.stats.istats.rx_keycache_miss++;
152 if (status & ATH9K_INT_SWBA)
153 sc->sc_debug.stats.istats.swba++;
154 if (status & ATH9K_INT_BMISS)
155 sc->sc_debug.stats.istats.bmiss++;
156 if (status & ATH9K_INT_BNR)
157 sc->sc_debug.stats.istats.bnr++;
158 if (status & ATH9K_INT_CST)
159 sc->sc_debug.stats.istats.cst++;
160 if (status & ATH9K_INT_GTT)
161 sc->sc_debug.stats.istats.gtt++;
162 if (status & ATH9K_INT_TIM)
163 sc->sc_debug.stats.istats.tim++;
164 if (status & ATH9K_INT_CABEND)
165 sc->sc_debug.stats.istats.cabend++;
166 if (status & ATH9K_INT_DTIMSYNC)
167 sc->sc_debug.stats.istats.dtimsync++;
168 if (status & ATH9K_INT_DTIM)
169 sc->sc_debug.stats.istats.dtim++;
170}
171
172static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
173 size_t count, loff_t *ppos)
174{
175 struct ath_softc *sc = file->private_data;
176 char buf[512];
177 unsigned int len = 0;
178
179 len += snprintf(buf + len, sizeof(buf) - len,
180 "%8s: %10u\n", "RX", sc->sc_debug.stats.istats.rxok);
181 len += snprintf(buf + len, sizeof(buf) - len,
182 "%8s: %10u\n", "RXEOL", sc->sc_debug.stats.istats.rxeol);
183 len += snprintf(buf + len, sizeof(buf) - len,
184 "%8s: %10u\n", "RXORN", sc->sc_debug.stats.istats.rxorn);
185 len += snprintf(buf + len, sizeof(buf) - len,
186 "%8s: %10u\n", "TX", sc->sc_debug.stats.istats.txok);
187 len += snprintf(buf + len, sizeof(buf) - len,
188 "%8s: %10u\n", "TXURN", sc->sc_debug.stats.istats.txurn);
189 len += snprintf(buf + len, sizeof(buf) - len,
190 "%8s: %10u\n", "MIB", sc->sc_debug.stats.istats.mib);
191 len += snprintf(buf + len, sizeof(buf) - len,
192 "%8s: %10u\n", "RXPHY", sc->sc_debug.stats.istats.rxphyerr);
193 len += snprintf(buf + len, sizeof(buf) - len,
194 "%8s: %10u\n", "RXKCM", sc->sc_debug.stats.istats.rx_keycache_miss);
195 len += snprintf(buf + len, sizeof(buf) - len,
196 "%8s: %10u\n", "SWBA", sc->sc_debug.stats.istats.swba);
197 len += snprintf(buf + len, sizeof(buf) - len,
198 "%8s: %10u\n", "BMISS", sc->sc_debug.stats.istats.bmiss);
199 len += snprintf(buf + len, sizeof(buf) - len,
200 "%8s: %10u\n", "BNR", sc->sc_debug.stats.istats.bnr);
201 len += snprintf(buf + len, sizeof(buf) - len,
202 "%8s: %10u\n", "CST", sc->sc_debug.stats.istats.cst);
203 len += snprintf(buf + len, sizeof(buf) - len,
204 "%8s: %10u\n", "GTT", sc->sc_debug.stats.istats.gtt);
205 len += snprintf(buf + len, sizeof(buf) - len,
206 "%8s: %10u\n", "TIM", sc->sc_debug.stats.istats.tim);
207 len += snprintf(buf + len, sizeof(buf) - len,
208 "%8s: %10u\n", "CABEND", sc->sc_debug.stats.istats.cabend);
209 len += snprintf(buf + len, sizeof(buf) - len,
210 "%8s: %10u\n", "DTIMSYNC", sc->sc_debug.stats.istats.dtimsync);
211 len += snprintf(buf + len, sizeof(buf) - len,
212 "%8s: %10u\n", "DTIM", sc->sc_debug.stats.istats.dtim);
213 len += snprintf(buf + len, sizeof(buf) - len,
214 "%8s: %10u\n", "TOTAL", sc->sc_debug.stats.istats.total);
215
216 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
217}
218
/* File operations for the read-only debugfs "interrupt" entry. */
static const struct file_operations fops_interrupt = {
	.read = read_file_interrupt,
	.open = ath9k_debugfs_open,
	.owner = THIS_MODULE
};
224
/*
 * Create the debugfs hierarchy ath9k/<phyN>/{dma,interrupt} and latch the
 * module-parameter debug mask into the softc.
 *
 * Returns 0 on success or -ENOMEM on any creation failure; partially
 * created entries are torn down via ath9k_exit_debug().
 */
int ath9k_init_debug(struct ath_softc *sc)
{
	sc->sc_debug.debug_mask = ath9k_debug;

	sc->sc_debug.debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!sc->sc_debug.debugfs_root)
		goto err;

	/* Per-device subdirectory named after the wiphy (e.g. "phy0"). */
	sc->sc_debug.debugfs_phy = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
						      sc->sc_debug.debugfs_root);
	if (!sc->sc_debug.debugfs_phy)
		goto err;

	sc->sc_debug.debugfs_dma = debugfs_create_file("dma", S_IRUGO,
				       sc->sc_debug.debugfs_phy, sc, &fops_dma);
	if (!sc->sc_debug.debugfs_dma)
		goto err;

	sc->sc_debug.debugfs_interrupt = debugfs_create_file("interrupt",
						     S_IRUGO,
						     sc->sc_debug.debugfs_phy,
						     sc, &fops_interrupt);
	if (!sc->sc_debug.debugfs_interrupt)
		goto err;

	return 0;
err:
	ath9k_exit_debug(sc);
	return -ENOMEM;
}
255
/*
 * Tear down the debugfs entries, children before parent directories.
 * Safe after a partial ath9k_init_debug(): debugfs_remove() ignores NULL.
 */
void ath9k_exit_debug(struct ath_softc *sc)
{
	debugfs_remove(sc->sc_debug.debugfs_interrupt);
	debugfs_remove(sc->sc_debug.debugfs_dma);
	debugfs_remove(sc->sc_debug.debugfs_phy);
	debugfs_remove(sc->sc_debug.debugfs_root);
}
diff --git a/drivers/net/wireless/ath9k/eeprom.c b/drivers/net/wireless/ath9k/eeprom.c
new file mode 100644
index 000000000000..acd6c5374d44
--- /dev/null
+++ b/drivers/net/wireless/ath9k/eeprom.c
@@ -0,0 +1,2824 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "hw.h"
19#include "reg.h"
20#include "phy.h"
21
22static void ath9k_hw_analog_shift_rmw(struct ath_hal *ah,
23 u32 reg, u32 mask,
24 u32 shift, u32 val)
25{
26 u32 regVal;
27
28 regVal = REG_READ(ah, reg) & ~mask;
29 regVal |= (val << shift) & mask;
30
31 REG_WRITE(ah, reg, regVal);
32
33 if (ah->ah_config.analog_shiftreg)
34 udelay(100);
35
36 return;
37}
38
39static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
40{
41
42 if (fbin == AR5416_BCHAN_UNUSED)
43 return fbin;
44
45 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
46}
47
48static inline int16_t ath9k_hw_interpolate(u16 target,
49 u16 srcLeft, u16 srcRight,
50 int16_t targetLeft,
51 int16_t targetRight)
52{
53 int16_t rv;
54
55 if (srcRight == srcLeft) {
56 rv = targetLeft;
57 } else {
58 rv = (int16_t) (((target - srcLeft) * targetRight +
59 (srcRight - target) * targetLeft) /
60 (srcRight - srcLeft));
61 }
62 return rv;
63}
64
65static inline bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList,
66 u16 listSize, u16 *indexL,
67 u16 *indexR)
68{
69 u16 i;
70
71 if (target <= pList[0]) {
72 *indexL = *indexR = 0;
73 return true;
74 }
75 if (target >= pList[listSize - 1]) {
76 *indexL = *indexR = (u16) (listSize - 1);
77 return true;
78 }
79
80 for (i = 0; i < listSize - 1; i++) {
81 if (pList[i] == target) {
82 *indexL = *indexR = i;
83 return true;
84 }
85 if (target < pList[i + 1]) {
86 *indexL = i;
87 *indexR = (u16) (i + 1);
88 return false;
89 }
90 }
91 return false;
92}
93
/*
 * Read one 16-bit word from the on-chip EEPROM controller.
 *
 * The dummy read of the offset register kicks off the access; we then
 * poll until the busy/protected bits clear and extract the data field.
 * Returns false on timeout or protected access.
 */
static bool ath9k_hw_eeprom_read(struct ath_hal *ah, u32 off, u16 *data)
{
	(void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));

	if (!ath9k_hw_wait(ah,
			   AR_EEPROM_STATUS_DATA,
			   AR_EEPROM_STATUS_DATA_BUSY |
			   AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0)) {
		return false;
	}

	*data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
		   AR_EEPROM_STATUS_DATA_VAL);

	return true;
}
110
/*
 * Map the calibration-data flash region into the kernel address space.
 * The mapping is kept in ahp->ah_cal_mem for ath9k_hw_flash_read().
 * Returns 0 on success, -EIO if ioremap fails.
 */
static int ath9k_hw_flash_map(struct ath_hal *ah)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	ahp->ah_cal_mem = ioremap(AR5416_EEPROM_START_ADDR, AR5416_EEPROM_MAX);

	if (!ahp->ah_cal_mem) {
		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
			"cannot remap eeprom region \n");
		return -EIO;
	}

	return 0;
}
125
/*
 * Read one 16-bit word from the mapped flash region (see
 * ath9k_hw_flash_map()).  Always reports success.
 */
static bool ath9k_hw_flash_read(struct ath_hal *ah, u32 off, u16 *data)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	*data = ioread16(ahp->ah_cal_mem + off);

	return true;
}
134
135static inline bool ath9k_hw_nvram_read(struct ath_hal *ah, u32 off, u16 *data)
136{
137 if (ath9k_hw_use_flash(ah))
138 return ath9k_hw_flash_read(ah, off, data);
139 else
140 return ath9k_hw_eeprom_read(ah, off, data);
141}
142
143static bool ath9k_hw_fill_4k_eeprom(struct ath_hal *ah)
144{
145#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
146 struct ath_hal_5416 *ahp = AH5416(ah);
147 struct ar5416_eeprom_4k *eep = &ahp->ah_eeprom.map4k;
148 u16 *eep_data;
149 int addr, eep_start_loc = 0;
150
151 eep_start_loc = 64;
152
153 if (!ath9k_hw_use_flash(ah)) {
154 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
155 "Reading from EEPROM, not flash\n");
156 }
157
158 eep_data = (u16 *)eep;
159
160 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
161 if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) {
162 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
163 "Unable to read eeprom region \n");
164 return false;
165 }
166 eep_data++;
167 }
168 return true;
169#undef SIZE_EEPROM_4K
170}
171
172static bool ath9k_hw_fill_def_eeprom(struct ath_hal *ah)
173{
174#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
175 struct ath_hal_5416 *ahp = AH5416(ah);
176 struct ar5416_eeprom_def *eep = &ahp->ah_eeprom.def;
177 u16 *eep_data;
178 int addr, ar5416_eep_start_loc = 0x100;
179
180 eep_data = (u16 *)eep;
181
182 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
183 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
184 eep_data)) {
185 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
186 "Unable to read eeprom region\n");
187 return false;
188 }
189 eep_data++;
190 }
191 return true;
192#undef SIZE_EEPROM_DEF
193}
194
/* Per-map fill routines, indexed by ahp->ah_eep_map (see
 * ath9k_hw_fill_eeprom()): default map first, 4K map second. */
static bool (*ath9k_fill_eeprom[]) (struct ath_hal *) = {
	ath9k_hw_fill_def_eeprom,
	ath9k_hw_fill_4k_eeprom
};
199
200static inline bool ath9k_hw_fill_eeprom(struct ath_hal *ah)
201{
202 struct ath_hal_5416 *ahp = AH5416(ah);
203
204 return ath9k_fill_eeprom[ahp->ah_eep_map](ah);
205}
206
207static int ath9k_hw_check_def_eeprom(struct ath_hal *ah)
208{
209 struct ath_hal_5416 *ahp = AH5416(ah);
210 struct ar5416_eeprom_def *eep =
211 (struct ar5416_eeprom_def *) &ahp->ah_eeprom.def;
212 u16 *eepdata, temp, magic, magic2;
213 u32 sum = 0, el;
214 bool need_swap = false;
215 int i, addr, size;
216
217 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
218 &magic)) {
219 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
220 "Reading Magic # failed\n");
221 return false;
222 }
223
224 if (!ath9k_hw_use_flash(ah)) {
225
226 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
227 "Read Magic = 0x%04X\n", magic);
228
229 if (magic != AR5416_EEPROM_MAGIC) {
230 magic2 = swab16(magic);
231
232 if (magic2 == AR5416_EEPROM_MAGIC) {
233 size = sizeof(struct ar5416_eeprom_def);
234 need_swap = true;
235 eepdata = (u16 *) (&ahp->ah_eeprom);
236
237 for (addr = 0; addr < size / sizeof(u16); addr++) {
238 temp = swab16(*eepdata);
239 *eepdata = temp;
240 eepdata++;
241
242 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
243 "0x%04X ", *eepdata);
244
245 if (((addr + 1) % 6) == 0)
246 DPRINTF(ah->ah_sc,
247 ATH_DBG_EEPROM, "\n");
248 }
249 } else {
250 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
251 "Invalid EEPROM Magic. "
252 "endianness mismatch.\n");
253 return -EINVAL;
254 }
255 }
256 }
257
258 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n",
259 need_swap ? "True" : "False");
260
261 if (need_swap)
262 el = swab16(ahp->ah_eeprom.def.baseEepHeader.length);
263 else
264 el = ahp->ah_eeprom.def.baseEepHeader.length;
265
266 if (el > sizeof(struct ar5416_eeprom_def))
267 el = sizeof(struct ar5416_eeprom_def) / sizeof(u16);
268 else
269 el = el / sizeof(u16);
270
271 eepdata = (u16 *)(&ahp->ah_eeprom);
272
273 for (i = 0; i < el; i++)
274 sum ^= *eepdata++;
275
276 if (need_swap) {
277 u32 integer, j;
278 u16 word;
279
280 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
281 "EEPROM Endianness is not native.. Changing \n");
282
283 word = swab16(eep->baseEepHeader.length);
284 eep->baseEepHeader.length = word;
285
286 word = swab16(eep->baseEepHeader.checksum);
287 eep->baseEepHeader.checksum = word;
288
289 word = swab16(eep->baseEepHeader.version);
290 eep->baseEepHeader.version = word;
291
292 word = swab16(eep->baseEepHeader.regDmn[0]);
293 eep->baseEepHeader.regDmn[0] = word;
294
295 word = swab16(eep->baseEepHeader.regDmn[1]);
296 eep->baseEepHeader.regDmn[1] = word;
297
298 word = swab16(eep->baseEepHeader.rfSilent);
299 eep->baseEepHeader.rfSilent = word;
300
301 word = swab16(eep->baseEepHeader.blueToothOptions);
302 eep->baseEepHeader.blueToothOptions = word;
303
304 word = swab16(eep->baseEepHeader.deviceCap);
305 eep->baseEepHeader.deviceCap = word;
306
307 for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) {
308 struct modal_eep_header *pModal =
309 &eep->modalHeader[j];
310 integer = swab32(pModal->antCtrlCommon);
311 pModal->antCtrlCommon = integer;
312
313 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
314 integer = swab32(pModal->antCtrlChain[i]);
315 pModal->antCtrlChain[i] = integer;
316 }
317
318 for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) {
319 word = swab16(pModal->spurChans[i].spurChan);
320 pModal->spurChans[i].spurChan = word;
321 }
322 }
323 }
324
325 if (sum != 0xffff || ar5416_get_eep_ver(ahp) != AR5416_EEP_VER ||
326 ar5416_get_eep_rev(ahp) < AR5416_EEP_NO_BACK_VER) {
327 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
328 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
329 sum, ar5416_get_eep_ver(ahp));
330 return -EINVAL;
331 }
332
333 return 0;
334}
335
336static int ath9k_hw_check_4k_eeprom(struct ath_hal *ah)
337{
338#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
339 struct ath_hal_5416 *ahp = AH5416(ah);
340 struct ar5416_eeprom_4k *eep =
341 (struct ar5416_eeprom_4k *) &ahp->ah_eeprom.map4k;
342 u16 *eepdata, temp, magic, magic2;
343 u32 sum = 0, el;
344 bool need_swap = false;
345 int i, addr;
346
347
348 if (!ath9k_hw_use_flash(ah)) {
349
350 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
351 &magic)) {
352 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
353 "Reading Magic # failed\n");
354 return false;
355 }
356
357 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
358 "Read Magic = 0x%04X\n", magic);
359
360 if (magic != AR5416_EEPROM_MAGIC) {
361 magic2 = swab16(magic);
362
363 if (magic2 == AR5416_EEPROM_MAGIC) {
364 need_swap = true;
365 eepdata = (u16 *) (&ahp->ah_eeprom);
366
367 for (addr = 0; addr < EEPROM_4K_SIZE; addr++) {
368 temp = swab16(*eepdata);
369 *eepdata = temp;
370 eepdata++;
371
372 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
373 "0x%04X ", *eepdata);
374
375 if (((addr + 1) % 6) == 0)
376 DPRINTF(ah->ah_sc,
377 ATH_DBG_EEPROM, "\n");
378 }
379 } else {
380 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
381 "Invalid EEPROM Magic. "
382 "endianness mismatch.\n");
383 return -EINVAL;
384 }
385 }
386 }
387
388 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n",
389 need_swap ? "True" : "False");
390
391 if (need_swap)
392 el = swab16(ahp->ah_eeprom.map4k.baseEepHeader.length);
393 else
394 el = ahp->ah_eeprom.map4k.baseEepHeader.length;
395
396 if (el > sizeof(struct ar5416_eeprom_def))
397 el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16);
398 else
399 el = el / sizeof(u16);
400
401 eepdata = (u16 *)(&ahp->ah_eeprom);
402
403 for (i = 0; i < el; i++)
404 sum ^= *eepdata++;
405
406 if (need_swap) {
407 u32 integer;
408 u16 word;
409
410 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
411 "EEPROM Endianness is not native.. Changing \n");
412
413 word = swab16(eep->baseEepHeader.length);
414 eep->baseEepHeader.length = word;
415
416 word = swab16(eep->baseEepHeader.checksum);
417 eep->baseEepHeader.checksum = word;
418
419 word = swab16(eep->baseEepHeader.version);
420 eep->baseEepHeader.version = word;
421
422 word = swab16(eep->baseEepHeader.regDmn[0]);
423 eep->baseEepHeader.regDmn[0] = word;
424
425 word = swab16(eep->baseEepHeader.regDmn[1]);
426 eep->baseEepHeader.regDmn[1] = word;
427
428 word = swab16(eep->baseEepHeader.rfSilent);
429 eep->baseEepHeader.rfSilent = word;
430
431 word = swab16(eep->baseEepHeader.blueToothOptions);
432 eep->baseEepHeader.blueToothOptions = word;
433
434 word = swab16(eep->baseEepHeader.deviceCap);
435 eep->baseEepHeader.deviceCap = word;
436
437 integer = swab32(eep->modalHeader.antCtrlCommon);
438 eep->modalHeader.antCtrlCommon = integer;
439
440 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
441 integer = swab32(eep->modalHeader.antCtrlChain[i]);
442 eep->modalHeader.antCtrlChain[i] = integer;
443 }
444
445 for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) {
446 word = swab16(eep->modalHeader.spurChans[i].spurChan);
447 eep->modalHeader.spurChans[i].spurChan = word;
448 }
449 }
450
451 if (sum != 0xffff || ar5416_get_eep4k_ver(ahp) != AR5416_EEP_VER ||
452 ar5416_get_eep4k_rev(ahp) < AR5416_EEP_NO_BACK_VER) {
453 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
454 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
455 sum, ar5416_get_eep4k_ver(ahp));
456 return -EINVAL;
457 }
458
459 return 0;
460#undef EEPROM_4K_SIZE
461}
462
/* Per-map validation routines, indexed by ahp->ah_eep_map (see
 * ath9k_hw_check_eeprom()): default map first, 4K map second. */
static int (*ath9k_check_eeprom[]) (struct ath_hal *) = {
	ath9k_hw_check_def_eeprom,
	ath9k_hw_check_4k_eeprom
};
467
468static inline int ath9k_hw_check_eeprom(struct ath_hal *ah)
469{
470 struct ath_hal_5416 *ahp = AH5416(ah);
471
472 return ath9k_check_eeprom[ahp->ah_eep_map](ah);
473}
474
/*
 * Expand the measured (power, Vpd) intercept points into a Vpd value for
 * every second power step between pwrMin and pwrMax, writing one entry
 * per step into pRetVpdList.
 *
 * For each step, the bracketing intercepts are found with
 * ath9k_hw_get_lower_upper_index() and the Vpd is linearly interpolated
 * between them (clamping the bracket away from the list ends so the
 * interpolation always has two points).  Always returns true.
 */
static inline bool ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
					   u8 *pVpdList, u16 numIntercepts,
					   u8 *pRetVpdList)
{
	u16 i, k;
	u8 currPwr = pwrMin;
	u16 idxL = 0, idxR = 0;

	for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) {
		ath9k_hw_get_lower_upper_index(currPwr, pPwrList,
					       numIntercepts, &(idxL),
					       &(idxR));
		/* Keep the bracket strictly inside the list so both
		 * interpolation endpoints exist. */
		if (idxR < 1)
			idxR = 1;
		if (idxL == numIntercepts - 1)
			idxL = (u16) (numIntercepts - 2);
		if (pPwrList[idxL] == pPwrList[idxR])
			k = pVpdList[idxL];
		else
			k = (u16)(((currPwr - pPwrList[idxL]) * pVpdList[idxR] +
				   (pPwrList[idxR] - currPwr) * pVpdList[idxL]) /
				  (pPwrList[idxR] - pPwrList[idxL]));
		pRetVpdList[i] = (u8) k;
		currPwr += 2;
	}

	return true;
}
503
/*
 * Build the PD gain boundaries and the PDADC table for the current
 * channel from the 4K-map calibration piers.
 *
 * The channel is located among the calibration piers; on an exact pier
 * match the pier's Vpd curves are expanded directly, otherwise the
 * curves of the two bracketing piers are expanded and interpolated in
 * frequency.  The per-gain curves are then stitched (with tPdGainOverlap
 * overlap and clamping to 0..255) into pPDADCValues, and the boundary
 * between consecutive gains is written to pPdGainBoundaries.
 *
 * NOTE(review): the two in-function macros deliberately keep their odd
 * trailing semicolons from the original; they are only used in contexts
 * where the extra empty statement is harmless.
 */
static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hal *ah,
				struct ath9k_channel *chan,
				struct cal_data_per_freq_4k *pRawDataSet,
				u8 *bChans, u16 availPiers,
				u16 tPdGainOverlap, int16_t *pMinCalPower,
				u16 *pPdGainBoundaries, u8 *pPDADCValues,
				u16 numXpdGains)
{
#define TMP_VAL_VPD_TABLE \
	((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep));
	int i, j, k;
	int16_t ss;
	u16 idxL = 0, idxR = 0, numPiers;
	/* static: these tables are large; kept off the stack.  Implies the
	 * caller context is serialized — TODO confirm. */
	static u8 vpdTableL[AR5416_EEP4K_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
	static u8 vpdTableR[AR5416_EEP4K_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
	static u8 vpdTableI[AR5416_EEP4K_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];

	u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
	u8 minPwrT4[AR5416_EEP4K_NUM_PD_GAINS];
	u8 maxPwrT4[AR5416_EEP4K_NUM_PD_GAINS];
	int16_t vpdStep;
	int16_t tmpVal;
	u16 sizeCurrVpdTable, maxIndex, tgtIndex;
	bool match;
	int16_t minDelta = 0;
	struct chan_centers centers;
#define PD_GAIN_BOUNDARY_DEFAULT 58;

	ath9k_hw_get_channel_centers(ah, chan, &centers);

	/* Count the populated calibration piers. */
	for (numPiers = 0; numPiers < availPiers; numPiers++) {
		if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
			break;
	}

	match = ath9k_hw_get_lower_upper_index(
		(u8)FREQ2FBIN(centers.synth_center,
		IS_CHAN_2GHZ(chan)), bChans, numPiers,
		&idxL, &idxR);

	if (match) {
		/* Exact pier hit: expand that pier's curves directly. */
		for (i = 0; i < numXpdGains; i++) {
			minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
			maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
					pRawDataSet[idxL].pwrPdg[i],
					pRawDataSet[idxL].vpdPdg[i],
					AR5416_EEP4K_PD_GAIN_ICEPTS,
					vpdTableI[i]);
		}
	} else {
		/* Between piers: expand both neighbours, then interpolate
		 * each Vpd step in frequency. */
		for (i = 0; i < numXpdGains; i++) {
			pVpdL = pRawDataSet[idxL].vpdPdg[i];
			pPwrL = pRawDataSet[idxL].pwrPdg[i];
			pVpdR = pRawDataSet[idxR].vpdPdg[i];
			pPwrR = pRawDataSet[idxR].pwrPdg[i];

			minPwrT4[i] = max(pPwrL[0], pPwrR[0]);

			maxPwrT4[i] =
				min(pPwrL[AR5416_EEP4K_PD_GAIN_ICEPTS - 1],
				    pPwrR[AR5416_EEP4K_PD_GAIN_ICEPTS - 1]);


			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						pPwrL, pVpdL,
						AR5416_EEP4K_PD_GAIN_ICEPTS,
						vpdTableL[i]);
			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						pPwrR, pVpdR,
						AR5416_EEP4K_PD_GAIN_ICEPTS,
						vpdTableR[i]);

			for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
				vpdTableI[i][j] =
					(u8)(ath9k_hw_interpolate((u16)
					     FREQ2FBIN(centers.
						       synth_center,
						       IS_CHAN_2GHZ
						       (chan)),
					     bChans[idxL], bChans[idxR],
					     vpdTableL[i][j], vpdTableR[i][j]));
			}
		}
	}

	*pMinCalPower = (int16_t)(minPwrT4[0] / 2);

	k = 0;

	/* Stitch the per-gain curves into one PDADC table. */
	for (i = 0; i < numXpdGains; i++) {
		if (i == (numXpdGains - 1))
			pPdGainBoundaries[i] =
				(u16)(maxPwrT4[i] / 2);
		else
			pPdGainBoundaries[i] =
				(u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);

		pPdGainBoundaries[i] =
			min((u16)AR5416_MAX_RATE_POWER, pPdGainBoundaries[i]);

		/* Pre-2.0 AR5416 silicon forces the first boundary to 23. */
		if ((i == 0) && !AR_SREV_5416_V20_OR_LATER(ah)) {
			minDelta = pPdGainBoundaries[0] - 23;
			pPdGainBoundaries[0] = 23;
		} else {
			minDelta = 0;
		}

		if (i == 0) {
			if (AR_SREV_9280_10_OR_LATER(ah))
				ss = (int16_t)(0 - (minPwrT4[i] / 2));
			else
				ss = 0;
		} else {
			ss = (int16_t)((pPdGainBoundaries[i - 1] -
					(minPwrT4[i] / 2)) -
				       tPdGainOverlap + 1 + minDelta);
		}
		vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
		vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);

		/* Extrapolate below the curve while ss is negative. */
		while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
			tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
			pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
			ss++;
		}

		sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
		tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
				(minPwrT4[i] / 2));
		maxIndex = (tgtIndex < sizeCurrVpdTable) ?
			tgtIndex : sizeCurrVpdTable;

		/* Copy the measured portion of the curve. */
		while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1)))
			pPDADCValues[k++] = vpdTableI[i][ss++];

		vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
				    vpdTableI[i][sizeCurrVpdTable - 2]);
		vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);

		/* Extrapolate above the curve up to the target index. */
		if (tgtIndex > maxIndex) {
			while ((ss <= tgtIndex) &&
			       (k < (AR5416_NUM_PDADC_VALUES - 1))) {
				tmpVal = (int16_t) TMP_VAL_VPD_TABLE;
				pPDADCValues[k++] = (u8)((tmpVal > 255) ?
							 255 : tmpVal);
				ss++;
			}
		}
	}

	/* Unused gain slots get the default boundary. */
	while (i < AR5416_EEP4K_PD_GAINS_IN_MASK) {
		pPdGainBoundaries[i] = PD_GAIN_BOUNDARY_DEFAULT;
		i++;
	}

	/* Pad the PDADC table with its last value. */
	while (k < AR5416_NUM_PDADC_VALUES) {
		pPDADCValues[k] = pPDADCValues[k - 1];
		k++;
	}

	return;
#undef TMP_VAL_VPD_TABLE
}
671
/*
 * Build the PD gain boundaries and the PDADC table for the current
 * channel from the default-map calibration piers.  Same algorithm as
 * ath9k_hw_get_4k_gain_boundaries_pdadcs(), but using the default-map
 * array sizes; unused boundary slots are filled with the previous
 * boundary rather than a fixed default.
 */
static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hal *ah,
				struct ath9k_channel *chan,
				struct cal_data_per_freq *pRawDataSet,
				u8 *bChans, u16 availPiers,
				u16 tPdGainOverlap, int16_t *pMinCalPower,
				u16 *pPdGainBoundaries, u8 *pPDADCValues,
				u16 numXpdGains)
{
	int i, j, k;
	int16_t ss;
	u16 idxL = 0, idxR = 0, numPiers;
	/* static: these tables are large; kept off the stack.  Implies the
	 * caller context is serialized — TODO confirm. */
	static u8 vpdTableL[AR5416_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
	static u8 vpdTableR[AR5416_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];
	static u8 vpdTableI[AR5416_NUM_PD_GAINS]
		[AR5416_MAX_PWR_RANGE_IN_HALF_DB];

	u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
	u8 minPwrT4[AR5416_NUM_PD_GAINS];
	u8 maxPwrT4[AR5416_NUM_PD_GAINS];
	int16_t vpdStep;
	int16_t tmpVal;
	u16 sizeCurrVpdTable, maxIndex, tgtIndex;
	bool match;
	int16_t minDelta = 0;
	struct chan_centers centers;

	ath9k_hw_get_channel_centers(ah, chan, &centers);

	/* Count the populated calibration piers. */
	for (numPiers = 0; numPiers < availPiers; numPiers++) {
		if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
			break;
	}

	match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center,
							     IS_CHAN_2GHZ(chan)),
					       bChans, numPiers, &idxL, &idxR);

	if (match) {
		/* Exact pier hit: expand that pier's curves directly. */
		for (i = 0; i < numXpdGains; i++) {
			minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
			maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						pRawDataSet[idxL].pwrPdg[i],
						pRawDataSet[idxL].vpdPdg[i],
						AR5416_PD_GAIN_ICEPTS,
						vpdTableI[i]);
		}
	} else {
		/* Between piers: expand both neighbours, then interpolate
		 * each Vpd step in frequency. */
		for (i = 0; i < numXpdGains; i++) {
			pVpdL = pRawDataSet[idxL].vpdPdg[i];
			pPwrL = pRawDataSet[idxL].pwrPdg[i];
			pVpdR = pRawDataSet[idxR].vpdPdg[i];
			pPwrR = pRawDataSet[idxR].pwrPdg[i];

			minPwrT4[i] = max(pPwrL[0], pPwrR[0]);

			maxPwrT4[i] =
				min(pPwrL[AR5416_PD_GAIN_ICEPTS - 1],
				    pPwrR[AR5416_PD_GAIN_ICEPTS - 1]);


			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						pPwrL, pVpdL,
						AR5416_PD_GAIN_ICEPTS,
						vpdTableL[i]);
			ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
						pPwrR, pVpdR,
						AR5416_PD_GAIN_ICEPTS,
						vpdTableR[i]);

			for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
				vpdTableI[i][j] =
					(u8)(ath9k_hw_interpolate((u16)
					     FREQ2FBIN(centers.
						       synth_center,
						       IS_CHAN_2GHZ
						       (chan)),
					     bChans[idxL], bChans[idxR],
					     vpdTableL[i][j], vpdTableR[i][j]));
			}
		}
	}

	*pMinCalPower = (int16_t)(minPwrT4[0] / 2);

	k = 0;

	/* Stitch the per-gain curves into one PDADC table. */
	for (i = 0; i < numXpdGains; i++) {
		if (i == (numXpdGains - 1))
			pPdGainBoundaries[i] =
				(u16)(maxPwrT4[i] / 2);
		else
			pPdGainBoundaries[i] =
				(u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);

		pPdGainBoundaries[i] =
			min((u16)AR5416_MAX_RATE_POWER, pPdGainBoundaries[i]);

		/* Pre-2.0 AR5416 silicon forces the first boundary to 23. */
		if ((i == 0) && !AR_SREV_5416_V20_OR_LATER(ah)) {
			minDelta = pPdGainBoundaries[0] - 23;
			pPdGainBoundaries[0] = 23;
		} else {
			minDelta = 0;
		}

		if (i == 0) {
			if (AR_SREV_9280_10_OR_LATER(ah))
				ss = (int16_t)(0 - (minPwrT4[i] / 2));
			else
				ss = 0;
		} else {
			ss = (int16_t)((pPdGainBoundaries[i - 1] -
					(minPwrT4[i] / 2)) -
				       tPdGainOverlap + 1 + minDelta);
		}
		vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
		vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);

		/* Extrapolate below the curve while ss is negative. */
		while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
			tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
			pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
			ss++;
		}

		sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
		tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
				(minPwrT4[i] / 2));
		maxIndex = (tgtIndex < sizeCurrVpdTable) ?
			tgtIndex : sizeCurrVpdTable;

		/* Copy the measured portion of the curve. */
		while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
			pPDADCValues[k++] = vpdTableI[i][ss++];
		}

		vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
				    vpdTableI[i][sizeCurrVpdTable - 2]);
		vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);

		/* Extrapolate above the curve up to the target index. */
		if (tgtIndex > maxIndex) {
			while ((ss <= tgtIndex) &&
			       (k < (AR5416_NUM_PDADC_VALUES - 1))) {
				tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
						    (ss - maxIndex + 1) * vpdStep));
				pPDADCValues[k++] = (u8)((tmpVal > 255) ?
							 255 : tmpVal);
				ss++;
			}
		}
	}

	/* Unused gain slots repeat the last computed boundary. */
	while (i < AR5416_PD_GAINS_IN_MASK) {
		pPdGainBoundaries[i] = pPdGainBoundaries[i - 1];
		i++;
	}

	/* Pad the PDADC table with its last value. */
	while (k < AR5416_NUM_PDADC_VALUES) {
		pPDADCValues[k] = pPDADCValues[k - 1];
		k++;
	}

	return;
}
836
/*
 * ath9k_hw_get_legacy_target_powers - select CCK/OFDM target power for a channel
 *
 * Scans the EEPROM calibration list @powInfo (up to @numChannels entries,
 * terminated early by an AR5416_BCHAN_UNUSED marker) for the channel's
 * center frequency.  On an exact match the matching entry is copied into
 * @pNewPower; when the frequency falls between two calibration piers the
 * @numRates per-rate powers are interpolated between them; frequencies
 * off either end of the list clamp to the nearest entry.
 * @isExtTarget: use the HT40 extension-channel center instead of the
 * control-channel center.
 */
static void ath9k_hw_get_legacy_target_powers(struct ath_hal *ah,
					      struct ath9k_channel *chan,
					      struct cal_target_power_leg *powInfo,
					      u16 numChannels,
					      struct cal_target_power_leg *pNewPower,
					      u16 numRates, bool isExtTarget)
{
	struct chan_centers centers;
	u16 clo, chi;
	int i;
	int matchIndex = -1, lowIndex = -1;
	u16 freq;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = (isExtTarget) ? centers.ext_center : centers.ctl_center;

	/* At or below the first calibrated pier: clamp to entry 0. */
	if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel,
				       IS_CHAN_2GHZ(chan))) {
		matchIndex = 0;
	} else {
		for (i = 0; (i < numChannels) &&
			     (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
			if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel,
						       IS_CHAN_2GHZ(chan))) {
				matchIndex = i;
				break;
			} else if ((freq < ath9k_hw_fbin2freq(powInfo[i].bChannel,
						IS_CHAN_2GHZ(chan))) &&
				   (freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
						IS_CHAN_2GHZ(chan)))) {
				/* Bracketed between piers i-1 and i.
				 * (i == 0 cannot reach the second test:
				 * freq > pier 0 was established above.) */
				lowIndex = i - 1;
				break;
			}
		}
		/* Above the last calibrated pier: clamp to the final entry. */
		if ((matchIndex == -1) && (lowIndex == -1))
			matchIndex = i - 1;
	}

	if (matchIndex != -1) {
		*pNewPower = powInfo[matchIndex];
	} else {
		clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
					 IS_CHAN_2GHZ(chan));
		chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
					 IS_CHAN_2GHZ(chan));

		/* Interpolate each rate between the bracketing piers. */
		for (i = 0; i < numRates; i++) {
			pNewPower->tPow2x[i] =
				(u8)ath9k_hw_interpolate(freq, clo, chi,
						powInfo[lowIndex].tPow2x[i],
						powInfo[lowIndex + 1].tPow2x[i]);
		}
	}
}
891
892static void ath9k_hw_get_target_powers(struct ath_hal *ah,
893 struct ath9k_channel *chan,
894 struct cal_target_power_ht *powInfo,
895 u16 numChannels,
896 struct cal_target_power_ht *pNewPower,
897 u16 numRates, bool isHt40Target)
898{
899 struct chan_centers centers;
900 u16 clo, chi;
901 int i;
902 int matchIndex = -1, lowIndex = -1;
903 u16 freq;
904
905 ath9k_hw_get_channel_centers(ah, chan, &centers);
906 freq = isHt40Target ? centers.synth_center : centers.ctl_center;
907
908 if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) {
909 matchIndex = 0;
910 } else {
911 for (i = 0; (i < numChannels) &&
912 (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
913 if (freq == ath9k_hw_fbin2freq(powInfo[i].bChannel,
914 IS_CHAN_2GHZ(chan))) {
915 matchIndex = i;
916 break;
917 } else
918 if ((freq < ath9k_hw_fbin2freq(powInfo[i].bChannel,
919 IS_CHAN_2GHZ(chan))) &&
920 (freq > ath9k_hw_fbin2freq(powInfo[i - 1].bChannel,
921 IS_CHAN_2GHZ(chan)))) {
922 lowIndex = i - 1;
923 break;
924 }
925 }
926 if ((matchIndex == -1) && (lowIndex == -1))
927 matchIndex = i - 1;
928 }
929
930 if (matchIndex != -1) {
931 *pNewPower = powInfo[matchIndex];
932 } else {
933 clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel,
934 IS_CHAN_2GHZ(chan));
935 chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel,
936 IS_CHAN_2GHZ(chan));
937
938 for (i = 0; i < numRates; i++) {
939 pNewPower->tPow2x[i] = (u8)ath9k_hw_interpolate(freq,
940 clo, chi,
941 powInfo[lowIndex].tPow2x[i],
942 powInfo[lowIndex + 1].tPow2x[i]);
943 }
944 }
945}
946
947static u16 ath9k_hw_get_max_edge_power(u16 freq,
948 struct cal_ctl_edges *pRdEdgesPower,
949 bool is2GHz, int num_band_edges)
950{
951 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
952 int i;
953
954 for (i = 0; (i < num_band_edges) &&
955 (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
956 if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) {
957 twiceMaxEdgePower = pRdEdgesPower[i].tPower;
958 break;
959 } else if ((i > 0) &&
960 (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel,
961 is2GHz))) {
962 if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel,
963 is2GHz) < freq &&
964 pRdEdgesPower[i - 1].flag) {
965 twiceMaxEdgePower =
966 pRdEdgesPower[i - 1].tPower;
967 }
968 break;
969 }
970 }
971
972 return twiceMaxEdgePower;
973}
974
/*
 * ath9k_hw_set_def_power_cal_table - program per-chain PDADC gain tables
 * (default "def" EEPROM layout)
 *
 * Derives the power-detector gain boundaries and the PDADC curve from
 * the EEPROM calibration piers for the band of @chan, then writes them
 * into AR_PHY_TPCRG5 and the PDADC table registers for every chain
 * enabled in the EEPROM txMask.
 *
 * *pTxPowerIndexOffset is always set to 0 and the function always
 * returns true.
 */
static bool ath9k_hw_set_def_power_cal_table(struct ath_hal *ah,
					     struct ath9k_channel *chan,
					     int16_t *pTxPowerIndexOffset)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416_eeprom_def *pEepData = &ahp->ah_eeprom.def;
	struct cal_data_per_freq *pRawDataset;
	u8 *pCalBChans = NULL;
	u16 pdGainOverlap_t2;
	/* NOTE(review): static scratch buffer - this function is not
	 * reentrant; presumably serialized by the caller. */
	static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
	u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
	u16 numPiers, i, j;
	int16_t tMinCalPower;
	u16 numXpdGain, xpdMask;
	u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
	u32 reg32, regOffset, regChainOffset;
	int16_t modalIdx;

	modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
	xpdMask = pEepData->modalHeader[modalIdx].xpdGain;

	/* EEPROM minor versions before 2 lack pdGainOverlap; fall back
	 * to the value currently programmed in the hardware. */
	if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_2) {
		pdGainOverlap_t2 =
			pEepData->modalHeader[modalIdx].pdGainOverlap;
	} else {
		pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
					    AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
	}

	if (IS_CHAN_2GHZ(chan)) {
		pCalBChans = pEepData->calFreqPier2G;
		numPiers = AR5416_NUM_2G_CAL_PIERS;
	} else {
		pCalBChans = pEepData->calFreqPier5G;
		numPiers = AR5416_NUM_5G_CAL_PIERS;
	}

	numXpdGain = 0;

	/* Collect the xpd gain values selected by the EEPROM bitmask,
	 * highest bit first, capped at AR5416_NUM_PD_GAINS entries. */
	for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
		if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
			if (numXpdGain >= AR5416_NUM_PD_GAINS)
				break;
			xpdGainValues[numXpdGain] =
				(u16)(AR5416_PD_GAINS_IN_MASK - i);
			numXpdGain++;
		}
	}

	REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
		      (numXpdGain - 1) & 0x3);
	REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
		      xpdGainValues[0]);
	REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
		      xpdGainValues[1]);
	REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3,
		      xpdGainValues[2]);

	for (i = 0; i < AR5416_MAX_CHAINS; i++) {
		/* With chainmask 5 (chains 0 and 2) on v2.0+ parts the
		 * per-chain register offsets for the non-zero chains
		 * are swapped (0x2000 for chain 1, 0x1000 for chain 2). */
		if (AR_SREV_5416_V20_OR_LATER(ah) &&
		    (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5) &&
		    (i != 0)) {
			regChainOffset = (i == 1) ? 0x2000 : 0x1000;
		} else
			regChainOffset = i * 0x1000;

		if (pEepData->baseEepHeader.txMask & (1 << i)) {
			if (IS_CHAN_2GHZ(chan))
				pRawDataset = pEepData->calPierData2G[i];
			else
				pRawDataset = pEepData->calPierData5G[i];

			ath9k_hw_get_def_gain_boundaries_pdadcs(ah, chan,
					    pRawDataset, pCalBChans,
					    numPiers, pdGainOverlap_t2,
					    &tMinCalPower, gainBoundaries,
					    pdadcValues, numXpdGain);

			/* Pre-v2.0 parts only program chain 0's
			 * gain boundaries. */
			if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
				REG_WRITE(ah,
					  AR_PHY_TPCRG5 + regChainOffset,
					  SM(pdGainOverlap_t2,
					     AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
					  | SM(gainBoundaries[0],
					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
					  | SM(gainBoundaries[1],
					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
					  | SM(gainBoundaries[2],
					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
					  | SM(gainBoundaries[3],
					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
			}

			/* Pack the PDADC bytes four at a time (byte k in
			 * bits 8*k..8*k+7) into 32 register writes. */
			regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
			for (j = 0; j < 32; j++) {
				reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
					((pdadcValues[4 * j + 1] & 0xFF) << 8) |
					((pdadcValues[4 * j + 2] & 0xFF) << 16)|
					((pdadcValues[4 * j + 3] & 0xFF) << 24);
				REG_WRITE(ah, regOffset, reg32);

				DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
					"PDADC (%d,%4x): %4.4x %8.8x\n",
					i, regChainOffset, regOffset,
					reg32);
				DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
					"PDADC: Chain %d | PDADC %3d "
					"Value %3d | PDADC %3d Value %3d | "
					"PDADC %3d Value %3d | PDADC %3d "
					"Value %3d |\n",
					i, 4 * j, pdadcValues[4 * j],
					4 * j + 1, pdadcValues[4 * j + 1],
					4 * j + 2, pdadcValues[4 * j + 2],
					4 * j + 3,
					pdadcValues[4 * j + 3]);

				regOffset += 4;
			}
		}
	}

	*pTxPowerIndexOffset = 0;

	return true;
}
1101
/*
 * ath9k_hw_set_4k_power_cal_table - program per-chain PDADC gain tables
 * (4K "map4k" EEPROM layout, 2 GHz calibration data only)
 *
 * Same job as the "def" variant: derive the power-detector gain
 * boundaries and the PDADC curve from the 2 GHz calibration piers and
 * write them to AR_PHY_TPCRG5 and the PDADC table registers for every
 * chain enabled in the EEPROM txMask.
 *
 * *pTxPowerIndexOffset is always set to 0 and the function always
 * returns true.
 */
static bool ath9k_hw_set_4k_power_cal_table(struct ath_hal *ah,
					    struct ath9k_channel *chan,
					    int16_t *pTxPowerIndexOffset)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416_eeprom_4k *pEepData = &ahp->ah_eeprom.map4k;
	struct cal_data_per_freq_4k *pRawDataset;
	u8 *pCalBChans = NULL;
	u16 pdGainOverlap_t2;
	/* NOTE(review): static scratch buffer - this function is not
	 * reentrant; presumably serialized by the caller. */
	static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
	u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
	u16 numPiers, i, j;
	int16_t tMinCalPower;
	u16 numXpdGain, xpdMask;
	u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
	u32 reg32, regOffset, regChainOffset;

	xpdMask = pEepData->modalHeader.xpdGain;

	/* EEPROM minor versions before 2 lack pdGainOverlap; fall back
	 * to the value currently programmed in the hardware. */
	if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_2) {
		pdGainOverlap_t2 =
			pEepData->modalHeader.pdGainOverlap;
	} else {
		pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
					    AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
	}

	/* The 4K map carries 2 GHz calibration data only. */
	pCalBChans = pEepData->calFreqPier2G;
	numPiers = AR5416_NUM_2G_CAL_PIERS;

	numXpdGain = 0;

	/* Collect the xpd gain values selected by the EEPROM bitmask,
	 * highest bit first, capped at AR5416_NUM_PD_GAINS entries. */
	for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
		if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
			if (numXpdGain >= AR5416_NUM_PD_GAINS)
				break;
			xpdGainValues[numXpdGain] =
				(u16)(AR5416_PD_GAINS_IN_MASK - i);
			numXpdGain++;
		}
	}

	REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN,
		      (numXpdGain - 1) & 0x3);
	REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1,
		      xpdGainValues[0]);
	REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
		      xpdGainValues[1]);
	REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3,
		      xpdGainValues[2]);

	for (i = 0; i < AR5416_MAX_CHAINS; i++) {
		/* With chainmask 5 (chains 0 and 2) on v2.0+ parts the
		 * per-chain register offsets for the non-zero chains
		 * are swapped. */
		if (AR_SREV_5416_V20_OR_LATER(ah) &&
		    (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5) &&
		    (i != 0)) {
			regChainOffset = (i == 1) ? 0x2000 : 0x1000;
		} else
			regChainOffset = i * 0x1000;

		if (pEepData->baseEepHeader.txMask & (1 << i)) {
			pRawDataset = pEepData->calPierData2G[i];

			ath9k_hw_get_4k_gain_boundaries_pdadcs(ah, chan,
					    pRawDataset, pCalBChans,
					    numPiers, pdGainOverlap_t2,
					    &tMinCalPower, gainBoundaries,
					    pdadcValues, numXpdGain);

			/* Pre-v2.0 parts only program chain 0's
			 * gain boundaries. */
			if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
				REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset,
					  SM(pdGainOverlap_t2,
					     AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
					  | SM(gainBoundaries[0],
					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
					  | SM(gainBoundaries[1],
					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
					  | SM(gainBoundaries[2],
					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
					  | SM(gainBoundaries[3],
					       AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
			}

			/* Pack the PDADC bytes four at a time (byte k in
			 * bits 8*k..8*k+7) into 32 register writes. */
			regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
			for (j = 0; j < 32; j++) {
				reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
					((pdadcValues[4 * j + 1] & 0xFF) << 8) |
					((pdadcValues[4 * j + 2] & 0xFF) << 16)|
					((pdadcValues[4 * j + 3] & 0xFF) << 24);
				REG_WRITE(ah, regOffset, reg32);

				DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
					"PDADC (%d,%4x): %4.4x %8.8x\n",
					i, regChainOffset, regOffset,
					reg32);
				DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
					"PDADC: Chain %d | "
					"PDADC %3d Value %3d | "
					"PDADC %3d Value %3d | "
					"PDADC %3d Value %3d | "
					"PDADC %3d Value %3d |\n",
					i, 4 * j, pdadcValues[4 * j],
					4 * j + 1, pdadcValues[4 * j + 1],
					4 * j + 2, pdadcValues[4 * j + 2],
					4 * j + 3,
					pdadcValues[4 * j + 3]);

				regOffset += 4;
			}
		}
	}

	*pTxPowerIndexOffset = 0;

	return true;
}
1218
1219static bool ath9k_hw_set_def_power_per_rate_table(struct ath_hal *ah,
1220 struct ath9k_channel *chan,
1221 int16_t *ratesArray,
1222 u16 cfgCtl,
1223 u16 AntennaReduction,
1224 u16 twiceMaxRegulatoryPower,
1225 u16 powerLimit)
1226{
1227#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */
1228#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 /* 10*log10(3)*2 */
1229
1230 struct ath_hal_5416 *ahp = AH5416(ah);
1231 struct ar5416_eeprom_def *pEepData = &ahp->ah_eeprom.def;
1232 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
1233 static const u16 tpScaleReductionTable[5] =
1234 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
1235
1236 int i;
1237 int16_t twiceLargestAntenna;
1238 struct cal_ctl_data *rep;
1239 struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
1240 0, { 0, 0, 0, 0}
1241 };
1242 struct cal_target_power_leg targetPowerOfdmExt = {
1243 0, { 0, 0, 0, 0} }, targetPowerCckExt = {
1244 0, { 0, 0, 0, 0 }
1245 };
1246 struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
1247 0, {0, 0, 0, 0}
1248 };
1249 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
1250 u16 ctlModesFor11a[] =
1251 { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 };
1252 u16 ctlModesFor11g[] =
1253 { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT,
1254 CTL_2GHT40
1255 };
1256 u16 numCtlModes, *pCtlMode, ctlMode, freq;
1257 struct chan_centers centers;
1258 int tx_chainmask;
1259 u16 twiceMinEdgePower;
1260
1261 tx_chainmask = ahp->ah_txchainmask;
1262
1263 ath9k_hw_get_channel_centers(ah, chan, &centers);
1264
1265 twiceLargestAntenna = max(
1266 pEepData->modalHeader
1267 [IS_CHAN_2GHZ(chan)].antennaGainCh[0],
1268 pEepData->modalHeader
1269 [IS_CHAN_2GHZ(chan)].antennaGainCh[1]);
1270
1271 twiceLargestAntenna = max((u8)twiceLargestAntenna,
1272 pEepData->modalHeader
1273 [IS_CHAN_2GHZ(chan)].antennaGainCh[2]);
1274
1275 twiceLargestAntenna = (int16_t)min(AntennaReduction -
1276 twiceLargestAntenna, 0);
1277
1278 maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;
1279
1280 if (ah->ah_tpScale != ATH9K_TP_SCALE_MAX) {
1281 maxRegAllowedPower -=
1282 (tpScaleReductionTable[(ah->ah_tpScale)] * 2);
1283 }
1284
1285 scaledPower = min(powerLimit, maxRegAllowedPower);
1286
1287 switch (ar5416_get_ntxchains(tx_chainmask)) {
1288 case 1:
1289 break;
1290 case 2:
1291 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
1292 break;
1293 case 3:
1294 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
1295 break;
1296 }
1297
1298 scaledPower = max((u16)0, scaledPower);
1299
1300 if (IS_CHAN_2GHZ(chan)) {
1301 numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
1302 SUB_NUM_CTL_MODES_AT_2G_40;
1303 pCtlMode = ctlModesFor11g;
1304
1305 ath9k_hw_get_legacy_target_powers(ah, chan,
1306 pEepData->calTargetPowerCck,
1307 AR5416_NUM_2G_CCK_TARGET_POWERS,
1308 &targetPowerCck, 4, false);
1309 ath9k_hw_get_legacy_target_powers(ah, chan,
1310 pEepData->calTargetPower2G,
1311 AR5416_NUM_2G_20_TARGET_POWERS,
1312 &targetPowerOfdm, 4, false);
1313 ath9k_hw_get_target_powers(ah, chan,
1314 pEepData->calTargetPower2GHT20,
1315 AR5416_NUM_2G_20_TARGET_POWERS,
1316 &targetPowerHt20, 8, false);
1317
1318 if (IS_CHAN_HT40(chan)) {
1319 numCtlModes = ARRAY_SIZE(ctlModesFor11g);
1320 ath9k_hw_get_target_powers(ah, chan,
1321 pEepData->calTargetPower2GHT40,
1322 AR5416_NUM_2G_40_TARGET_POWERS,
1323 &targetPowerHt40, 8, true);
1324 ath9k_hw_get_legacy_target_powers(ah, chan,
1325 pEepData->calTargetPowerCck,
1326 AR5416_NUM_2G_CCK_TARGET_POWERS,
1327 &targetPowerCckExt, 4, true);
1328 ath9k_hw_get_legacy_target_powers(ah, chan,
1329 pEepData->calTargetPower2G,
1330 AR5416_NUM_2G_20_TARGET_POWERS,
1331 &targetPowerOfdmExt, 4, true);
1332 }
1333 } else {
1334 numCtlModes = ARRAY_SIZE(ctlModesFor11a) -
1335 SUB_NUM_CTL_MODES_AT_5G_40;
1336 pCtlMode = ctlModesFor11a;
1337
1338 ath9k_hw_get_legacy_target_powers(ah, chan,
1339 pEepData->calTargetPower5G,
1340 AR5416_NUM_5G_20_TARGET_POWERS,
1341 &targetPowerOfdm, 4, false);
1342 ath9k_hw_get_target_powers(ah, chan,
1343 pEepData->calTargetPower5GHT20,
1344 AR5416_NUM_5G_20_TARGET_POWERS,
1345 &targetPowerHt20, 8, false);
1346
1347 if (IS_CHAN_HT40(chan)) {
1348 numCtlModes = ARRAY_SIZE(ctlModesFor11a);
1349 ath9k_hw_get_target_powers(ah, chan,
1350 pEepData->calTargetPower5GHT40,
1351 AR5416_NUM_5G_40_TARGET_POWERS,
1352 &targetPowerHt40, 8, true);
1353 ath9k_hw_get_legacy_target_powers(ah, chan,
1354 pEepData->calTargetPower5G,
1355 AR5416_NUM_5G_20_TARGET_POWERS,
1356 &targetPowerOfdmExt, 4, true);
1357 }
1358 }
1359
1360 for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
1361 bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
1362 (pCtlMode[ctlMode] == CTL_2GHT40);
1363 if (isHt40CtlMode)
1364 freq = centers.synth_center;
1365 else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
1366 freq = centers.ext_center;
1367 else
1368 freq = centers.ctl_center;
1369
1370 if (ar5416_get_eep_ver(ahp) == 14 && ar5416_get_eep_rev(ahp) <= 2)
1371 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
1372
1373 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
1374 "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
1375 "EXT_ADDITIVE %d\n",
1376 ctlMode, numCtlModes, isHt40CtlMode,
1377 (pCtlMode[ctlMode] & EXT_ADDITIVE));
1378
1379 for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
1380 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
1381 " LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
1382 "pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
1383 "chan %d\n",
1384 i, cfgCtl, pCtlMode[ctlMode],
1385 pEepData->ctlIndex[i], chan->channel);
1386
1387 if ((((cfgCtl & ~CTL_MODE_M) |
1388 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
1389 pEepData->ctlIndex[i]) ||
1390 (((cfgCtl & ~CTL_MODE_M) |
1391 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
1392 ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) {
1393 rep = &(pEepData->ctlData[i]);
1394
1395 twiceMinEdgePower = ath9k_hw_get_max_edge_power(freq,
1396 rep->ctlEdges[ar5416_get_ntxchains(tx_chainmask) - 1],
1397 IS_CHAN_2GHZ(chan), AR5416_NUM_BAND_EDGES);
1398
1399 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
1400 " MATCH-EE_IDX %d: ch %d is2 %d "
1401 "2xMinEdge %d chainmask %d chains %d\n",
1402 i, freq, IS_CHAN_2GHZ(chan),
1403 twiceMinEdgePower, tx_chainmask,
1404 ar5416_get_ntxchains
1405 (tx_chainmask));
1406 if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
1407 twiceMaxEdgePower = min(twiceMaxEdgePower,
1408 twiceMinEdgePower);
1409 } else {
1410 twiceMaxEdgePower = twiceMinEdgePower;
1411 break;
1412 }
1413 }
1414 }
1415
1416 minCtlPower = min(twiceMaxEdgePower, scaledPower);
1417
1418 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
1419 " SEL-Min ctlMode %d pCtlMode %d "
1420 "2xMaxEdge %d sP %d minCtlPwr %d\n",
1421 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
1422 scaledPower, minCtlPower);
1423
1424 switch (pCtlMode[ctlMode]) {
1425 case CTL_11B:
1426 for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) {
1427 targetPowerCck.tPow2x[i] =
1428 min((u16)targetPowerCck.tPow2x[i],
1429 minCtlPower);
1430 }
1431 break;
1432 case CTL_11A:
1433 case CTL_11G:
1434 for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) {
1435 targetPowerOfdm.tPow2x[i] =
1436 min((u16)targetPowerOfdm.tPow2x[i],
1437 minCtlPower);
1438 }
1439 break;
1440 case CTL_5GHT20:
1441 case CTL_2GHT20:
1442 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) {
1443 targetPowerHt20.tPow2x[i] =
1444 min((u16)targetPowerHt20.tPow2x[i],
1445 minCtlPower);
1446 }
1447 break;
1448 case CTL_11B_EXT:
1449 targetPowerCckExt.tPow2x[0] = min((u16)
1450 targetPowerCckExt.tPow2x[0],
1451 minCtlPower);
1452 break;
1453 case CTL_11A_EXT:
1454 case CTL_11G_EXT:
1455 targetPowerOfdmExt.tPow2x[0] = min((u16)
1456 targetPowerOfdmExt.tPow2x[0],
1457 minCtlPower);
1458 break;
1459 case CTL_5GHT40:
1460 case CTL_2GHT40:
1461 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
1462 targetPowerHt40.tPow2x[i] =
1463 min((u16)targetPowerHt40.tPow2x[i],
1464 minCtlPower);
1465 }
1466 break;
1467 default:
1468 break;
1469 }
1470 }
1471
1472 ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] =
1473 ratesArray[rate18mb] = ratesArray[rate24mb] =
1474 targetPowerOfdm.tPow2x[0];
1475 ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
1476 ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
1477 ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3];
1478 ratesArray[rateXr] = targetPowerOfdm.tPow2x[0];
1479
1480 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++)
1481 ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i];
1482
1483 if (IS_CHAN_2GHZ(chan)) {
1484 ratesArray[rate1l] = targetPowerCck.tPow2x[0];
1485 ratesArray[rate2s] = ratesArray[rate2l] =
1486 targetPowerCck.tPow2x[1];
1487 ratesArray[rate5_5s] = ratesArray[rate5_5l] =
1488 targetPowerCck.tPow2x[2];
1489 ;
1490 ratesArray[rate11s] = ratesArray[rate11l] =
1491 targetPowerCck.tPow2x[3];
1492 ;
1493 }
1494 if (IS_CHAN_HT40(chan)) {
1495 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
1496 ratesArray[rateHt40_0 + i] =
1497 targetPowerHt40.tPow2x[i];
1498 }
1499 ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
1500 ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
1501 ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
1502 if (IS_CHAN_2GHZ(chan)) {
1503 ratesArray[rateExtCck] =
1504 targetPowerCckExt.tPow2x[0];
1505 }
1506 }
1507 return true;
1508}
1509
/*
 * ath9k_hw_set_4k_power_per_rate_table - compute per-rate target powers
 * (4K "map4k" EEPROM layout, 2 GHz only)
 *
 * Fills @ratesArray with target powers (in half-dB steps) for every
 * rate: starts from the EEPROM 2 GHz target-power tables, then caps
 * each rate group by the matching conformance-test limit (CTL) edge
 * power, the antenna-gain-reduced regulatory maximum, and @powerLimit.
 *
 * Always returns true.
 */
static bool ath9k_hw_set_4k_power_per_rate_table(struct ath_hal *ah,
						 struct ath9k_channel *chan,
						 int16_t *ratesArray,
						 u16 cfgCtl,
						 u16 AntennaReduction,
						 u16 twiceMaxRegulatoryPower,
						 u16 powerLimit)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416_eeprom_4k *pEepData = &ahp->ah_eeprom.map4k;
	u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
	static const u16 tpScaleReductionTable[5] =
		{ 0, 3, 6, 9, AR5416_MAX_RATE_POWER };

	int i;
	int16_t twiceLargestAntenna;
	struct cal_ctl_data_4k *rep;
	struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
		0, { 0, 0, 0, 0}
	};
	struct cal_target_power_leg targetPowerOfdmExt = {
		0, { 0, 0, 0, 0} }, targetPowerCckExt = {
		0, { 0, 0, 0, 0 }
	};
	struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
		0, {0, 0, 0, 0}
	};
	u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
	u16 ctlModesFor11g[] =
		{ CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT,
		  CTL_2GHT40
		};
	u16 numCtlModes, *pCtlMode, ctlMode, freq;
	struct chan_centers centers;
	int tx_chainmask;
	u16 twiceMinEdgePower;

	tx_chainmask = ahp->ah_txchainmask;

	ath9k_hw_get_channel_centers(ah, chan, &centers);

	/* Single antenna gain entry in the 4K map. */
	twiceLargestAntenna = pEepData->modalHeader.antennaGainCh[0];

	/* Non-positive correction: only reduces the regulatory budget. */
	twiceLargestAntenna = (int16_t)min(AntennaReduction -
					   twiceLargestAntenna, 0);

	maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna;

	if (ah->ah_tpScale != ATH9K_TP_SCALE_MAX) {
		maxRegAllowedPower -=
			(tpScaleReductionTable[(ah->ah_tpScale)] * 2);
	}

	scaledPower = min(powerLimit, maxRegAllowedPower);
	scaledPower = max((u16)0, scaledPower);

	/* 2 GHz only; HT40 CTL modes are skipped on HT20 channels. */
	numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40;
	pCtlMode = ctlModesFor11g;

	ath9k_hw_get_legacy_target_powers(ah, chan,
			pEepData->calTargetPowerCck,
			AR5416_NUM_2G_CCK_TARGET_POWERS,
			&targetPowerCck, 4, false);
	ath9k_hw_get_legacy_target_powers(ah, chan,
			pEepData->calTargetPower2G,
			AR5416_NUM_2G_20_TARGET_POWERS,
			&targetPowerOfdm, 4, false);
	ath9k_hw_get_target_powers(ah, chan,
			pEepData->calTargetPower2GHT20,
			AR5416_NUM_2G_20_TARGET_POWERS,
			&targetPowerHt20, 8, false);

	if (IS_CHAN_HT40(chan)) {
		numCtlModes = ARRAY_SIZE(ctlModesFor11g);
		ath9k_hw_get_target_powers(ah, chan,
				pEepData->calTargetPower2GHT40,
				AR5416_NUM_2G_40_TARGET_POWERS,
				&targetPowerHt40, 8, true);
		ath9k_hw_get_legacy_target_powers(ah, chan,
				pEepData->calTargetPowerCck,
				AR5416_NUM_2G_CCK_TARGET_POWERS,
				&targetPowerCckExt, 4, true);
		ath9k_hw_get_legacy_target_powers(ah, chan,
				pEepData->calTargetPower2G,
				AR5416_NUM_2G_20_TARGET_POWERS,
				&targetPowerOfdmExt, 4, true);
	}

	/* For each CTL mode, find the tightest matching regulatory edge
	 * power and cap the corresponding target-power group with it. */
	for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) {
		bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) ||
			(pCtlMode[ctlMode] == CTL_2GHT40);
		if (isHt40CtlMode)
			freq = centers.synth_center;
		else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
			freq = centers.ext_center;
		else
			freq = centers.ctl_center;

		/* Old EEPROM revisions: reset the running edge power for
		 * every CTL mode rather than carrying it over. */
		if (ar5416_get_eep_ver(ahp) == 14 &&
		    ar5416_get_eep_rev(ahp) <= 2)
			twiceMaxEdgePower = AR5416_MAX_RATE_POWER;

		DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
			"LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, "
			"EXT_ADDITIVE %d\n",
			ctlMode, numCtlModes, isHt40CtlMode,
			(pCtlMode[ctlMode] & EXT_ADDITIVE));

		for (i = 0; (i < AR5416_NUM_CTLS) &&
			     pEepData->ctlIndex[i]; i++) {
			DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
				"  LOOP-Ctlidx %d: cfgCtl 0x%2.2x "
				"pCtlMode 0x%2.2x ctlIndex 0x%2.2x "
				"chan %d\n",
				i, cfgCtl, pCtlMode[ctlMode],
				pEepData->ctlIndex[i], chan->channel);

			/* Match on the configured regulatory domain, or
			 * on SD_NO_CTL entries. */
			if ((((cfgCtl & ~CTL_MODE_M) |
			      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
			     pEepData->ctlIndex[i]) ||
			    (((cfgCtl & ~CTL_MODE_M) |
			      (pCtlMode[ctlMode] & CTL_MODE_M)) ==
			     ((pEepData->ctlIndex[i] & CTL_MODE_M) |
			      SD_NO_CTL))) {
				rep = &(pEepData->ctlData[i]);

				twiceMinEdgePower =
					ath9k_hw_get_max_edge_power(freq,
						rep->ctlEdges[ar5416_get_ntxchains
						(tx_chainmask) - 1],
						IS_CHAN_2GHZ(chan),
						AR5416_EEP4K_NUM_BAND_EDGES);

				DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
					"    MATCH-EE_IDX %d: ch %d is2 %d "
					"2xMinEdge %d chainmask %d chains %d\n",
					i, freq, IS_CHAN_2GHZ(chan),
					twiceMinEdgePower, tx_chainmask,
					ar5416_get_ntxchains
					(tx_chainmask));
				if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
					twiceMaxEdgePower =
						min(twiceMaxEdgePower,
						    twiceMinEdgePower);
				} else {
					twiceMaxEdgePower = twiceMinEdgePower;
					break;
				}
			}
		}

		minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);

		DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
			"    SEL-Min ctlMode %d pCtlMode %d "
			"2xMaxEdge %d sP %d minCtlPwr %d\n",
			ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
			scaledPower, minCtlPower);

		/* Apply the cap to the target-power group this CTL
		 * mode governs. */
		switch (pCtlMode[ctlMode]) {
		case CTL_11B:
			for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x);
			     i++) {
				targetPowerCck.tPow2x[i] =
					min((u16)targetPowerCck.tPow2x[i],
					    minCtlPower);
			}
			break;
		case CTL_11G:
			for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x);
			     i++) {
				targetPowerOfdm.tPow2x[i] =
					min((u16)targetPowerOfdm.tPow2x[i],
					    minCtlPower);
			}
			break;
		case CTL_2GHT20:
			for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x);
			     i++) {
				targetPowerHt20.tPow2x[i] =
					min((u16)targetPowerHt20.tPow2x[i],
					    minCtlPower);
			}
			break;
		case CTL_11B_EXT:
			targetPowerCckExt.tPow2x[0] = min((u16)
					targetPowerCckExt.tPow2x[0],
					minCtlPower);
			break;
		case CTL_11G_EXT:
			targetPowerOfdmExt.tPow2x[0] = min((u16)
					targetPowerOfdmExt.tPow2x[0],
					minCtlPower);
			break;
		case CTL_2GHT40:
			for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x);
			     i++) {
				targetPowerHt40.tPow2x[i] =
					min((u16)targetPowerHt40.tPow2x[i],
					    minCtlPower);
			}
			break;
		default:
			break;
		}
	}

	/* Spread the capped group powers over the individual rates. */
	ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] =
		ratesArray[rate18mb] = ratesArray[rate24mb] =
		targetPowerOfdm.tPow2x[0];
	ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1];
	ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2];
	ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3];
	ratesArray[rateXr] = targetPowerOfdm.tPow2x[0];

	for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++)
		ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i];

	ratesArray[rate1l] = targetPowerCck.tPow2x[0];
	ratesArray[rate2s] = ratesArray[rate2l] = targetPowerCck.tPow2x[1];
	ratesArray[rate5_5s] = ratesArray[rate5_5l] = targetPowerCck.tPow2x[2];
	ratesArray[rate11s] = ratesArray[rate11l] = targetPowerCck.tPow2x[3];

	if (IS_CHAN_HT40(chan)) {
		for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) {
			ratesArray[rateHt40_0 + i] =
				targetPowerHt40.tPow2x[i];
		}
		ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
		ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
		ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
		ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0];
	}
	return true;
}
1745
/*
 * ath9k_hw_def_set_txpower - compute and program TX power (default EEPROM)
 *
 * Builds the per-rate power table, programs the PDADC calibration
 * tables, applies txPowerIndexOffset (and, on AR9280+, the power-table
 * offset bias), then writes all AR_PHY_POWER_TX_RATE* registers.
 * Finally records the max power level in ah->ah_maxPowerLevel.
 *
 * All power values are in half-dB steps (2 * dBm).
 *
 * Returns 0 on success, -EIO if either table computation fails.
 */
static int ath9k_hw_def_set_txpower(struct ath_hal *ah,
				    struct ath9k_channel *chan,
				    u16 cfgCtl,
				    u8 twiceAntennaReduction,
				    u8 twiceMaxRegulatoryPower,
				    u8 powerLimit)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416_eeprom_def *pEepData = &ahp->ah_eeprom.def;
	struct modal_eep_header *pModal =
		&(pEepData->modalHeader[IS_CHAN_2GHZ(chan)]);
	int16_t ratesArray[Ar5416RateSize];
	int16_t txPowerIndexOffset = 0;
	/* Default HT40 PDADC bump; overridden by EEPROM minor >= 2. */
	u8 ht40PowerIncForPdadc = 2;
	int i;

	memset(ratesArray, 0, sizeof(ratesArray));

	if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_2) {
		ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
	}

	if (!ath9k_hw_set_def_power_per_rate_table(ah, chan,
						   &ratesArray[0], cfgCtl,
						   twiceAntennaReduction,
						   twiceMaxRegulatoryPower,
						   powerLimit)) {
		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
			"ath9k_hw_set_txpower: unable to set "
			"tx power per rate table\n");
		return -EIO;
	}

	if (!ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset)) {
		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
			"ath9k_hw_set_txpower: unable to set power table\n");
		return -EIO;
	}

	/* Apply the calibration offset and clamp each rate. */
	for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
		ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
		if (ratesArray[i] > AR5416_MAX_RATE_POWER)
			ratesArray[i] = AR5416_MAX_RATE_POWER;
	}

	/* AR9280+ registers are biased down by the power-table offset;
	 * the bias is undone below when reporting ah_maxPowerLevel. */
	if (AR_SREV_9280_10_OR_LATER(ah)) {
		for (i = 0; i < Ar5416RateSize; i++)
			ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2;
	}

	/* Legacy OFDM rates. */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
		  ATH9K_POW_SM(ratesArray[rate18mb], 24)
		  | ATH9K_POW_SM(ratesArray[rate12mb], 16)
		  | ATH9K_POW_SM(ratesArray[rate9mb], 8)
		  | ATH9K_POW_SM(ratesArray[rate6mb], 0));
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE2,
		  ATH9K_POW_SM(ratesArray[rate54mb], 24)
		  | ATH9K_POW_SM(ratesArray[rate48mb], 16)
		  | ATH9K_POW_SM(ratesArray[rate36mb], 8)
		  | ATH9K_POW_SM(ratesArray[rate24mb], 0));

	/* CCK rates (2 GHz only). */
	if (IS_CHAN_2GHZ(chan)) {
		REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
			  ATH9K_POW_SM(ratesArray[rate2s], 24)
			  | ATH9K_POW_SM(ratesArray[rate2l], 16)
			  | ATH9K_POW_SM(ratesArray[rateXr], 8)
			  | ATH9K_POW_SM(ratesArray[rate1l], 0));
		REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
			  ATH9K_POW_SM(ratesArray[rate11s], 24)
			  | ATH9K_POW_SM(ratesArray[rate11l], 16)
			  | ATH9K_POW_SM(ratesArray[rate5_5s], 8)
			  | ATH9K_POW_SM(ratesArray[rate5_5l], 0));
	}

	/* HT20 MCS 0-7. */
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
		  ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
		  | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
		  | ATH9K_POW_SM(ratesArray[rateHt20_1], 8)
		  | ATH9K_POW_SM(ratesArray[rateHt20_0], 0));
	REG_WRITE(ah, AR_PHY_POWER_TX_RATE6,
		  ATH9K_POW_SM(ratesArray[rateHt20_7], 24)
		  | ATH9K_POW_SM(ratesArray[rateHt20_6], 16)
		  | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
		  | ATH9K_POW_SM(ratesArray[rateHt20_4], 0));

	/* HT40 MCS 0-7 (with the PDADC increment) plus dup/ext rates. */
	if (IS_CHAN_HT40(chan)) {
		REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
			  ATH9K_POW_SM(ratesArray[rateHt40_3] +
				       ht40PowerIncForPdadc, 24)
			  | ATH9K_POW_SM(ratesArray[rateHt40_2] +
					 ht40PowerIncForPdadc, 16)
			  | ATH9K_POW_SM(ratesArray[rateHt40_1] +
					 ht40PowerIncForPdadc, 8)
			  | ATH9K_POW_SM(ratesArray[rateHt40_0] +
					 ht40PowerIncForPdadc, 0));
		REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
			  ATH9K_POW_SM(ratesArray[rateHt40_7] +
				       ht40PowerIncForPdadc, 24)
			  | ATH9K_POW_SM(ratesArray[rateHt40_6] +
					 ht40PowerIncForPdadc, 16)
			  | ATH9K_POW_SM(ratesArray[rateHt40_5] +
					 ht40PowerIncForPdadc, 8)
			  | ATH9K_POW_SM(ratesArray[rateHt40_4] +
					 ht40PowerIncForPdadc, 0));

		REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
			  ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
			  | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
			  | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
			  | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
	}

	REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
		  ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
		  | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));

	/* Report max power from the lowest rate of the active mode. */
	i = rate6mb;

	if (IS_CHAN_HT40(chan))
		i = rateHt40_0;
	else if (IS_CHAN_HT20(chan))
		i = rateHt20_0;

	if (AR_SREV_9280_10_OR_LATER(ah))
		ah->ah_maxPowerLevel =
			ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2;
	else
		ah->ah_maxPowerLevel = ratesArray[i];

	return 0;
}
1878
1879static int ath9k_hw_4k_set_txpower(struct ath_hal *ah,
1880 struct ath9k_channel *chan,
1881 u16 cfgCtl,
1882 u8 twiceAntennaReduction,
1883 u8 twiceMaxRegulatoryPower,
1884 u8 powerLimit)
1885{
1886 struct ath_hal_5416 *ahp = AH5416(ah);
1887 struct ar5416_eeprom_4k *pEepData = &ahp->ah_eeprom.map4k;
1888 struct modal_eep_4k_header *pModal = &pEepData->modalHeader;
1889 int16_t ratesArray[Ar5416RateSize];
1890 int16_t txPowerIndexOffset = 0;
1891 u8 ht40PowerIncForPdadc = 2;
1892 int i;
1893
1894 memset(ratesArray, 0, sizeof(ratesArray));
1895
1896 if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
1897 AR5416_EEP_MINOR_VER_2) {
1898 ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
1899 }
1900
1901 if (!ath9k_hw_set_4k_power_per_rate_table(ah, chan,
1902 &ratesArray[0], cfgCtl,
1903 twiceAntennaReduction,
1904 twiceMaxRegulatoryPower,
1905 powerLimit)) {
1906 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1907 "ath9k_hw_set_txpower: unable to set "
1908 "tx power per rate table\n");
1909 return -EIO;
1910 }
1911
1912 if (!ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset)) {
1913 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1914 "ath9k_hw_set_txpower: unable to set power table\n");
1915 return -EIO;
1916 }
1917
1918 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
1919 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
1920 if (ratesArray[i] > AR5416_MAX_RATE_POWER)
1921 ratesArray[i] = AR5416_MAX_RATE_POWER;
1922 }
1923
1924 if (AR_SREV_9280_10_OR_LATER(ah)) {
1925 for (i = 0; i < Ar5416RateSize; i++)
1926 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2;
1927 }
1928
1929 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
1930 ATH9K_POW_SM(ratesArray[rate18mb], 24)
1931 | ATH9K_POW_SM(ratesArray[rate12mb], 16)
1932 | ATH9K_POW_SM(ratesArray[rate9mb], 8)
1933 | ATH9K_POW_SM(ratesArray[rate6mb], 0));
1934 REG_WRITE(ah, AR_PHY_POWER_TX_RATE2,
1935 ATH9K_POW_SM(ratesArray[rate54mb], 24)
1936 | ATH9K_POW_SM(ratesArray[rate48mb], 16)
1937 | ATH9K_POW_SM(ratesArray[rate36mb], 8)
1938 | ATH9K_POW_SM(ratesArray[rate24mb], 0));
1939
1940 if (IS_CHAN_2GHZ(chan)) {
1941 REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
1942 ATH9K_POW_SM(ratesArray[rate2s], 24)
1943 | ATH9K_POW_SM(ratesArray[rate2l], 16)
1944 | ATH9K_POW_SM(ratesArray[rateXr], 8)
1945 | ATH9K_POW_SM(ratesArray[rate1l], 0));
1946 REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
1947 ATH9K_POW_SM(ratesArray[rate11s], 24)
1948 | ATH9K_POW_SM(ratesArray[rate11l], 16)
1949 | ATH9K_POW_SM(ratesArray[rate5_5s], 8)
1950 | ATH9K_POW_SM(ratesArray[rate5_5l], 0));
1951 }
1952
1953 REG_WRITE(ah, AR_PHY_POWER_TX_RATE5,
1954 ATH9K_POW_SM(ratesArray[rateHt20_3], 24)
1955 | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
1956 | ATH9K_POW_SM(ratesArray[rateHt20_1], 8)
1957 | ATH9K_POW_SM(ratesArray[rateHt20_0], 0));
1958 REG_WRITE(ah, AR_PHY_POWER_TX_RATE6,
1959 ATH9K_POW_SM(ratesArray[rateHt20_7], 24)
1960 | ATH9K_POW_SM(ratesArray[rateHt20_6], 16)
1961 | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
1962 | ATH9K_POW_SM(ratesArray[rateHt20_4], 0));
1963
1964 if (IS_CHAN_HT40(chan)) {
1965 REG_WRITE(ah, AR_PHY_POWER_TX_RATE7,
1966 ATH9K_POW_SM(ratesArray[rateHt40_3] +
1967 ht40PowerIncForPdadc, 24)
1968 | ATH9K_POW_SM(ratesArray[rateHt40_2] +
1969 ht40PowerIncForPdadc, 16)
1970 | ATH9K_POW_SM(ratesArray[rateHt40_1] +
1971 ht40PowerIncForPdadc, 8)
1972 | ATH9K_POW_SM(ratesArray[rateHt40_0] +
1973 ht40PowerIncForPdadc, 0));
1974 REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
1975 ATH9K_POW_SM(ratesArray[rateHt40_7] +
1976 ht40PowerIncForPdadc, 24)
1977 | ATH9K_POW_SM(ratesArray[rateHt40_6] +
1978 ht40PowerIncForPdadc, 16)
1979 | ATH9K_POW_SM(ratesArray[rateHt40_5] +
1980 ht40PowerIncForPdadc, 8)
1981 | ATH9K_POW_SM(ratesArray[rateHt40_4] +
1982 ht40PowerIncForPdadc, 0));
1983
1984 REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
1985 ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
1986 | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
1987 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
1988 | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
1989 }
1990
1991 i = rate6mb;
1992
1993 if (IS_CHAN_HT40(chan))
1994 i = rateHt40_0;
1995 else if (IS_CHAN_HT20(chan))
1996 i = rateHt20_0;
1997
1998 if (AR_SREV_9280_10_OR_LATER(ah))
1999 ah->ah_maxPowerLevel =
2000 ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2;
2001 else
2002 ah->ah_maxPowerLevel = ratesArray[i];
2003
2004 return 0;
2005}
2006
/*
 * Dispatch table indexed by ahp->ah_eep_map (set in
 * ath9k_hw_eeprom_attach()): slot 0 = default EEPROM map,
 * slot 1 = 4K-bit (AR9285) map. Assumes EEP_MAP_DEFAULT == 0 and
 * EEP_MAP_4KBITS == 1 — the same layout all the other dispatch tables
 * in this file rely on.
 */
static int (*ath9k_set_txpower[]) (struct ath_hal *,
				   struct ath9k_channel *,
				   u16, u8, u8, u8) = {
	ath9k_hw_def_set_txpower,
	ath9k_hw_4k_set_txpower
};
2013
2014int ath9k_hw_set_txpower(struct ath_hal *ah,
2015 struct ath9k_channel *chan,
2016 u16 cfgCtl,
2017 u8 twiceAntennaReduction,
2018 u8 twiceMaxRegulatoryPower,
2019 u8 powerLimit)
2020{
2021 struct ath_hal_5416 *ahp = AH5416(ah);
2022
2023 return ath9k_set_txpower[ahp->ah_eep_map](ah, chan, cfgCtl,
2024 twiceAntennaReduction, twiceMaxRegulatoryPower,
2025 powerLimit);
2026}
2027
/*
 * Adjust the cached ADDAC initval XPA bias level from the default
 * (non-4K) EEPROM map. Applies only to AR9160 parts with EEPROM minor
 * rev >= 7; everything else returns untouched.
 *
 * A fixed bias is taken straight from xpaBiasLvl; the 0xff sentinel
 * means "per-frequency": walk up to three packed frequency/bias words
 * and keep the bias of the highest frequency bin at or below the
 * current synth center.
 */
static void ath9k_hw_set_def_addac(struct ath_hal *ah,
				   struct ath9k_channel *chan)
{
#define XPA_LVL_FREQ(cnt) (pModal->xpaBiasLvlFreq[cnt])
	struct modal_eep_header *pModal;
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416_eeprom_def *eep = &ahp->ah_eeprom.def;
	u8 biaslevel;

	if (ah->ah_macVersion != AR_SREV_VERSION_9160)
		return;

	if (ar5416_get_eep_rev(ahp) < AR5416_EEP_MINOR_VER_7)
		return;

	/* Band-specific modal header: index 1 = 2 GHz, 0 = 5 GHz */
	pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);

	if (pModal->xpaBiasLvl != 0xff) {
		biaslevel = pModal->xpaBiasLvl;
	} else {
		/* Packed word: low byte = frequency bin, bits 15:14 = bias */
		u16 resetFreqBin, freqBin, freqCount = 0;
		struct chan_centers centers;

		ath9k_hw_get_channel_centers(ah, chan, &centers);

		resetFreqBin = FREQ2FBIN(centers.synth_center,
					 IS_CHAN_2GHZ(chan));
		freqBin = XPA_LVL_FREQ(0) & 0xff;
		biaslevel = (u8) (XPA_LVL_FREQ(0) >> 14);

		freqCount++;

		/* Entry 0 is the default; a zero word ends the list early */
		while (freqCount < 3) {
			if (XPA_LVL_FREQ(freqCount) == 0x0)
				break;

			freqBin = XPA_LVL_FREQ(freqCount) & 0xff;
			if (resetFreqBin >= freqBin)
				biaslevel = (u8)(XPA_LVL_FREQ(freqCount) >> 14);
			else
				break;
			freqCount++;
		}
	}

	/*
	 * Patch the bias into the cached ADDAC initvals: 2 GHz uses
	 * bits 4:3 of row 7, 5 GHz uses bits 7:6 of row 6.
	 */
	if (IS_CHAN_2GHZ(chan)) {
		INI_RA(&ahp->ah_iniAddac, 7, 1) = (INI_RA(&ahp->ah_iniAddac,
			7, 1) & (~0x18)) | biaslevel << 3;
	} else {
		INI_RA(&ahp->ah_iniAddac, 6, 1) = (INI_RA(&ahp->ah_iniAddac,
			6, 1) & (~0xc0)) | biaslevel << 6;
	}
#undef XPA_LVL_FREQ
}
2082
2083static void ath9k_hw_set_4k_addac(struct ath_hal *ah,
2084 struct ath9k_channel *chan)
2085{
2086 struct modal_eep_4k_header *pModal;
2087 struct ath_hal_5416 *ahp = AH5416(ah);
2088 struct ar5416_eeprom_4k *eep = &ahp->ah_eeprom.map4k;
2089 u8 biaslevel;
2090
2091 if (ah->ah_macVersion != AR_SREV_VERSION_9160)
2092 return;
2093
2094 if (ar5416_get_eep_rev(ahp) < AR5416_EEP_MINOR_VER_7)
2095 return;
2096
2097 pModal = &eep->modalHeader;
2098
2099 if (pModal->xpaBiasLvl != 0xff) {
2100 biaslevel = pModal->xpaBiasLvl;
2101 INI_RA(&ahp->ah_iniAddac, 7, 1) =
2102 (INI_RA(&ahp->ah_iniAddac, 7, 1) & (~0x18)) | biaslevel << 3;
2103 }
2104}
2105
/*
 * ADDAC-bias dispatch table, indexed by ahp->ah_eep_map:
 * slot 0 = default EEPROM map, slot 1 = 4K-bit (AR9285) map.
 */
static void (*ath9k_set_addac[]) (struct ath_hal *, struct ath9k_channel *) = {
	ath9k_hw_set_def_addac,
	ath9k_hw_set_4k_addac
};
2110
2111void ath9k_hw_set_addac(struct ath_hal *ah, struct ath9k_channel *chan)
2112{
2113 struct ath_hal_5416 *ahp = AH5416(ah);
2114
2115 ath9k_set_addac[ahp->ah_eep_map](ah, chan);
2116}
2117
2118
2119
/* XXX: Clean me up, make me more legible */
/*
 * Apply analog/baseband board settings from the default (non-4K)
 * EEPROM modal header: per-chain antenna control, I/Q calibration,
 * attenuation/margin, AR9280+ OB/DB bias, and timing/threshold fields.
 * Always returns true.
 */
static bool ath9k_hw_eeprom_set_def_board_values(struct ath_hal *ah,
						 struct ath9k_channel *chan)
{
	struct modal_eep_header *pModal;
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416_eeprom_def *eep = &ahp->ah_eeprom.def;
	int i, regChainOffset;
	u8 txRxAttenLocal;
	u16 ant_config;

	/* Band-specific modal header: index 1 = 2 GHz, 0 = 5 GHz */
	pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);

	/* Fallback attenuation for EEPROMs older than minor ver 3 */
	txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;

	ath9k_hw_get_eeprom_antenna_cfg(ah, chan, 0, &ant_config);
	REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);

	for (i = 0; i < AR5416_MAX_CHAINS; i++) {
		/* AR9280 only has two chains */
		if (AR_SREV_9280(ah)) {
			if (i >= 2)
				break;
		}

		/* Chainmask 5 (0b101) swaps the register banks of
		 * chains 1 and 2 on 5416 v2.0+ */
		if (AR_SREV_5416_V20_OR_LATER(ah) &&
		    (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5)
		    && (i != 0))
			regChainOffset = (i == 1) ? 0x2000 : 0x1000;
		else
			regChainOffset = i * 0x1000;

		REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
			  pModal->antCtrlChain[i]);

		/* Per-chain I/Q correction coefficients */
		REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
			  (REG_READ(ah,
				    AR_PHY_TIMING_CTRL4(0) +
				    regChainOffset) &
			   ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
			     AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
			  SM(pModal->iqCalICh[i],
			     AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
			  SM(pModal->iqCalQCh[i],
			     AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));

		/* Attenuation fields: chain 0 always; other chains only
		 * on 5416 v2.0+ */
		if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
			if ((eep->baseEepHeader.version &
			     AR5416_EEP_VER_MINOR_MASK) >=
			    AR5416_EEP_MINOR_VER_3) {
				txRxAttenLocal = pModal->txRxAttenCh[i];
				if (AR_SREV_9280_10_OR_LATER(ah)) {
					REG_RMW_FIELD(ah,
						AR_PHY_GAIN_2GHZ +
						regChainOffset,
						AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
						pModal->
						bswMargin[i]);
					REG_RMW_FIELD(ah,
						AR_PHY_GAIN_2GHZ +
						regChainOffset,
						AR_PHY_GAIN_2GHZ_XATTEN1_DB,
						pModal->
						bswAtten[i]);
					REG_RMW_FIELD(ah,
						AR_PHY_GAIN_2GHZ +
						regChainOffset,
						AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
						pModal->
						xatten2Margin[i]);
					REG_RMW_FIELD(ah,
						AR_PHY_GAIN_2GHZ +
						regChainOffset,
						AR_PHY_GAIN_2GHZ_XATTEN2_DB,
						pModal->
						xatten2Db[i]);
				} else {
					REG_WRITE(ah,
						  AR_PHY_GAIN_2GHZ +
						  regChainOffset,
						  (REG_READ(ah,
							    AR_PHY_GAIN_2GHZ +
							    regChainOffset) &
						   ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
						  | SM(pModal->
						       bswMargin[i],
						       AR_PHY_GAIN_2GHZ_BSW_MARGIN));
					REG_WRITE(ah,
						  AR_PHY_GAIN_2GHZ +
						  regChainOffset,
						  (REG_READ(ah,
							    AR_PHY_GAIN_2GHZ +
							    regChainOffset) &
						   ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
						  | SM(pModal->bswAtten[i],
						       AR_PHY_GAIN_2GHZ_BSW_ATTEN));
				}
			}
			if (AR_SREV_9280_10_OR_LATER(ah)) {
				REG_RMW_FIELD(ah,
					      AR_PHY_RXGAIN +
					      regChainOffset,
					      AR9280_PHY_RXGAIN_TXRX_ATTEN,
					      txRxAttenLocal);
				REG_RMW_FIELD(ah,
					      AR_PHY_RXGAIN +
					      regChainOffset,
					      AR9280_PHY_RXGAIN_TXRX_MARGIN,
					      pModal->rxTxMarginCh[i]);
			} else {
				REG_WRITE(ah,
					  AR_PHY_RXGAIN + regChainOffset,
					  (REG_READ(ah,
						    AR_PHY_RXGAIN +
						    regChainOffset) &
					   ~AR_PHY_RXGAIN_TXRX_ATTEN) |
					  SM(txRxAttenLocal,
					     AR_PHY_RXGAIN_TXRX_ATTEN));
				REG_WRITE(ah,
					  AR_PHY_GAIN_2GHZ +
					  regChainOffset,
					  (REG_READ(ah,
						    AR_PHY_GAIN_2GHZ +
						    regChainOffset) &
					   ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
					  SM(pModal->rxTxMarginCh[i],
					     AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
			}
		}
	}

	/* AR9280+ analog OB/DB bias and XPA settings, band-specific */
	if (AR_SREV_9280_10_OR_LATER(ah)) {
		if (IS_CHAN_2GHZ(chan)) {
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
						  AR_AN_RF2G1_CH0_OB,
						  AR_AN_RF2G1_CH0_OB_S,
						  pModal->ob);
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
						  AR_AN_RF2G1_CH0_DB,
						  AR_AN_RF2G1_CH0_DB_S,
						  pModal->db);
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
						  AR_AN_RF2G1_CH1_OB,
						  AR_AN_RF2G1_CH1_OB_S,
						  pModal->ob_ch1);
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
						  AR_AN_RF2G1_CH1_DB,
						  AR_AN_RF2G1_CH1_DB_S,
						  pModal->db_ch1);
		} else {
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
						  AR_AN_RF5G1_CH0_OB5,
						  AR_AN_RF5G1_CH0_OB5_S,
						  pModal->ob);
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
						  AR_AN_RF5G1_CH0_DB5,
						  AR_AN_RF5G1_CH0_DB5_S,
						  pModal->db);
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
						  AR_AN_RF5G1_CH1_OB5,
						  AR_AN_RF5G1_CH1_OB5_S,
						  pModal->ob_ch1);
			ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
						  AR_AN_RF5G1_CH1_DB5,
						  AR_AN_RF5G1_CH1_DB5_S,
						  pModal->db_ch1);
		}
		ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
					  AR_AN_TOP2_XPABIAS_LVL,
					  AR_AN_TOP2_XPABIAS_LVL_S,
					  pModal->xpaBiasLvl);
		ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
					  AR_AN_TOP2_LOCALBIAS,
					  AR_AN_TOP2_LOCALBIAS_S,
					  pModal->local_bias);
		DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "ForceXPAon: %d\n",
			pModal->force_xpaon);
		REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
			      pModal->force_xpaon);
	}

	/* Common timing/threshold fields */
	REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
		      pModal->switchSettling);
	REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
		      pModal->adcDesiredSize);

	if (!AR_SREV_9280_10_OR_LATER(ah))
		REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
			      AR_PHY_DESIRED_SZ_PGA,
			      pModal->pgaDesiredSize);

	REG_WRITE(ah, AR_PHY_RF_CTL4,
		  SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF)
		  | SM(pModal->txEndToXpaOff,
		       AR_PHY_RF_CTL4_TX_END_XPAB_OFF)
		  | SM(pModal->txFrameToXpaOn,
		       AR_PHY_RF_CTL4_FRAME_XPAA_ON)
		  | SM(pModal->txFrameToXpaOn,
		       AR_PHY_RF_CTL4_FRAME_XPAB_ON));

	REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
		      pModal->txEndToRxOn);
	/* CCA threshold lives in different fields pre/post AR9280 */
	if (AR_SREV_9280_10_OR_LATER(ah)) {
		REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
			      pModal->thresh62);
		REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
			      AR_PHY_EXT_CCA0_THRESH62,
			      pModal->thresh62);
	} else {
		REG_RMW_FIELD(ah, AR_PHY_CCA, AR_PHY_CCA_THRESH62,
			      pModal->thresh62);
		REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
			      AR_PHY_EXT_CCA_THRESH62,
			      pModal->thresh62);
	}

	/* Fields only present from EEPROM minor rev 2 / 3 onward */
	if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_2) {
		REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
			      AR_PHY_TX_END_DATA_START,
			      pModal->txFrameToDataStart);
		REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
			      pModal->txFrameToPaOn);
	}

	if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_3) {
		if (IS_CHAN_HT40(chan))
			REG_RMW_FIELD(ah, AR_PHY_SETTLING,
				      AR_PHY_SETTLING_SWITCH,
				      pModal->swSettleHt40);
	}

	return true;
}
2354
2355static bool ath9k_hw_eeprom_set_4k_board_values(struct ath_hal *ah,
2356 struct ath9k_channel *chan)
2357{
2358 struct modal_eep_4k_header *pModal;
2359 struct ath_hal_5416 *ahp = AH5416(ah);
2360 struct ar5416_eeprom_4k *eep = &ahp->ah_eeprom.map4k;
2361 int regChainOffset;
2362 u8 txRxAttenLocal;
2363 u16 ant_config = 0;
2364 u8 ob[5], db1[5], db2[5];
2365 u8 ant_div_control1, ant_div_control2;
2366 u32 regVal;
2367
2368
2369 pModal = &eep->modalHeader;
2370
2371 txRxAttenLocal = 23;
2372
2373 ath9k_hw_get_eeprom_antenna_cfg(ah, chan, 0, &ant_config);
2374 REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);
2375
2376 regChainOffset = 0;
2377 REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
2378 pModal->antCtrlChain[0]);
2379
2380 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
2381 (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) &
2382 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
2383 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
2384 SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
2385 SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
2386
2387 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
2388 AR5416_EEP_MINOR_VER_3) {
2389 txRxAttenLocal = pModal->txRxAttenCh[0];
2390 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
2391 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]);
2392 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
2393 AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]);
2394 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
2395 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
2396 pModal->xatten2Margin[0]);
2397 REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + regChainOffset,
2398 AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]);
2399 }
2400
2401 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
2402 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal);
2403 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + regChainOffset,
2404 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]);
2405
2406 if (AR_SREV_9285_11(ah))
2407 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
2408
2409 /* Initialize Ant Diversity settings from EEPROM */
2410 if (pModal->version == 3) {
2411 ant_div_control1 = ((pModal->ob_234 >> 12) & 0xf);
2412 ant_div_control2 = ((pModal->db1_234 >> 12) & 0xf);
2413 regVal = REG_READ(ah, 0x99ac);
2414 regVal &= (~(0x7f000000));
2415 regVal |= ((ant_div_control1 & 0x1) << 24);
2416 regVal |= (((ant_div_control1 >> 1) & 0x1) << 29);
2417 regVal |= (((ant_div_control1 >> 2) & 0x1) << 30);
2418 regVal |= ((ant_div_control2 & 0x3) << 25);
2419 regVal |= (((ant_div_control2 >> 2) & 0x3) << 27);
2420 REG_WRITE(ah, 0x99ac, regVal);
2421 regVal = REG_READ(ah, 0x99ac);
2422 regVal = REG_READ(ah, 0xa208);
2423 regVal &= (~(0x1 << 13));
2424 regVal |= (((ant_div_control1 >> 3) & 0x1) << 13);
2425 REG_WRITE(ah, 0xa208, regVal);
2426 regVal = REG_READ(ah, 0xa208);
2427 }
2428
2429 if (pModal->version >= 2) {
2430 ob[0] = (pModal->ob_01 & 0xf);
2431 ob[1] = (pModal->ob_01 >> 4) & 0xf;
2432 ob[2] = (pModal->ob_234 & 0xf);
2433 ob[3] = ((pModal->ob_234 >> 4) & 0xf);
2434 ob[4] = ((pModal->ob_234 >> 8) & 0xf);
2435
2436 db1[0] = (pModal->db1_01 & 0xf);
2437 db1[1] = ((pModal->db1_01 >> 4) & 0xf);
2438 db1[2] = (pModal->db1_234 & 0xf);
2439 db1[3] = ((pModal->db1_234 >> 4) & 0xf);
2440 db1[4] = ((pModal->db1_234 >> 8) & 0xf);
2441
2442 db2[0] = (pModal->db2_01 & 0xf);
2443 db2[1] = ((pModal->db2_01 >> 4) & 0xf);
2444 db2[2] = (pModal->db2_234 & 0xf);
2445 db2[3] = ((pModal->db2_234 >> 4) & 0xf);
2446 db2[4] = ((pModal->db2_234 >> 8) & 0xf);
2447
2448 } else if (pModal->version == 1) {
2449
2450 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
2451 "EEPROM Model version is set to 1 \n");
2452 ob[0] = (pModal->ob_01 & 0xf);
2453 ob[1] = ob[2] = ob[3] = ob[4] = (pModal->ob_01 >> 4) & 0xf;
2454 db1[0] = (pModal->db1_01 & 0xf);
2455 db1[1] = db1[2] = db1[3] =
2456 db1[4] = ((pModal->db1_01 >> 4) & 0xf);
2457 db2[0] = (pModal->db2_01 & 0xf);
2458 db2[1] = db2[2] = db2[3] =
2459 db2[4] = ((pModal->db2_01 >> 4) & 0xf);
2460 } else {
2461 int i;
2462 for (i = 0; i < 5; i++) {
2463 ob[i] = pModal->ob_01;
2464 db1[i] = pModal->db1_01;
2465 db2[i] = pModal->db1_01;
2466 }
2467 }
2468
2469 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2470 AR9285_AN_RF2G3_OB_0, AR9285_AN_RF2G3_OB_0_S, ob[0]);
2471 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2472 AR9285_AN_RF2G3_OB_1, AR9285_AN_RF2G3_OB_1_S, ob[1]);
2473 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2474 AR9285_AN_RF2G3_OB_2, AR9285_AN_RF2G3_OB_2_S, ob[2]);
2475 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2476 AR9285_AN_RF2G3_OB_3, AR9285_AN_RF2G3_OB_3_S, ob[3]);
2477 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2478 AR9285_AN_RF2G3_OB_4, AR9285_AN_RF2G3_OB_4_S, ob[4]);
2479
2480 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2481 AR9285_AN_RF2G3_DB1_0, AR9285_AN_RF2G3_DB1_0_S, db1[0]);
2482 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2483 AR9285_AN_RF2G3_DB1_1, AR9285_AN_RF2G3_DB1_1_S, db1[1]);
2484 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G3,
2485 AR9285_AN_RF2G3_DB1_2, AR9285_AN_RF2G3_DB1_2_S, db1[2]);
2486 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
2487 AR9285_AN_RF2G4_DB1_3, AR9285_AN_RF2G4_DB1_3_S, db1[3]);
2488 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
2489 AR9285_AN_RF2G4_DB1_4, AR9285_AN_RF2G4_DB1_4_S, db1[4]);
2490
2491 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
2492 AR9285_AN_RF2G4_DB2_0, AR9285_AN_RF2G4_DB2_0_S, db2[0]);
2493 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
2494 AR9285_AN_RF2G4_DB2_1, AR9285_AN_RF2G4_DB2_1_S, db2[1]);
2495 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
2496 AR9285_AN_RF2G4_DB2_2, AR9285_AN_RF2G4_DB2_2_S, db2[2]);
2497 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
2498 AR9285_AN_RF2G4_DB2_3, AR9285_AN_RF2G4_DB2_3_S, db2[3]);
2499 ath9k_hw_analog_shift_rmw(ah, AR9285_AN_RF2G4,
2500 AR9285_AN_RF2G4_DB2_4, AR9285_AN_RF2G4_DB2_4_S, db2[4]);
2501
2502
2503 if (AR_SREV_9285_11(ah))
2504 REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
2505
2506 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH,
2507 pModal->switchSettling);
2508 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
2509 pModal->adcDesiredSize);
2510
2511 REG_WRITE(ah, AR_PHY_RF_CTL4,
2512 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
2513 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
2514 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) |
2515 SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON));
2516
2517 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
2518 pModal->txEndToRxOn);
2519 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
2520 pModal->thresh62);
2521 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62,
2522 pModal->thresh62);
2523
2524 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
2525 AR5416_EEP_MINOR_VER_2) {
2526 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START,
2527 pModal->txFrameToDataStart);
2528 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
2529 pModal->txFrameToPaOn);
2530 }
2531
2532 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
2533 AR5416_EEP_MINOR_VER_3) {
2534 if (IS_CHAN_HT40(chan))
2535 REG_RMW_FIELD(ah, AR_PHY_SETTLING,
2536 AR_PHY_SETTLING_SWITCH,
2537 pModal->swSettleHt40);
2538 }
2539
2540 return true;
2541}
2542
/*
 * Board-values dispatch table, indexed by ahp->ah_eep_map:
 * slot 0 = default EEPROM map, slot 1 = 4K-bit (AR9285) map.
 */
static bool (*ath9k_eeprom_set_board_values[])(struct ath_hal *,
					       struct ath9k_channel *) = {
	ath9k_hw_eeprom_set_def_board_values,
	ath9k_hw_eeprom_set_4k_board_values
};
2548
2549bool ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
2550 struct ath9k_channel *chan)
2551{
2552 struct ath_hal_5416 *ahp = AH5416(ah);
2553
2554 return ath9k_eeprom_set_board_values[ahp->ah_eep_map](ah, chan);
2555}
2556
2557static int ath9k_hw_get_def_eeprom_antenna_cfg(struct ath_hal *ah,
2558 struct ath9k_channel *chan,
2559 u8 index, u16 *config)
2560{
2561 struct ath_hal_5416 *ahp = AH5416(ah);
2562 struct ar5416_eeprom_def *eep = &ahp->ah_eeprom.def;
2563 struct modal_eep_header *pModal =
2564 &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
2565 struct base_eep_header *pBase = &eep->baseEepHeader;
2566
2567 switch (index) {
2568 case 0:
2569 *config = pModal->antCtrlCommon & 0xFFFF;
2570 return 0;
2571 case 1:
2572 if (pBase->version >= 0x0E0D) {
2573 if (pModal->useAnt1) {
2574 *config =
2575 ((pModal->antCtrlCommon & 0xFFFF0000) >> 16);
2576 return 0;
2577 }
2578 }
2579 break;
2580 default:
2581 break;
2582 }
2583
2584 return -EINVAL;
2585}
2586
2587static int ath9k_hw_get_4k_eeprom_antenna_cfg(struct ath_hal *ah,
2588 struct ath9k_channel *chan,
2589 u8 index, u16 *config)
2590{
2591 struct ath_hal_5416 *ahp = AH5416(ah);
2592 struct ar5416_eeprom_4k *eep = &ahp->ah_eeprom.map4k;
2593 struct modal_eep_4k_header *pModal = &eep->modalHeader;
2594
2595 switch (index) {
2596 case 0:
2597 *config = pModal->antCtrlCommon & 0xFFFF;
2598 return 0;
2599 default:
2600 break;
2601 }
2602
2603 return -EINVAL;
2604}
2605
/*
 * Antenna-config dispatch table, indexed by ahp->ah_eep_map:
 * slot 0 = default EEPROM map, slot 1 = 4K-bit (AR9285) map.
 */
static int (*ath9k_get_eeprom_antenna_cfg[])(struct ath_hal *,
					     struct ath9k_channel *,
					     u8, u16 *) = {
	ath9k_hw_get_def_eeprom_antenna_cfg,
	ath9k_hw_get_4k_eeprom_antenna_cfg
};
2612
2613int ath9k_hw_get_eeprom_antenna_cfg(struct ath_hal *ah,
2614 struct ath9k_channel *chan,
2615 u8 index, u16 *config)
2616{
2617 struct ath_hal_5416 *ahp = AH5416(ah);
2618
2619 return ath9k_get_eeprom_antenna_cfg[ahp->ah_eep_map](ah, chan,
2620 index, config);
2621}
2622
/*
 * 4K-bit (AR9285) EEPROMs expose exactly one antenna configuration,
 * regardless of band (see ath9k_hw_get_4k_eeprom_antenna_cfg()).
 */
static u8 ath9k_hw_get_4k_num_ant_config(struct ath_hal *ah,
					 enum ieee80211_band freq_band)
{
	return 1;
}
2628
2629static u8 ath9k_hw_get_def_num_ant_config(struct ath_hal *ah,
2630 enum ieee80211_band freq_band)
2631{
2632 struct ath_hal_5416 *ahp = AH5416(ah);
2633 struct ar5416_eeprom_def *eep = &ahp->ah_eeprom.def;
2634 struct modal_eep_header *pModal =
2635 &(eep->modalHeader[ATH9K_HAL_FREQ_BAND_2GHZ == freq_band]);
2636 struct base_eep_header *pBase = &eep->baseEepHeader;
2637 u8 num_ant_config;
2638
2639 num_ant_config = 1;
2640
2641 if (pBase->version >= 0x0E0D)
2642 if (pModal->useAnt1)
2643 num_ant_config += 1;
2644
2645 return num_ant_config;
2646}
2647
/*
 * Antenna-config count dispatch table, indexed by ahp->ah_eep_map:
 * slot 0 = default EEPROM map, slot 1 = 4K-bit (AR9285) map.
 */
static u8 (*ath9k_get_num_ant_config[])(struct ath_hal *,
					enum ieee80211_band) = {
	ath9k_hw_get_def_num_ant_config,
	ath9k_hw_get_4k_num_ant_config
};
2653
2654u8 ath9k_hw_get_num_ant_config(struct ath_hal *ah,
2655 enum ieee80211_band freq_band)
2656{
2657 struct ath_hal_5416 *ahp = AH5416(ah);
2658
2659 return ath9k_get_num_ant_config[ahp->ah_eep_map](ah, freq_band);
2660}
2661
/*
 * Return the spur channel for mitigation slot `i` on the given band.
 *
 * The source depends on the configured spur mode: disabled gives the
 * AR_NO_SPUR sentinel, ioctl-configured reads the driver override
 * table, and EEPROM mode reads the modal header of whichever EEPROM
 * map (default or 4K) this chip uses.
 */
u16 ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah, u16 i, bool is2GHz)
{
#define EEP_MAP4K_SPURCHAN \
	(ahp->ah_eeprom.map4k.modalHeader.spurChans[i].spurChan)
#define EEP_DEF_SPURCHAN \
	(ahp->ah_eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan)
	struct ath_hal_5416 *ahp = AH5416(ah);
	u16 spur_val = AR_NO_SPUR;

	DPRINTF(ah->ah_sc, ATH_DBG_ANI,
		"Getting spur idx %d is2Ghz. %d val %x\n",
		i, is2GHz, ah->ah_config.spurchans[i][is2GHz]);

	switch (ah->ah_config.spurmode) {
	case SPUR_DISABLE:
		/* returns AR_NO_SPUR */
		break;
	case SPUR_ENABLE_IOCTL:
		spur_val = ah->ah_config.spurchans[i][is2GHz];
		DPRINTF(ah->ah_sc, ATH_DBG_ANI,
			"Getting spur val from new loc. %d\n", spur_val);
		break;
	case SPUR_ENABLE_EEPROM:
		if (ahp->ah_eep_map == EEP_MAP_4KBITS)
			spur_val = EEP_MAP4K_SPURCHAN;
		else
			spur_val = EEP_DEF_SPURCHAN;
		break;

	}

	return spur_val;
#undef EEP_DEF_SPURCHAN
#undef EEP_MAP4K_SPURCHAN
}
2696
2697static u32 ath9k_hw_get_eeprom_4k(struct ath_hal *ah,
2698 enum eeprom_param param)
2699{
2700 struct ath_hal_5416 *ahp = AH5416(ah);
2701 struct ar5416_eeprom_4k *eep = &ahp->ah_eeprom.map4k;
2702 struct modal_eep_4k_header *pModal = &eep->modalHeader;
2703 struct base_eep_header_4k *pBase = &eep->baseEepHeader;
2704
2705 switch (param) {
2706 case EEP_NFTHRESH_2:
2707 return pModal[1].noiseFloorThreshCh[0];
2708 case AR_EEPROM_MAC(0):
2709 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
2710 case AR_EEPROM_MAC(1):
2711 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
2712 case AR_EEPROM_MAC(2):
2713 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
2714 case EEP_REG_0:
2715 return pBase->regDmn[0];
2716 case EEP_REG_1:
2717 return pBase->regDmn[1];
2718 case EEP_OP_CAP:
2719 return pBase->deviceCap;
2720 case EEP_OP_MODE:
2721 return pBase->opCapFlags;
2722 case EEP_RF_SILENT:
2723 return pBase->rfSilent;
2724 case EEP_OB_2:
2725 return pModal->ob_01;
2726 case EEP_DB_2:
2727 return pModal->db1_01;
2728 case EEP_MINOR_REV:
2729 return pBase->version & AR5416_EEP_VER_MINOR_MASK;
2730 case EEP_TX_MASK:
2731 return pBase->txMask;
2732 case EEP_RX_MASK:
2733 return pBase->rxMask;
2734 default:
2735 return 0;
2736 }
2737}
2738
/*
 * Read a scalar parameter from the default (non-4K) EEPROM map.
 * pModal[0] is the 5 GHz modal header, pModal[1] the 2 GHz one.
 * Unknown parameters return 0.
 */
static u32 ath9k_hw_get_eeprom_def(struct ath_hal *ah,
				   enum eeprom_param param)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ar5416_eeprom_def *eep = &ahp->ah_eeprom.def;
	struct modal_eep_header *pModal = eep->modalHeader;
	struct base_eep_header *pBase = &eep->baseEepHeader;

	switch (param) {
	case EEP_NFTHRESH_5:
		return pModal[0].noiseFloorThreshCh[0];
	case EEP_NFTHRESH_2:
		return pModal[1].noiseFloorThreshCh[0];
	case AR_EEPROM_MAC(0):
		return pBase->macAddr[0] << 8 | pBase->macAddr[1];
	case AR_EEPROM_MAC(1):
		return pBase->macAddr[2] << 8 | pBase->macAddr[3];
	case AR_EEPROM_MAC(2):
		return pBase->macAddr[4] << 8 | pBase->macAddr[5];
	case EEP_REG_0:
		return pBase->regDmn[0];
	case EEP_REG_1:
		return pBase->regDmn[1];
	case EEP_OP_CAP:
		return pBase->deviceCap;
	case EEP_OP_MODE:
		return pBase->opCapFlags;
	case EEP_RF_SILENT:
		return pBase->rfSilent;
	case EEP_OB_5:
		return pModal[0].ob;
	case EEP_DB_5:
		return pModal[0].db;
	case EEP_OB_2:
		return pModal[1].ob;
	case EEP_DB_2:
		return pModal[1].db;
	case EEP_MINOR_REV:
		return pBase->version & AR5416_EEP_VER_MINOR_MASK;
	case EEP_TX_MASK:
		return pBase->txMask;
	case EEP_RX_MASK:
		return pBase->rxMask;
	case EEP_RXGAIN_TYPE:
		return pBase->rxGainType;
	case EEP_TXGAIN_TYPE:
		return pBase->txGainType;

	default:
		return 0;
	}
}
2791
/*
 * Parameter-read dispatch table, indexed by ahp->ah_eep_map:
 * slot 0 = default EEPROM map, slot 1 = 4K-bit (AR9285) map.
 */
static u32 (*ath9k_get_eeprom[])(struct ath_hal *, enum eeprom_param) = {
	ath9k_hw_get_eeprom_def,
	ath9k_hw_get_eeprom_4k
};
2796
2797u32 ath9k_hw_get_eeprom(struct ath_hal *ah,
2798 enum eeprom_param param)
2799{
2800 struct ath_hal_5416 *ahp = AH5416(ah);
2801
2802 return ath9k_get_eeprom[ahp->ah_eep_map](ah, param);
2803}
2804
2805int ath9k_hw_eeprom_attach(struct ath_hal *ah)
2806{
2807 int status;
2808 struct ath_hal_5416 *ahp = AH5416(ah);
2809
2810 if (ath9k_hw_use_flash(ah))
2811 ath9k_hw_flash_map(ah);
2812
2813 if (AR_SREV_9285(ah))
2814 ahp->ah_eep_map = EEP_MAP_4KBITS;
2815 else
2816 ahp->ah_eep_map = EEP_MAP_DEFAULT;
2817
2818 if (!ath9k_hw_fill_eeprom(ah))
2819 return -EIO;
2820
2821 status = ath9k_hw_check_eeprom(ah);
2822
2823 return status;
2824}
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index 98bc25c9b3cf..34474edefc97 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -23,277 +23,93 @@
23#include "phy.h" 23#include "phy.h"
24#include "initvals.h" 24#include "initvals.h"
25 25
26static void ath9k_hw_iqcal_collect(struct ath_hal *ah);
27static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains);
28static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah);
29static void ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah,
30 u8 numChains);
31static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah);
32static void ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah,
33 u8 numChains);
34
35static const u8 CLOCK_RATE[] = { 40, 80, 22, 44, 88, 40 }; 26static const u8 CLOCK_RATE[] = { 40, 80, 22, 44, 88, 40 };
36static const int16_t NOISE_FLOOR[] = { -96, -93, -98, -96, -93, -96 };
37
38static const struct hal_percal_data iq_cal_multi_sample = {
39 IQ_MISMATCH_CAL,
40 MAX_CAL_SAMPLES,
41 PER_MIN_LOG_COUNT,
42 ath9k_hw_iqcal_collect,
43 ath9k_hw_iqcalibrate
44};
45static const struct hal_percal_data iq_cal_single_sample = {
46 IQ_MISMATCH_CAL,
47 MIN_CAL_SAMPLES,
48 PER_MAX_LOG_COUNT,
49 ath9k_hw_iqcal_collect,
50 ath9k_hw_iqcalibrate
51};
52static const struct hal_percal_data adc_gain_cal_multi_sample = {
53 ADC_GAIN_CAL,
54 MAX_CAL_SAMPLES,
55 PER_MIN_LOG_COUNT,
56 ath9k_hw_adc_gaincal_collect,
57 ath9k_hw_adc_gaincal_calibrate
58};
59static const struct hal_percal_data adc_gain_cal_single_sample = {
60 ADC_GAIN_CAL,
61 MIN_CAL_SAMPLES,
62 PER_MAX_LOG_COUNT,
63 ath9k_hw_adc_gaincal_collect,
64 ath9k_hw_adc_gaincal_calibrate
65};
66static const struct hal_percal_data adc_dc_cal_multi_sample = {
67 ADC_DC_CAL,
68 MAX_CAL_SAMPLES,
69 PER_MIN_LOG_COUNT,
70 ath9k_hw_adc_dccal_collect,
71 ath9k_hw_adc_dccal_calibrate
72};
73static const struct hal_percal_data adc_dc_cal_single_sample = {
74 ADC_DC_CAL,
75 MIN_CAL_SAMPLES,
76 PER_MAX_LOG_COUNT,
77 ath9k_hw_adc_dccal_collect,
78 ath9k_hw_adc_dccal_calibrate
79};
80static const struct hal_percal_data adc_init_dc_cal = {
81 ADC_DC_INIT_CAL,
82 MIN_CAL_SAMPLES,
83 INIT_LOG_COUNT,
84 ath9k_hw_adc_dccal_collect,
85 ath9k_hw_adc_dccal_calibrate
86};
87
88static struct ath9k_rate_table ar5416_11a_table = {
89 8,
90 {0},
91 {
92 {true, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0},
93 {true, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0},
94 {true, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2},
95 {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2},
96 {true, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4},
97 {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4},
98 {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 4},
99 {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4}
100 },
101};
102
103static struct ath9k_rate_table ar5416_11b_table = {
104 4,
105 {0},
106 {
107 {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
108 {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
109 {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 1},
110 {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 1}
111 },
112};
113
114static struct ath9k_rate_table ar5416_11g_table = {
115 12,
116 {0},
117 {
118 {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
119 {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
120 {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2},
121 {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3},
122
123 {false, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4},
124 {false, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4},
125 {true, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6},
126 {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6},
127 {true, PHY_OFDM, 24000, 0x09, 0x00, 48, 8},
128 {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8},
129 {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 8},
130 {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8}
131 },
132};
133
134static struct ath9k_rate_table ar5416_11ng_table = {
135 28,
136 {0},
137 {
138 {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0},
139 {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1},
140 {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2},
141 {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3},
142
143 {false, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4},
144 {false, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4},
145 {true, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6},
146 {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6},
147 {true, PHY_OFDM, 24000, 0x09, 0x00, 48, 8},
148 {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8},
149 {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 8},
150 {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8},
151 {true, PHY_HT, 6500, 0x80, 0x00, 0, 4},
152 {true, PHY_HT, 13000, 0x81, 0x00, 1, 6},
153 {true, PHY_HT, 19500, 0x82, 0x00, 2, 6},
154 {true, PHY_HT, 26000, 0x83, 0x00, 3, 8},
155 {true, PHY_HT, 39000, 0x84, 0x00, 4, 8},
156 {true, PHY_HT, 52000, 0x85, 0x00, 5, 8},
157 {true, PHY_HT, 58500, 0x86, 0x00, 6, 8},
158 {true, PHY_HT, 65000, 0x87, 0x00, 7, 8},
159 {true, PHY_HT, 13000, 0x88, 0x00, 8, 4},
160 {true, PHY_HT, 26000, 0x89, 0x00, 9, 6},
161 {true, PHY_HT, 39000, 0x8a, 0x00, 10, 6},
162 {true, PHY_HT, 52000, 0x8b, 0x00, 11, 8},
163 {true, PHY_HT, 78000, 0x8c, 0x00, 12, 8},
164 {true, PHY_HT, 104000, 0x8d, 0x00, 13, 8},
165 {true, PHY_HT, 117000, 0x8e, 0x00, 14, 8},
166 {true, PHY_HT, 130000, 0x8f, 0x00, 15, 8},
167 },
168};
169
170static struct ath9k_rate_table ar5416_11na_table = {
171 24,
172 {0},
173 {
174 {true, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0},
175 {true, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0},
176 {true, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2},
177 {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2},
178 {true, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4},
179 {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4},
180 {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 4},
181 {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4},
182 {true, PHY_HT, 6500, 0x80, 0x00, 0, 0},
183 {true, PHY_HT, 13000, 0x81, 0x00, 1, 2},
184 {true, PHY_HT, 19500, 0x82, 0x00, 2, 2},
185 {true, PHY_HT, 26000, 0x83, 0x00, 3, 4},
186 {true, PHY_HT, 39000, 0x84, 0x00, 4, 4},
187 {true, PHY_HT, 52000, 0x85, 0x00, 5, 4},
188 {true, PHY_HT, 58500, 0x86, 0x00, 6, 4},
189 {true, PHY_HT, 65000, 0x87, 0x00, 7, 4},
190 {true, PHY_HT, 13000, 0x88, 0x00, 8, 0},
191 {true, PHY_HT, 26000, 0x89, 0x00, 9, 2},
192 {true, PHY_HT, 39000, 0x8a, 0x00, 10, 2},
193 {true, PHY_HT, 52000, 0x8b, 0x00, 11, 4},
194 {true, PHY_HT, 78000, 0x8c, 0x00, 12, 4},
195 {true, PHY_HT, 104000, 0x8d, 0x00, 13, 4},
196 {true, PHY_HT, 117000, 0x8e, 0x00, 14, 4},
197 {true, PHY_HT, 130000, 0x8f, 0x00, 15, 4},
198 },
199};
200
201static enum wireless_mode ath9k_hw_chan2wmode(struct ath_hal *ah,
202 const struct ath9k_channel *chan)
203{
204 if (IS_CHAN_CCK(chan))
205 return ATH9K_MODE_11A;
206 if (IS_CHAN_G(chan))
207 return ATH9K_MODE_11G;
208 return ATH9K_MODE_11A;
209}
210 27
211static bool ath9k_hw_wait(struct ath_hal *ah, 28extern struct hal_percal_data iq_cal_multi_sample;
212 u32 reg, 29extern struct hal_percal_data iq_cal_single_sample;
213 u32 mask, 30extern struct hal_percal_data adc_gain_cal_multi_sample;
214 u32 val) 31extern struct hal_percal_data adc_gain_cal_single_sample;
215{ 32extern struct hal_percal_data adc_dc_cal_multi_sample;
216 int i; 33extern struct hal_percal_data adc_dc_cal_single_sample;
34extern struct hal_percal_data adc_init_dc_cal;
217 35
218 for (i = 0; i < (AH_TIMEOUT / AH_TIME_QUANTUM); i++) { 36static bool ath9k_hw_set_reset_reg(struct ath_hal *ah, u32 type);
219 if ((REG_READ(ah, reg) & mask) == val) 37static void ath9k_hw_set_regs(struct ath_hal *ah, struct ath9k_channel *chan,
220 return true; 38 enum ath9k_ht_macmode macmode);
39static u32 ath9k_hw_ini_fixup(struct ath_hal *ah,
40 struct ar5416_eeprom_def *pEepData,
41 u32 reg, u32 value);
42static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah, struct ath9k_channel *chan);
43static void ath9k_hw_spur_mitigate(struct ath_hal *ah, struct ath9k_channel *chan);
221 44
222 udelay(AH_TIME_QUANTUM); 45/********************/
223 } 46/* Helper Functions */
224 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO, 47/********************/
225 "%s: timeout on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
226 __func__, reg, REG_READ(ah, reg), mask, val);
227 return false;
228}
229 48
230static bool ath9k_hw_eeprom_read(struct ath_hal *ah, u32 off, 49static u32 ath9k_hw_mac_usec(struct ath_hal *ah, u32 clks)
231 u16 *data)
232{ 50{
233 (void) REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S)); 51 if (ah->ah_curchan != NULL)
234 52 return clks / CLOCK_RATE[ath9k_hw_chan2wmode(ah, ah->ah_curchan)];
235 if (!ath9k_hw_wait(ah, 53 else
236 AR_EEPROM_STATUS_DATA, 54 return clks / CLOCK_RATE[ATH9K_MODE_11B];
237 AR_EEPROM_STATUS_DATA_BUSY |
238 AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0)) {
239 return false;
240 }
241
242 *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
243 AR_EEPROM_STATUS_DATA_VAL);
244
245 return true;
246} 55}
247 56
248static int ath9k_hw_flash_map(struct ath_hal *ah) 57static u32 ath9k_hw_mac_to_usec(struct ath_hal *ah, u32 clks)
249{ 58{
250 struct ath_hal_5416 *ahp = AH5416(ah); 59 struct ath9k_channel *chan = ah->ah_curchan;
251
252 ahp->ah_cal_mem = ioremap(AR5416_EEPROM_START_ADDR, AR5416_EEPROM_MAX);
253 60
254 if (!ahp->ah_cal_mem) { 61 if (chan && IS_CHAN_HT40(chan))
255 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 62 return ath9k_hw_mac_usec(ah, clks) / 2;
256 "%s: cannot remap eeprom region \n", __func__); 63 else
257 return -EIO; 64 return ath9k_hw_mac_usec(ah, clks);
258 } 65}
259 66
260 return 0; 67static u32 ath9k_hw_mac_clks(struct ath_hal *ah, u32 usecs)
68{
69 if (ah->ah_curchan != NULL)
70 return usecs * CLOCK_RATE[ath9k_hw_chan2wmode(ah,
71 ah->ah_curchan)];
72 else
73 return usecs * CLOCK_RATE[ATH9K_MODE_11B];
261} 74}
262 75
263static bool ath9k_hw_flash_read(struct ath_hal *ah, u32 off, 76static u32 ath9k_hw_mac_to_clks(struct ath_hal *ah, u32 usecs)
264 u16 *data)
265{ 77{
266 struct ath_hal_5416 *ahp = AH5416(ah); 78 struct ath9k_channel *chan = ah->ah_curchan;
267 79
268 *data = ioread16(ahp->ah_cal_mem + off); 80 if (chan && IS_CHAN_HT40(chan))
269 return true; 81 return ath9k_hw_mac_clks(ah, usecs) * 2;
82 else
83 return ath9k_hw_mac_clks(ah, usecs);
270} 84}
271 85
272static void ath9k_hw_read_revisions(struct ath_hal *ah) 86enum wireless_mode ath9k_hw_chan2wmode(struct ath_hal *ah,
87 const struct ath9k_channel *chan)
273{ 88{
274 u32 val; 89 if (IS_CHAN_B(chan))
275 90 return ATH9K_MODE_11B;
276 val = REG_READ(ah, AR_SREV) & AR_SREV_ID; 91 if (IS_CHAN_G(chan))
92 return ATH9K_MODE_11G;
277 93
278 if (val == 0xFF) { 94 return ATH9K_MODE_11A;
279 val = REG_READ(ah, AR_SREV); 95}
280 96
281 ah->ah_macVersion = 97bool ath9k_hw_wait(struct ath_hal *ah, u32 reg, u32 mask, u32 val)
282 (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; 98{
99 int i;
283 100
284 ah->ah_macRev = MS(val, AR_SREV_REVISION2); 101 for (i = 0; i < (AH_TIMEOUT / AH_TIME_QUANTUM); i++) {
285 ah->ah_isPciExpress = 102 if ((REG_READ(ah, reg) & mask) == val)
286 (val & AR_SREV_TYPE2_HOST_MODE) ? 0 : 1; 103 return true;
287 104
288 } else { 105 udelay(AH_TIME_QUANTUM);
289 if (!AR_SREV_9100(ah)) 106 }
290 ah->ah_macVersion = MS(val, AR_SREV_VERSION);
291 107
292 ah->ah_macRev = val & AR_SREV_REVISION; 108 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
109 "timeout on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
110 reg, REG_READ(ah, reg), mask, val);
293 111
294 if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) 112 return false;
295 ah->ah_isPciExpress = true;
296 }
297} 113}
298 114
299u32 ath9k_hw_reverse_bits(u32 val, u32 n) 115u32 ath9k_hw_reverse_bits(u32 val, u32 n)
@@ -308,596 +124,215 @@ u32 ath9k_hw_reverse_bits(u32 val, u32 n)
308 return retval; 124 return retval;
309} 125}
310 126
311static void ath9k_hw_set_defaults(struct ath_hal *ah) 127bool ath9k_get_channel_edges(struct ath_hal *ah,
128 u16 flags, u16 *low,
129 u16 *high)
312{ 130{
313 int i; 131 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
314
315 ah->ah_config.dma_beacon_response_time = 2;
316 ah->ah_config.sw_beacon_response_time = 10;
317 ah->ah_config.additional_swba_backoff = 0;
318 ah->ah_config.ack_6mb = 0x0;
319 ah->ah_config.cwm_ignore_extcca = 0;
320 ah->ah_config.pcie_powersave_enable = 0;
321 ah->ah_config.pcie_l1skp_enable = 0;
322 ah->ah_config.pcie_clock_req = 0;
323 ah->ah_config.pcie_power_reset = 0x100;
324 ah->ah_config.pcie_restore = 0;
325 ah->ah_config.pcie_waen = 0;
326 ah->ah_config.analog_shiftreg = 1;
327 ah->ah_config.ht_enable = 1;
328 ah->ah_config.ofdm_trig_low = 200;
329 ah->ah_config.ofdm_trig_high = 500;
330 ah->ah_config.cck_trig_high = 200;
331 ah->ah_config.cck_trig_low = 100;
332 ah->ah_config.enable_ani = 1;
333 ah->ah_config.noise_immunity_level = 4;
334 ah->ah_config.ofdm_weaksignal_det = 1;
335 ah->ah_config.cck_weaksignal_thr = 0;
336 ah->ah_config.spur_immunity_level = 2;
337 ah->ah_config.firstep_level = 0;
338 ah->ah_config.rssi_thr_high = 40;
339 ah->ah_config.rssi_thr_low = 7;
340 ah->ah_config.diversity_control = 0;
341 ah->ah_config.antenna_switch_swap = 0;
342 132
343 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 133 if (flags & CHANNEL_5GHZ) {
344 ah->ah_config.spurchans[i][0] = AR_NO_SPUR; 134 *low = pCap->low_5ghz_chan;
345 ah->ah_config.spurchans[i][1] = AR_NO_SPUR; 135 *high = pCap->high_5ghz_chan;
136 return true;
346 } 137 }
347 138 if ((flags & CHANNEL_2GHZ)) {
348 ah->ah_config.intr_mitigation = 0; 139 *low = pCap->low_2ghz_chan;
349} 140 *high = pCap->high_2ghz_chan;
350 141 return true;
351static void ath9k_hw_override_ini(struct ath_hal *ah,
352 struct ath9k_channel *chan)
353{
354 if (!AR_SREV_5416_V20_OR_LATER(ah)
355 || AR_SREV_9280_10_OR_LATER(ah))
356 return;
357
358 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
359}
360
361static void ath9k_hw_init_bb(struct ath_hal *ah,
362 struct ath9k_channel *chan)
363{
364 u32 synthDelay;
365
366 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
367 if (IS_CHAN_CCK(chan))
368 synthDelay = (4 * synthDelay) / 22;
369 else
370 synthDelay /= 10;
371
372 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
373
374 udelay(synthDelay + BASE_ACTIVATE_DELAY);
375}
376
377static void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
378 enum ath9k_opmode opmode)
379{
380 struct ath_hal_5416 *ahp = AH5416(ah);
381
382 ahp->ah_maskReg = AR_IMR_TXERR |
383 AR_IMR_TXURN |
384 AR_IMR_RXERR |
385 AR_IMR_RXORN |
386 AR_IMR_BCNMISC;
387
388 if (ahp->ah_intrMitigation)
389 ahp->ah_maskReg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
390 else
391 ahp->ah_maskReg |= AR_IMR_RXOK;
392
393 ahp->ah_maskReg |= AR_IMR_TXOK;
394
395 if (opmode == ATH9K_M_HOSTAP)
396 ahp->ah_maskReg |= AR_IMR_MIB;
397
398 REG_WRITE(ah, AR_IMR, ahp->ah_maskReg);
399 REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT);
400
401 if (!AR_SREV_9100(ah)) {
402 REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
403 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
404 REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
405 } 142 }
143 return false;
406} 144}
407 145
408static void ath9k_hw_init_qos(struct ath_hal *ah) 146u16 ath9k_hw_computetxtime(struct ath_hal *ah,
409{ 147 struct ath_rate_table *rates,
410 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa); 148 u32 frameLen, u16 rateix,
411 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210); 149 bool shortPreamble)
412
413 REG_WRITE(ah, AR_QOS_NO_ACK,
414 SM(2, AR_QOS_NO_ACK_TWO_BIT) |
415 SM(5, AR_QOS_NO_ACK_BIT_OFF) |
416 SM(0, AR_QOS_NO_ACK_BYTE_OFF));
417
418 REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL);
419 REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF);
420 REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
421 REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
422 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
423}
424
425static void ath9k_hw_analog_shift_rmw(struct ath_hal *ah,
426 u32 reg,
427 u32 mask,
428 u32 shift,
429 u32 val)
430{
431 u32 regVal;
432
433 regVal = REG_READ(ah, reg) & ~mask;
434 regVal |= (val << shift) & mask;
435
436 REG_WRITE(ah, reg, regVal);
437
438 if (ah->ah_config.analog_shiftreg)
439 udelay(100);
440
441 return;
442}
443
444static u8 ath9k_hw_get_num_ant_config(struct ath_hal_5416 *ahp,
445 enum ieee80211_band freq_band)
446{ 150{
447 struct ar5416_eeprom *eep = &ahp->ah_eeprom; 151 u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime;
448 struct modal_eep_header *pModal = 152 u32 kbps;
449 &(eep->modalHeader[IEEE80211_BAND_5GHZ == freq_band]);
450 struct base_eep_header *pBase = &eep->baseEepHeader;
451 u8 num_ant_config;
452
453 num_ant_config = 1;
454
455 if (pBase->version >= 0x0E0D)
456 if (pModal->useAnt1)
457 num_ant_config += 1;
458
459 return num_ant_config;
460}
461 153
462static int 154 kbps = rates->info[rateix].ratekbps;
463ath9k_hw_get_eeprom_antenna_cfg(struct ath_hal_5416 *ahp,
464 struct ath9k_channel *chan,
465 u8 index,
466 u16 *config)
467{
468 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
469 struct modal_eep_header *pModal =
470 &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
471 struct base_eep_header *pBase = &eep->baseEepHeader;
472 155
473 switch (index) { 156 if (kbps == 0)
474 case 0:
475 *config = pModal->antCtrlCommon & 0xFFFF;
476 return 0; 157 return 0;
477 case 1: 158
478 if (pBase->version >= 0x0E0D) { 159 switch (rates->info[rateix].phy) {
479 if (pModal->useAnt1) { 160 case WLAN_RC_PHY_CCK:
480 *config = 161 phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS;
481 ((pModal->antCtrlCommon & 0xFFFF0000) >> 16); 162 if (shortPreamble && rates->info[rateix].short_preamble)
482 return 0; 163 phyTime >>= 1;
483 } 164 numBits = frameLen << 3;
165 txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps);
166 break;
167 case WLAN_RC_PHY_OFDM:
168 if (ah->ah_curchan && IS_CHAN_QUARTER_RATE(ah->ah_curchan)) {
169 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000;
170 numBits = OFDM_PLCP_BITS + (frameLen << 3);
171 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
172 txTime = OFDM_SIFS_TIME_QUARTER
173 + OFDM_PREAMBLE_TIME_QUARTER
174 + (numSymbols * OFDM_SYMBOL_TIME_QUARTER);
175 } else if (ah->ah_curchan &&
176 IS_CHAN_HALF_RATE(ah->ah_curchan)) {
177 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000;
178 numBits = OFDM_PLCP_BITS + (frameLen << 3);
179 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
180 txTime = OFDM_SIFS_TIME_HALF +
181 OFDM_PREAMBLE_TIME_HALF
182 + (numSymbols * OFDM_SYMBOL_TIME_HALF);
183 } else {
184 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000;
185 numBits = OFDM_PLCP_BITS + (frameLen << 3);
186 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
187 txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME
188 + (numSymbols * OFDM_SYMBOL_TIME);
484 } 189 }
485 break; 190 break;
486 default: 191 default:
192 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
193 "Unknown phy %u (rate ix %u)\n",
194 rates->info[rateix].phy, rateix);
195 txTime = 0;
487 break; 196 break;
488 } 197 }
489 198
490 return -EINVAL; 199 return txTime;
491}
492
493static inline bool ath9k_hw_nvram_read(struct ath_hal *ah,
494 u32 off,
495 u16 *data)
496{
497 if (ath9k_hw_use_flash(ah))
498 return ath9k_hw_flash_read(ah, off, data);
499 else
500 return ath9k_hw_eeprom_read(ah, off, data);
501} 200}
502 201
503static bool ath9k_hw_fill_eeprom(struct ath_hal *ah) 202u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags)
504{ 203{
505 struct ath_hal_5416 *ahp = AH5416(ah); 204 if (flags & CHANNEL_2GHZ) {
506 struct ar5416_eeprom *eep = &ahp->ah_eeprom; 205 if (freq == 2484)
507 u16 *eep_data; 206 return 14;
508 int addr, ar5416_eep_start_loc = 0; 207 if (freq < 2484)
509 208 return (freq - 2407) / 5;
510 if (!ath9k_hw_use_flash(ah)) { 209 else
511 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 210 return 15 + ((freq - 2512) / 20);
512 "%s: Reading from EEPROM, not flash\n", __func__); 211 } else if (flags & CHANNEL_5GHZ) {
513 ar5416_eep_start_loc = 256; 212 if (ath9k_regd_is_public_safety_sku(ah) &&
514 } 213 IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
515 if (AR_SREV_9100(ah)) 214 return ((freq * 10) +
516 ar5416_eep_start_loc = 256; 215 (((freq % 5) == 2) ? 5 : 0) - 49400) / 5;
517 216 } else if ((flags & CHANNEL_A) && (freq <= 5000)) {
518 eep_data = (u16 *) eep; 217 return (freq - 4000) / 5;
519 for (addr = 0; 218 } else {
520 addr < sizeof(struct ar5416_eeprom) / sizeof(u16); 219 return (freq - 5000) / 5;
521 addr++) { 220 }
522 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc, 221 } else {
523 eep_data)) { 222 if (freq == 2484)
524 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 223 return 14;
525 "%s: Unable to read eeprom region \n", 224 if (freq < 2484)
526 __func__); 225 return (freq - 2407) / 5;
527 return false; 226 if (freq < 5000) {
227 if (ath9k_regd_is_public_safety_sku(ah)
228 && IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
229 return ((freq * 10) +
230 (((freq % 5) ==
231 2) ? 5 : 0) - 49400) / 5;
232 } else if (freq > 4900) {
233 return (freq - 4000) / 5;
234 } else {
235 return 15 + ((freq - 2512) / 20);
236 }
528 } 237 }
529 eep_data++; 238 return (freq - 5000) / 5;
530 } 239 }
531 return true;
532} 240}
533 241
534/* XXX: Clean me up, make me more legible */ 242void ath9k_hw_get_channel_centers(struct ath_hal *ah,
535static bool 243 struct ath9k_channel *chan,
536ath9k_hw_eeprom_set_board_values(struct ath_hal *ah, 244 struct chan_centers *centers)
537 struct ath9k_channel *chan)
538{ 245{
539 struct modal_eep_header *pModal; 246 int8_t extoff;
540 int i, regChainOffset;
541 struct ath_hal_5416 *ahp = AH5416(ah); 247 struct ath_hal_5416 *ahp = AH5416(ah);
542 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
543 u8 txRxAttenLocal;
544 u16 ant_config;
545 248
546 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); 249 if (!IS_CHAN_HT40(chan)) {
250 centers->ctl_center = centers->ext_center =
251 centers->synth_center = chan->channel;
252 return;
253 }
547 254
548 txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44; 255 if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
256 (chan->chanmode == CHANNEL_G_HT40PLUS)) {
257 centers->synth_center =
258 chan->channel + HT40_CHANNEL_CENTER_SHIFT;
259 extoff = 1;
260 } else {
261 centers->synth_center =
262 chan->channel - HT40_CHANNEL_CENTER_SHIFT;
263 extoff = -1;
264 }
549 265
550 ath9k_hw_get_eeprom_antenna_cfg(ahp, chan, 1, &ant_config); 266 centers->ctl_center =
551 REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config); 267 centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT);
268 centers->ext_center =
269 centers->synth_center + (extoff *
270 ((ahp->ah_extprotspacing == ATH9K_HT_EXTPROTSPACING_20) ?
271 HT40_CHANNEL_CENTER_SHIFT : 15));
552 272
553 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 273}
554 if (AR_SREV_9280(ah)) {
555 if (i >= 2)
556 break;
557 }
558 274
559 if (AR_SREV_5416_V20_OR_LATER(ah) && 275/******************/
560 (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5) 276/* Chip Revisions */
561 && (i != 0)) 277/******************/
562 regChainOffset = (i == 1) ? 0x2000 : 0x1000;
563 else
564 regChainOffset = i * 0x1000;
565
566 REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
567 pModal->antCtrlChain[i]);
568
569 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
570 (REG_READ(ah,
571 AR_PHY_TIMING_CTRL4(0) +
572 regChainOffset) &
573 ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF |
574 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) |
575 SM(pModal->iqCalICh[i],
576 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
577 SM(pModal->iqCalQCh[i],
578 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF));
579
580 if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
581 if ((eep->baseEepHeader.version &
582 AR5416_EEP_VER_MINOR_MASK) >=
583 AR5416_EEP_MINOR_VER_3) {
584 txRxAttenLocal = pModal->txRxAttenCh[i];
585 if (AR_SREV_9280_10_OR_LATER(ah)) {
586 REG_RMW_FIELD(ah,
587 AR_PHY_GAIN_2GHZ +
588 regChainOffset,
589 AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN,
590 pModal->
591 bswMargin[i]);
592 REG_RMW_FIELD(ah,
593 AR_PHY_GAIN_2GHZ +
594 regChainOffset,
595 AR_PHY_GAIN_2GHZ_XATTEN1_DB,
596 pModal->
597 bswAtten[i]);
598 REG_RMW_FIELD(ah,
599 AR_PHY_GAIN_2GHZ +
600 regChainOffset,
601 AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN,
602 pModal->
603 xatten2Margin[i]);
604 REG_RMW_FIELD(ah,
605 AR_PHY_GAIN_2GHZ +
606 regChainOffset,
607 AR_PHY_GAIN_2GHZ_XATTEN2_DB,
608 pModal->
609 xatten2Db[i]);
610 } else {
611 REG_WRITE(ah,
612 AR_PHY_GAIN_2GHZ +
613 regChainOffset,
614 (REG_READ(ah,
615 AR_PHY_GAIN_2GHZ +
616 regChainOffset) &
617 ~AR_PHY_GAIN_2GHZ_BSW_MARGIN)
618 | SM(pModal->
619 bswMargin[i],
620 AR_PHY_GAIN_2GHZ_BSW_MARGIN));
621 REG_WRITE(ah,
622 AR_PHY_GAIN_2GHZ +
623 regChainOffset,
624 (REG_READ(ah,
625 AR_PHY_GAIN_2GHZ +
626 regChainOffset) &
627 ~AR_PHY_GAIN_2GHZ_BSW_ATTEN)
628 | SM(pModal->bswAtten[i],
629 AR_PHY_GAIN_2GHZ_BSW_ATTEN));
630 }
631 }
632 if (AR_SREV_9280_10_OR_LATER(ah)) {
633 REG_RMW_FIELD(ah,
634 AR_PHY_RXGAIN +
635 regChainOffset,
636 AR9280_PHY_RXGAIN_TXRX_ATTEN,
637 txRxAttenLocal);
638 REG_RMW_FIELD(ah,
639 AR_PHY_RXGAIN +
640 regChainOffset,
641 AR9280_PHY_RXGAIN_TXRX_MARGIN,
642 pModal->rxTxMarginCh[i]);
643 } else {
644 REG_WRITE(ah,
645 AR_PHY_RXGAIN + regChainOffset,
646 (REG_READ(ah,
647 AR_PHY_RXGAIN +
648 regChainOffset) &
649 ~AR_PHY_RXGAIN_TXRX_ATTEN) |
650 SM(txRxAttenLocal,
651 AR_PHY_RXGAIN_TXRX_ATTEN));
652 REG_WRITE(ah,
653 AR_PHY_GAIN_2GHZ +
654 regChainOffset,
655 (REG_READ(ah,
656 AR_PHY_GAIN_2GHZ +
657 regChainOffset) &
658 ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) |
659 SM(pModal->rxTxMarginCh[i],
660 AR_PHY_GAIN_2GHZ_RXTX_MARGIN));
661 }
662 }
663 }
664 278
665 if (AR_SREV_9280_10_OR_LATER(ah)) { 279static void ath9k_hw_read_revisions(struct ath_hal *ah)
666 if (IS_CHAN_2GHZ(chan)) { 280{
667 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0, 281 u32 val;
668 AR_AN_RF2G1_CH0_OB,
669 AR_AN_RF2G1_CH0_OB_S,
670 pModal->ob);
671 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0,
672 AR_AN_RF2G1_CH0_DB,
673 AR_AN_RF2G1_CH0_DB_S,
674 pModal->db);
675 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
676 AR_AN_RF2G1_CH1_OB,
677 AR_AN_RF2G1_CH1_OB_S,
678 pModal->ob_ch1);
679 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1,
680 AR_AN_RF2G1_CH1_DB,
681 AR_AN_RF2G1_CH1_DB_S,
682 pModal->db_ch1);
683 } else {
684 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
685 AR_AN_RF5G1_CH0_OB5,
686 AR_AN_RF5G1_CH0_OB5_S,
687 pModal->ob);
688 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0,
689 AR_AN_RF5G1_CH0_DB5,
690 AR_AN_RF5G1_CH0_DB5_S,
691 pModal->db);
692 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
693 AR_AN_RF5G1_CH1_OB5,
694 AR_AN_RF5G1_CH1_OB5_S,
695 pModal->ob_ch1);
696 ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1,
697 AR_AN_RF5G1_CH1_DB5,
698 AR_AN_RF5G1_CH1_DB5_S,
699 pModal->db_ch1);
700 }
701 ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
702 AR_AN_TOP2_XPABIAS_LVL,
703 AR_AN_TOP2_XPABIAS_LVL_S,
704 pModal->xpaBiasLvl);
705 ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2,
706 AR_AN_TOP2_LOCALBIAS,
707 AR_AN_TOP2_LOCALBIAS_S,
708 pModal->local_bias);
709 DPRINTF(ah->ah_sc, ATH_DBG_ANY, "ForceXPAon: %d\n",
710 pModal->force_xpaon);
711 REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
712 pModal->force_xpaon);
713 }
714 282
715 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, 283 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
716 pModal->switchSettling);
717 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC,
718 pModal->adcDesiredSize);
719 284
720 if (!AR_SREV_9280_10_OR_LATER(ah)) 285 if (val == 0xFF) {
721 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, 286 val = REG_READ(ah, AR_SREV);
722 AR_PHY_DESIRED_SZ_PGA, 287 ah->ah_macVersion = (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
723 pModal->pgaDesiredSize); 288 ah->ah_macRev = MS(val, AR_SREV_REVISION2);
724 289 ah->ah_isPciExpress = (val & AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;
725 REG_WRITE(ah, AR_PHY_RF_CTL4,
726 SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF)
727 | SM(pModal->txEndToXpaOff,
728 AR_PHY_RF_CTL4_TX_END_XPAB_OFF)
729 | SM(pModal->txFrameToXpaOn,
730 AR_PHY_RF_CTL4_FRAME_XPAA_ON)
731 | SM(pModal->txFrameToXpaOn,
732 AR_PHY_RF_CTL4_FRAME_XPAB_ON));
733
734 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
735 pModal->txEndToRxOn);
736 if (AR_SREV_9280_10_OR_LATER(ah)) {
737 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
738 pModal->thresh62);
739 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
740 AR_PHY_EXT_CCA0_THRESH62,
741 pModal->thresh62);
742 } else { 290 } else {
743 REG_RMW_FIELD(ah, AR_PHY_CCA, AR_PHY_CCA_THRESH62, 291 if (!AR_SREV_9100(ah))
744 pModal->thresh62); 292 ah->ah_macVersion = MS(val, AR_SREV_VERSION);
745 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA,
746 AR_PHY_EXT_CCA_THRESH62,
747 pModal->thresh62);
748 }
749 293
750 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 294 ah->ah_macRev = val & AR_SREV_REVISION;
751 AR5416_EEP_MINOR_VER_2) {
752 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
753 AR_PHY_TX_END_DATA_START,
754 pModal->txFrameToDataStart);
755 REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
756 pModal->txFrameToPaOn);
757 }
758 295
759 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 296 if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE)
760 AR5416_EEP_MINOR_VER_3) { 297 ah->ah_isPciExpress = true;
761 if (IS_CHAN_HT40(chan))
762 REG_RMW_FIELD(ah, AR_PHY_SETTLING,
763 AR_PHY_SETTLING_SWITCH,
764 pModal->swSettleHt40);
765 } 298 }
766
767 return true;
768} 299}
769 300
770static int ath9k_hw_check_eeprom(struct ath_hal *ah) 301static int ath9k_hw_get_radiorev(struct ath_hal *ah)
771{ 302{
772 u32 sum = 0, el; 303 u32 val;
773 u16 *eepdata;
774 int i; 304 int i;
775 struct ath_hal_5416 *ahp = AH5416(ah);
776 bool need_swap = false;
777 struct ar5416_eeprom *eep =
778 (struct ar5416_eeprom *) &ahp->ah_eeprom;
779
780 if (!ath9k_hw_use_flash(ah)) {
781 u16 magic, magic2;
782 int addr;
783
784 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
785 &magic)) {
786 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
787 "%s: Reading Magic # failed\n", __func__);
788 return false;
789 }
790 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "%s: Read Magic = 0x%04X\n",
791 __func__, magic);
792
793 if (magic != AR5416_EEPROM_MAGIC) {
794 magic2 = swab16(magic);
795
796 if (magic2 == AR5416_EEPROM_MAGIC) {
797 need_swap = true;
798 eepdata = (u16 *) (&ahp->ah_eeprom);
799
800 for (addr = 0;
801 addr <
802 sizeof(struct ar5416_eeprom) /
803 sizeof(u16); addr++) {
804 u16 temp;
805
806 temp = swab16(*eepdata);
807 *eepdata = temp;
808 eepdata++;
809
810 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
811 "0x%04X ", *eepdata);
812 if (((addr + 1) % 6) == 0)
813 DPRINTF(ah->ah_sc,
814 ATH_DBG_EEPROM,
815 "\n");
816 }
817 } else {
818 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
819 "Invalid EEPROM Magic. "
820 "endianness missmatch.\n");
821 return -EINVAL;
822 }
823 }
824 }
825 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n",
826 need_swap ? "True" : "False");
827
828 if (need_swap)
829 el = swab16(ahp->ah_eeprom.baseEepHeader.length);
830 else
831 el = ahp->ah_eeprom.baseEepHeader.length;
832
833 if (el > sizeof(struct ar5416_eeprom))
834 el = sizeof(struct ar5416_eeprom) / sizeof(u16);
835 else
836 el = el / sizeof(u16);
837
838 eepdata = (u16 *) (&ahp->ah_eeprom);
839
840 for (i = 0; i < el; i++)
841 sum ^= *eepdata++;
842
843 if (need_swap) {
844 u32 integer, j;
845 u16 word;
846 305
847 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 306 REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
848 "EEPROM Endianness is not native.. Changing \n");
849
850 word = swab16(eep->baseEepHeader.length);
851 eep->baseEepHeader.length = word;
852
853 word = swab16(eep->baseEepHeader.checksum);
854 eep->baseEepHeader.checksum = word;
855
856 word = swab16(eep->baseEepHeader.version);
857 eep->baseEepHeader.version = word;
858
859 word = swab16(eep->baseEepHeader.regDmn[0]);
860 eep->baseEepHeader.regDmn[0] = word;
861
862 word = swab16(eep->baseEepHeader.regDmn[1]);
863 eep->baseEepHeader.regDmn[1] = word;
864
865 word = swab16(eep->baseEepHeader.rfSilent);
866 eep->baseEepHeader.rfSilent = word;
867
868 word = swab16(eep->baseEepHeader.blueToothOptions);
869 eep->baseEepHeader.blueToothOptions = word;
870 307
871 word = swab16(eep->baseEepHeader.deviceCap); 308 for (i = 0; i < 8; i++)
872 eep->baseEepHeader.deviceCap = word; 309 REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
310 val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
311 val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
873 312
874 for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) { 313 return ath9k_hw_reverse_bits(val, 8);
875 struct modal_eep_header *pModal = 314}
876 &eep->modalHeader[j];
877 integer = swab32(pModal->antCtrlCommon);
878 pModal->antCtrlCommon = integer;
879 315
880 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 316/************************************/
881 integer = swab32(pModal->antCtrlChain[i]); 317/* HW Attach, Detach, Init Routines */
882 pModal->antCtrlChain[i] = integer; 318/************************************/
883 }
884 319
885 for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) { 320static void ath9k_hw_disablepcie(struct ath_hal *ah)
886 word = swab16(pModal->spurChans[i].spurChan); 321{
887 pModal->spurChans[i].spurChan = word; 322 if (!AR_SREV_9100(ah))
888 } 323 return;
889 }
890 }
891 324
892 if (sum != 0xffff || ar5416_get_eep_ver(ahp) != AR5416_EEP_VER || 325 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
893 ar5416_get_eep_rev(ahp) < AR5416_EEP_NO_BACK_VER) { 326 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
894 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 327 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
895 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 328 REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824);
896 sum, ar5416_get_eep_ver(ahp)); 329 REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579);
897 return -EINVAL; 330 REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000);
898 } 331 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
332 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
333 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
899 334
900 return 0; 335 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
901} 336}
902 337
903static bool ath9k_hw_chip_test(struct ath_hal *ah) 338static bool ath9k_hw_chip_test(struct ath_hal *ah)
@@ -905,9 +340,9 @@ static bool ath9k_hw_chip_test(struct ath_hal *ah)
905 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) }; 340 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
906 u32 regHold[2]; 341 u32 regHold[2];
907 u32 patternData[4] = { 0x55555555, 342 u32 patternData[4] = { 0x55555555,
908 0xaaaaaaaa, 343 0xaaaaaaaa,
909 0x66666666, 344 0x66666666,
910 0x99999999 }; 345 0x99999999 };
911 int i, j; 346 int i, j;
912 347
913 for (i = 0; i < 2; i++) { 348 for (i = 0; i < 2; i++) {
@@ -921,9 +356,9 @@ static bool ath9k_hw_chip_test(struct ath_hal *ah)
921 rdData = REG_READ(ah, addr); 356 rdData = REG_READ(ah, addr);
922 if (rdData != wrData) { 357 if (rdData != wrData) {
923 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, 358 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
924 "%s: address test failed " 359 "address test failed "
925 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", 360 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
926 __func__, addr, wrData, rdData); 361 addr, wrData, rdData);
927 return false; 362 return false;
928 } 363 }
929 } 364 }
@@ -933,9 +368,9 @@ static bool ath9k_hw_chip_test(struct ath_hal *ah)
933 rdData = REG_READ(ah, addr); 368 rdData = REG_READ(ah, addr);
934 if (wrData != rdData) { 369 if (wrData != rdData) {
935 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, 370 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
936 "%s: address test failed " 371 "address test failed "
937 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", 372 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
938 __func__, addr, wrData, rdData); 373 addr, wrData, rdData);
939 return false; 374 return false;
940 } 375 }
941 } 376 }
@@ -945,213 +380,65 @@ static bool ath9k_hw_chip_test(struct ath_hal *ah)
945 return true; 380 return true;
946} 381}
947 382
948u32 ath9k_hw_getrxfilter(struct ath_hal *ah) 383static const char *ath9k_hw_devname(u16 devid)
949{
950 u32 bits = REG_READ(ah, AR_RX_FILTER);
951 u32 phybits = REG_READ(ah, AR_PHY_ERR);
952
953 if (phybits & AR_PHY_ERR_RADAR)
954 bits |= ATH9K_RX_FILTER_PHYRADAR;
955 if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING))
956 bits |= ATH9K_RX_FILTER_PHYERR;
957 return bits;
958}
959
960void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits)
961{
962 u32 phybits;
963
964 REG_WRITE(ah, AR_RX_FILTER, (bits & 0xffff) | AR_RX_COMPR_BAR);
965 phybits = 0;
966 if (bits & ATH9K_RX_FILTER_PHYRADAR)
967 phybits |= AR_PHY_ERR_RADAR;
968 if (bits & ATH9K_RX_FILTER_PHYERR)
969 phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
970 REG_WRITE(ah, AR_PHY_ERR, phybits);
971
972 if (phybits)
973 REG_WRITE(ah, AR_RXCFG,
974 REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
975 else
976 REG_WRITE(ah, AR_RXCFG,
977 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
978}
979
980bool ath9k_hw_setcapability(struct ath_hal *ah,
981 enum ath9k_capability_type type,
982 u32 capability,
983 u32 setting,
984 int *status)
985{
986 struct ath_hal_5416 *ahp = AH5416(ah);
987 u32 v;
988
989 switch (type) {
990 case ATH9K_CAP_TKIP_MIC:
991 if (setting)
992 ahp->ah_staId1Defaults |=
993 AR_STA_ID1_CRPT_MIC_ENABLE;
994 else
995 ahp->ah_staId1Defaults &=
996 ~AR_STA_ID1_CRPT_MIC_ENABLE;
997 return true;
998 case ATH9K_CAP_DIVERSITY:
999 v = REG_READ(ah, AR_PHY_CCK_DETECT);
1000 if (setting)
1001 v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
1002 else
1003 v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
1004 REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
1005 return true;
1006 case ATH9K_CAP_MCAST_KEYSRCH:
1007 if (setting)
1008 ahp->ah_staId1Defaults |= AR_STA_ID1_MCAST_KSRCH;
1009 else
1010 ahp->ah_staId1Defaults &= ~AR_STA_ID1_MCAST_KSRCH;
1011 return true;
1012 case ATH9K_CAP_TSF_ADJUST:
1013 if (setting)
1014 ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
1015 else
1016 ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
1017 return true;
1018 default:
1019 return false;
1020 }
1021}
1022
1023void ath9k_hw_dmaRegDump(struct ath_hal *ah)
1024{ 384{
1025 u32 val[ATH9K_NUM_DMA_DEBUG_REGS]; 385 switch (devid) {
1026 int qcuOffset = 0, dcuOffset = 0; 386 case AR5416_DEVID_PCI:
1027 u32 *qcuBase = &val[0], *dcuBase = &val[4]; 387 return "Atheros 5416";
1028 int i; 388 case AR5416_DEVID_PCIE:
1029 389 return "Atheros 5418";
1030 REG_WRITE(ah, AR_MACMISC, 390 case AR9160_DEVID_PCI:
1031 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | 391 return "Atheros 9160";
1032 (AR_MACMISC_MISC_OBS_BUS_1 << 392 case AR9280_DEVID_PCI:
1033 AR_MACMISC_MISC_OBS_BUS_MSB_S))); 393 case AR9280_DEVID_PCIE:
1034 394 return "Atheros 9280";
1035 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "Raw DMA Debug values:\n"); 395 case AR9285_DEVID_PCIE:
1036 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) { 396 return "Atheros 9285";
1037 if (i % 4 == 0)
1038 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");
1039
1040 val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u32)));
1041 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "%d: %08x ", i, val[i]);
1042 }
1043
1044 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n\n");
1045 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1046 "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
1047
1048 for (i = 0; i < ATH9K_NUM_QUEUES;
1049 i++, qcuOffset += 4, dcuOffset += 5) {
1050 if (i == 8) {
1051 qcuOffset = 0;
1052 qcuBase++;
1053 }
1054
1055 if (i == 6) {
1056 dcuOffset = 0;
1057 dcuBase++;
1058 }
1059
1060 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1061 "%2d %2x %1x %2x %2x\n",
1062 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
1063 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset +
1064 3),
1065 val[2] & (0x7 << (i * 3)) >> (i * 3),
1066 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
1067 } 397 }
1068 398
1069 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n"); 399 return NULL;
1070 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1071 "qcu_stitch state: %2x qcu_fetch state: %2x\n",
1072 (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
1073 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1074 "qcu_complete state: %2x dcu_complete state: %2x\n",
1075 (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
1076 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1077 "dcu_arb state: %2x dcu_fp state: %2x\n",
1078 (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
1079 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1080 "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
1081 (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
1082 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1083 "txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
1084 (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
1085 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1086 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
1087 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
1088
1089 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "pcu observe 0x%x \n",
1090 REG_READ(ah, AR_OBS_BUS_1));
1091 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1092 "AR_CR 0x%x \n", REG_READ(ah, AR_CR));
1093} 400}
1094 401
1095u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah, 402static void ath9k_hw_set_defaults(struct ath_hal *ah)
1096 u32 *rxc_pcnt,
1097 u32 *rxf_pcnt,
1098 u32 *txf_pcnt)
1099{ 403{
1100 static u32 cycles, rx_clear, rx_frame, tx_frame; 404 int i;
1101 u32 good = 1;
1102 405
1103 u32 rc = REG_READ(ah, AR_RCCNT); 406 ah->ah_config.dma_beacon_response_time = 2;
1104 u32 rf = REG_READ(ah, AR_RFCNT); 407 ah->ah_config.sw_beacon_response_time = 10;
1105 u32 tf = REG_READ(ah, AR_TFCNT); 408 ah->ah_config.additional_swba_backoff = 0;
1106 u32 cc = REG_READ(ah, AR_CCCNT); 409 ah->ah_config.ack_6mb = 0x0;
410 ah->ah_config.cwm_ignore_extcca = 0;
411 ah->ah_config.pcie_powersave_enable = 0;
412 ah->ah_config.pcie_l1skp_enable = 0;
413 ah->ah_config.pcie_clock_req = 0;
414 ah->ah_config.pcie_power_reset = 0x100;
415 ah->ah_config.pcie_restore = 0;
416 ah->ah_config.pcie_waen = 0;
417 ah->ah_config.analog_shiftreg = 1;
418 ah->ah_config.ht_enable = 1;
419 ah->ah_config.ofdm_trig_low = 200;
420 ah->ah_config.ofdm_trig_high = 500;
421 ah->ah_config.cck_trig_high = 200;
422 ah->ah_config.cck_trig_low = 100;
423 ah->ah_config.enable_ani = 1;
424 ah->ah_config.noise_immunity_level = 4;
425 ah->ah_config.ofdm_weaksignal_det = 1;
426 ah->ah_config.cck_weaksignal_thr = 0;
427 ah->ah_config.spur_immunity_level = 2;
428 ah->ah_config.firstep_level = 0;
429 ah->ah_config.rssi_thr_high = 40;
430 ah->ah_config.rssi_thr_low = 7;
431 ah->ah_config.diversity_control = 0;
432 ah->ah_config.antenna_switch_swap = 0;
1107 433
1108 if (cycles == 0 || cycles > cc) { 434 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
1109 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, 435 ah->ah_config.spurchans[i][0] = AR_NO_SPUR;
1110 "%s: cycle counter wrap. ExtBusy = 0\n", 436 ah->ah_config.spurchans[i][1] = AR_NO_SPUR;
1111 __func__);
1112 good = 0;
1113 } else {
1114 u32 cc_d = cc - cycles;
1115 u32 rc_d = rc - rx_clear;
1116 u32 rf_d = rf - rx_frame;
1117 u32 tf_d = tf - tx_frame;
1118
1119 if (cc_d != 0) {
1120 *rxc_pcnt = rc_d * 100 / cc_d;
1121 *rxf_pcnt = rf_d * 100 / cc_d;
1122 *txf_pcnt = tf_d * 100 / cc_d;
1123 } else {
1124 good = 0;
1125 }
1126 } 437 }
1127 438
1128 cycles = cc; 439 ah->ah_config.intr_mitigation = 1;
1129 rx_frame = rf;
1130 rx_clear = rc;
1131 tx_frame = tf;
1132
1133 return good;
1134} 440}
1135 441
1136void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode)
1137{
1138 u32 macmode;
1139
1140 if (mode == ATH9K_HT_MACMODE_2040 &&
1141 !ah->ah_config.cwm_ignore_extcca)
1142 macmode = AR_2040_JOINED_RX_CLEAR;
1143 else
1144 macmode = 0;
1145
1146 REG_WRITE(ah, AR_2040_MODE, macmode);
1147}
1148
1149static void ath9k_hw_mark_phy_inactive(struct ath_hal *ah)
1150{
1151 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
1152}
1153
1154
1155static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid, 442static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid,
1156 struct ath_softc *sc, 443 struct ath_softc *sc,
1157 void __iomem *mem, 444 void __iomem *mem,
@@ -1165,20 +452,16 @@ static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid,
1165 ahp = kzalloc(sizeof(struct ath_hal_5416), GFP_KERNEL); 452 ahp = kzalloc(sizeof(struct ath_hal_5416), GFP_KERNEL);
1166 if (ahp == NULL) { 453 if (ahp == NULL) {
1167 DPRINTF(sc, ATH_DBG_FATAL, 454 DPRINTF(sc, ATH_DBG_FATAL,
1168 "%s: cannot allocate memory for state block\n", 455 "Cannot allocate memory for state block\n");
1169 __func__);
1170 *status = -ENOMEM; 456 *status = -ENOMEM;
1171 return NULL; 457 return NULL;
1172 } 458 }
1173 459
1174 ah = &ahp->ah; 460 ah = &ahp->ah;
1175
1176 ah->ah_sc = sc; 461 ah->ah_sc = sc;
1177 ah->ah_sh = mem; 462 ah->ah_sh = mem;
1178
1179 ah->ah_magic = AR5416_MAGIC; 463 ah->ah_magic = AR5416_MAGIC;
1180 ah->ah_countryCode = CTRY_DEFAULT; 464 ah->ah_countryCode = CTRY_DEFAULT;
1181
1182 ah->ah_devid = devid; 465 ah->ah_devid = devid;
1183 ah->ah_subvendorid = 0; 466 ah->ah_subvendorid = 0;
1184 467
@@ -1190,12 +473,10 @@ static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid,
1190 473
1191 ah->ah_powerLimit = MAX_RATE_POWER; 474 ah->ah_powerLimit = MAX_RATE_POWER;
1192 ah->ah_tpScale = ATH9K_TP_SCALE_MAX; 475 ah->ah_tpScale = ATH9K_TP_SCALE_MAX;
1193
1194 ahp->ah_atimWindow = 0; 476 ahp->ah_atimWindow = 0;
1195 ahp->ah_diversityControl = ah->ah_config.diversity_control; 477 ahp->ah_diversityControl = ah->ah_config.diversity_control;
1196 ahp->ah_antennaSwitchSwap = 478 ahp->ah_antennaSwitchSwap =
1197 ah->ah_config.antenna_switch_swap; 479 ah->ah_config.antenna_switch_swap;
1198
1199 ahp->ah_staId1Defaults = AR_STA_ID1_CRPT_MIC_ENABLE; 480 ahp->ah_staId1Defaults = AR_STA_ID1_CRPT_MIC_ENABLE;
1200 ahp->ah_beaconInterval = 100; 481 ahp->ah_beaconInterval = 100;
1201 ahp->ah_enable32kHzClock = DONT_USE_32KHZ; 482 ahp->ah_enable32kHzClock = DONT_USE_32KHZ;
@@ -1210,163 +491,6 @@ static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid,
1210 return ahp; 491 return ahp;
1211} 492}
1212 493
1213static int ath9k_hw_eeprom_attach(struct ath_hal *ah)
1214{
1215 int status;
1216
1217 if (ath9k_hw_use_flash(ah))
1218 ath9k_hw_flash_map(ah);
1219
1220 if (!ath9k_hw_fill_eeprom(ah))
1221 return -EIO;
1222
1223 status = ath9k_hw_check_eeprom(ah);
1224
1225 return status;
1226}
1227
1228u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp,
1229 enum eeprom_param param)
1230{
1231 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
1232 struct modal_eep_header *pModal = eep->modalHeader;
1233 struct base_eep_header *pBase = &eep->baseEepHeader;
1234
1235 switch (param) {
1236 case EEP_NFTHRESH_5:
1237 return -pModal[0].noiseFloorThreshCh[0];
1238 case EEP_NFTHRESH_2:
1239 return -pModal[1].noiseFloorThreshCh[0];
1240 case AR_EEPROM_MAC(0):
1241 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
1242 case AR_EEPROM_MAC(1):
1243 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
1244 case AR_EEPROM_MAC(2):
1245 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
1246 case EEP_REG_0:
1247 return pBase->regDmn[0];
1248 case EEP_REG_1:
1249 return pBase->regDmn[1];
1250 case EEP_OP_CAP:
1251 return pBase->deviceCap;
1252 case EEP_OP_MODE:
1253 return pBase->opCapFlags;
1254 case EEP_RF_SILENT:
1255 return pBase->rfSilent;
1256 case EEP_OB_5:
1257 return pModal[0].ob;
1258 case EEP_DB_5:
1259 return pModal[0].db;
1260 case EEP_OB_2:
1261 return pModal[1].ob;
1262 case EEP_DB_2:
1263 return pModal[1].db;
1264 case EEP_MINOR_REV:
1265 return pBase->version & AR5416_EEP_VER_MINOR_MASK;
1266 case EEP_TX_MASK:
1267 return pBase->txMask;
1268 case EEP_RX_MASK:
1269 return pBase->rxMask;
1270 default:
1271 return 0;
1272 }
1273}
1274
1275static int ath9k_hw_get_radiorev(struct ath_hal *ah)
1276{
1277 u32 val;
1278 int i;
1279
1280 REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
1281 for (i = 0; i < 8; i++)
1282 REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
1283 val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
1284 val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
1285 return ath9k_hw_reverse_bits(val, 8);
1286}
1287
1288static int ath9k_hw_init_macaddr(struct ath_hal *ah)
1289{
1290 u32 sum;
1291 int i;
1292 u16 eeval;
1293 struct ath_hal_5416 *ahp = AH5416(ah);
1294 DECLARE_MAC_BUF(mac);
1295
1296 sum = 0;
1297 for (i = 0; i < 3; i++) {
1298 eeval = ath9k_hw_get_eeprom(ahp, AR_EEPROM_MAC(i));
1299 sum += eeval;
1300 ahp->ah_macaddr[2 * i] = eeval >> 8;
1301 ahp->ah_macaddr[2 * i + 1] = eeval & 0xff;
1302 }
1303 if (sum == 0 || sum == 0xffff * 3) {
1304 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1305 "%s: mac address read failed: %s\n", __func__,
1306 print_mac(mac, ahp->ah_macaddr));
1307 return -EADDRNOTAVAIL;
1308 }
1309
1310 return 0;
1311}
1312
1313static inline int16_t ath9k_hw_interpolate(u16 target,
1314 u16 srcLeft,
1315 u16 srcRight,
1316 int16_t targetLeft,
1317 int16_t targetRight)
1318{
1319 int16_t rv;
1320
1321 if (srcRight == srcLeft) {
1322 rv = targetLeft;
1323 } else {
1324 rv = (int16_t) (((target - srcLeft) * targetRight +
1325 (srcRight - target) * targetLeft) /
1326 (srcRight - srcLeft));
1327 }
1328 return rv;
1329}
1330
1331static inline u16 ath9k_hw_fbin2freq(u8 fbin,
1332 bool is2GHz)
1333{
1334
1335 if (fbin == AR5416_BCHAN_UNUSED)
1336 return fbin;
1337
1338 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
1339}
1340
1341static u16 ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah,
1342 u16 i,
1343 bool is2GHz)
1344{
1345 struct ath_hal_5416 *ahp = AH5416(ah);
1346 struct ar5416_eeprom *eep =
1347 (struct ar5416_eeprom *) &ahp->ah_eeprom;
1348 u16 spur_val = AR_NO_SPUR;
1349
1350 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
1351 "Getting spur idx %d is2Ghz. %d val %x\n",
1352 i, is2GHz, ah->ah_config.spurchans[i][is2GHz]);
1353
1354 switch (ah->ah_config.spurmode) {
1355 case SPUR_DISABLE:
1356 break;
1357 case SPUR_ENABLE_IOCTL:
1358 spur_val = ah->ah_config.spurchans[i][is2GHz];
1359 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
1360 "Getting spur val from new loc. %d\n", spur_val);
1361 break;
1362 case SPUR_ENABLE_EEPROM:
1363 spur_val = eep->modalHeader[is2GHz].spurChans[i].spurChan;
1364 break;
1365
1366 }
1367 return spur_val;
1368}
1369
1370static int ath9k_hw_rfattach(struct ath_hal *ah) 494static int ath9k_hw_rfattach(struct ath_hal *ah)
1371{ 495{
1372 bool rfStatus = false; 496 bool rfStatus = false;
@@ -1375,8 +499,7 @@ static int ath9k_hw_rfattach(struct ath_hal *ah)
1375 rfStatus = ath9k_hw_init_rf(ah, &ecode); 499 rfStatus = ath9k_hw_init_rf(ah, &ecode);
1376 if (!rfStatus) { 500 if (!rfStatus) {
1377 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 501 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
1378 "%s: RF setup failed, status %u\n", __func__, 502 "RF setup failed, status %u\n", ecode);
1379 ecode);
1380 return ecode; 503 return ecode;
1381 } 504 }
1382 505
@@ -1401,9 +524,9 @@ static int ath9k_hw_rf_claim(struct ath_hal *ah)
1401 break; 524 break;
1402 default: 525 default:
1403 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, 526 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1404 "%s: 5G Radio Chip Rev 0x%02X is not " 527 "5G Radio Chip Rev 0x%02X is not "
1405 "supported by this driver\n", 528 "supported by this driver\n",
1406 __func__, ah->ah_analog5GhzRev); 529 ah->ah_analog5GhzRev);
1407 return -EOPNOTSUPP; 530 return -EOPNOTSUPP;
1408 } 531 }
1409 532
@@ -1412,1473 +535,76 @@ static int ath9k_hw_rf_claim(struct ath_hal *ah)
1412 return 0; 535 return 0;
1413} 536}
1414 537
1415static void ath9k_hw_init_pll(struct ath_hal *ah, 538static int ath9k_hw_init_macaddr(struct ath_hal *ah)
1416 struct ath9k_channel *chan)
1417{
1418 u32 pll;
1419
1420 if (AR_SREV_9100(ah)) {
1421 if (chan && IS_CHAN_5GHZ(chan))
1422 pll = 0x1450;
1423 else
1424 pll = 0x1458;
1425 } else {
1426 if (AR_SREV_9280_10_OR_LATER(ah)) {
1427 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
1428
1429 if (chan && IS_CHAN_HALF_RATE(chan))
1430 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
1431 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1432 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
1433
1434 if (chan && IS_CHAN_5GHZ(chan)) {
1435 pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
1436
1437
1438 if (AR_SREV_9280_20(ah)) {
1439 if (((chan->channel % 20) == 0)
1440 || ((chan->channel % 10) == 0))
1441 pll = 0x2850;
1442 else
1443 pll = 0x142c;
1444 }
1445 } else {
1446 pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
1447 }
1448
1449 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
1450
1451 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
1452
1453 if (chan && IS_CHAN_HALF_RATE(chan))
1454 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
1455 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1456 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
1457
1458 if (chan && IS_CHAN_5GHZ(chan))
1459 pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
1460 else
1461 pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
1462 } else {
1463 pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
1464
1465 if (chan && IS_CHAN_HALF_RATE(chan))
1466 pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
1467 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1468 pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
1469
1470 if (chan && IS_CHAN_5GHZ(chan))
1471 pll |= SM(0xa, AR_RTC_PLL_DIV);
1472 else
1473 pll |= SM(0xb, AR_RTC_PLL_DIV);
1474 }
1475 }
1476 REG_WRITE(ah, (u16) (AR_RTC_PLL_CONTROL), pll);
1477
1478 udelay(RTC_PLL_SETTLE_DELAY);
1479
1480 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
1481}
1482
1483static void ath9k_hw_set_regs(struct ath_hal *ah, struct ath9k_channel *chan,
1484 enum ath9k_ht_macmode macmode)
1485{
1486 u32 phymode;
1487 struct ath_hal_5416 *ahp = AH5416(ah);
1488
1489 phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
1490 | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH;
1491
1492 if (IS_CHAN_HT40(chan)) {
1493 phymode |= AR_PHY_FC_DYN2040_EN;
1494
1495 if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
1496 (chan->chanmode == CHANNEL_G_HT40PLUS))
1497 phymode |= AR_PHY_FC_DYN2040_PRI_CH;
1498
1499 if (ahp->ah_extprotspacing == ATH9K_HT_EXTPROTSPACING_25)
1500 phymode |= AR_PHY_FC_DYN2040_EXT_CH;
1501 }
1502 REG_WRITE(ah, AR_PHY_TURBO, phymode);
1503
1504 ath9k_hw_set11nmac2040(ah, macmode);
1505
1506 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
1507 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
1508}
1509
1510static void ath9k_hw_set_operating_mode(struct ath_hal *ah, int opmode)
1511{
1512 u32 val;
1513
1514 val = REG_READ(ah, AR_STA_ID1);
1515 val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC);
1516 switch (opmode) {
1517 case ATH9K_M_HOSTAP:
1518 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP
1519 | AR_STA_ID1_KSRCH_MODE);
1520 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1521 break;
1522 case ATH9K_M_IBSS:
1523 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
1524 | AR_STA_ID1_KSRCH_MODE);
1525 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1526 break;
1527 case ATH9K_M_STA:
1528 case ATH9K_M_MONITOR:
1529 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
1530 break;
1531 }
1532}
1533
1534static void
1535ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan)
1536{
1537 u32 rfMode = 0;
1538
1539 if (chan == NULL)
1540 return;
1541
1542 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
1543 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
1544
1545 if (!AR_SREV_9280_10_OR_LATER(ah))
1546 rfMode |= (IS_CHAN_5GHZ(chan)) ? AR_PHY_MODE_RF5GHZ :
1547 AR_PHY_MODE_RF2GHZ;
1548
1549 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
1550 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
1551
1552 REG_WRITE(ah, AR_PHY_MODE, rfMode);
1553}
1554
1555static bool ath9k_hw_set_reset(struct ath_hal *ah, int type)
1556{
1557 u32 rst_flags;
1558 u32 tmpReg;
1559
1560 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1561 AR_RTC_FORCE_WAKE_ON_INT);
1562
1563 if (AR_SREV_9100(ah)) {
1564 rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
1565 AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
1566 } else {
1567 tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
1568 if (tmpReg &
1569 (AR_INTR_SYNC_LOCAL_TIMEOUT |
1570 AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
1571 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
1572 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
1573 } else {
1574 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1575 }
1576
1577 rst_flags = AR_RTC_RC_MAC_WARM;
1578 if (type == ATH9K_RESET_COLD)
1579 rst_flags |= AR_RTC_RC_MAC_COLD;
1580 }
1581
1582 REG_WRITE(ah, (u16) (AR_RTC_RC), rst_flags);
1583 udelay(50);
1584
1585 REG_WRITE(ah, (u16) (AR_RTC_RC), 0);
1586 if (!ath9k_hw_wait(ah, (u16) (AR_RTC_RC), AR_RTC_RC_M, 0)) {
1587 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
1588 "%s: RTC stuck in MAC reset\n",
1589 __func__);
1590 return false;
1591 }
1592
1593 if (!AR_SREV_9100(ah))
1594 REG_WRITE(ah, AR_RC, 0);
1595
1596 ath9k_hw_init_pll(ah, NULL);
1597
1598 if (AR_SREV_9100(ah))
1599 udelay(50);
1600
1601 return true;
1602}
1603
1604static bool ath9k_hw_set_reset_power_on(struct ath_hal *ah)
1605{
1606 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1607 AR_RTC_FORCE_WAKE_ON_INT);
1608
1609 REG_WRITE(ah, (u16) (AR_RTC_RESET), 0);
1610 REG_WRITE(ah, (u16) (AR_RTC_RESET), 1);
1611
1612 if (!ath9k_hw_wait(ah,
1613 AR_RTC_STATUS,
1614 AR_RTC_STATUS_M,
1615 AR_RTC_STATUS_ON)) {
1616 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: RTC not waking up\n",
1617 __func__);
1618 return false;
1619 }
1620
1621 ath9k_hw_read_revisions(ah);
1622
1623 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
1624}
1625
1626static bool ath9k_hw_set_reset_reg(struct ath_hal *ah,
1627 u32 type)
1628{
1629 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
1630 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
1631
1632 switch (type) {
1633 case ATH9K_RESET_POWER_ON:
1634 return ath9k_hw_set_reset_power_on(ah);
1635 break;
1636 case ATH9K_RESET_WARM:
1637 case ATH9K_RESET_COLD:
1638 return ath9k_hw_set_reset(ah, type);
1639 break;
1640 default:
1641 return false;
1642 }
1643}
1644
1645static
1646struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah,
1647 struct ath9k_channel *chan)
1648{
1649 if (!(IS_CHAN_2GHZ(chan) ^ IS_CHAN_5GHZ(chan))) {
1650 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1651 "%s: invalid channel %u/0x%x; not marked as "
1652 "2GHz or 5GHz\n", __func__, chan->channel,
1653 chan->channelFlags);
1654 return NULL;
1655 }
1656
1657 if (!IS_CHAN_OFDM(chan) &&
1658 !IS_CHAN_CCK(chan) &&
1659 !IS_CHAN_HT20(chan) &&
1660 !IS_CHAN_HT40(chan)) {
1661 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1662 "%s: invalid channel %u/0x%x; not marked as "
1663 "OFDM or CCK or HT20 or HT40PLUS or HT40MINUS\n",
1664 __func__, chan->channel, chan->channelFlags);
1665 return NULL;
1666 }
1667
1668 return ath9k_regd_check_channel(ah, chan);
1669}
1670
1671static inline bool
1672ath9k_hw_get_lower_upper_index(u8 target,
1673 u8 *pList,
1674 u16 listSize,
1675 u16 *indexL,
1676 u16 *indexR)
1677{
1678 u16 i;
1679
1680 if (target <= pList[0]) {
1681 *indexL = *indexR = 0;
1682 return true;
1683 }
1684 if (target >= pList[listSize - 1]) {
1685 *indexL = *indexR = (u16) (listSize - 1);
1686 return true;
1687 }
1688
1689 for (i = 0; i < listSize - 1; i++) {
1690 if (pList[i] == target) {
1691 *indexL = *indexR = i;
1692 return true;
1693 }
1694 if (target < pList[i + 1]) {
1695 *indexL = i;
1696 *indexR = (u16) (i + 1);
1697 return false;
1698 }
1699 }
1700 return false;
1701}
1702
1703static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
1704{
1705 int16_t nfval;
1706 int16_t sort[ATH9K_NF_CAL_HIST_MAX];
1707 int i, j;
1708
1709 for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++)
1710 sort[i] = nfCalBuffer[i];
1711
1712 for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
1713 for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
1714 if (sort[j] > sort[j - 1]) {
1715 nfval = sort[j];
1716 sort[j] = sort[j - 1];
1717 sort[j - 1] = nfval;
1718 }
1719 }
1720 }
1721 nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
1722
1723 return nfval;
1724}
1725
1726static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
1727 int16_t *nfarray)
1728{ 539{
540 u32 sum;
1729 int i; 541 int i;
1730 542 u16 eeval;
1731 for (i = 0; i < NUM_NF_READINGS; i++) {
1732 h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];
1733
1734 if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
1735 h[i].currIndex = 0;
1736
1737 if (h[i].invalidNFcount > 0) {
1738 if (nfarray[i] < AR_PHY_CCA_MIN_BAD_VALUE
1739 || nfarray[i] > AR_PHY_CCA_MAX_HIGH_VALUE) {
1740 h[i].invalidNFcount = ATH9K_NF_CAL_HIST_MAX;
1741 } else {
1742 h[i].invalidNFcount--;
1743 h[i].privNF = nfarray[i];
1744 }
1745 } else {
1746 h[i].privNF =
1747 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
1748 }
1749 }
1750 return;
1751}
1752
1753static void ar5416GetNoiseFloor(struct ath_hal *ah,
1754 int16_t nfarray[NUM_NF_READINGS])
1755{
1756 int16_t nf;
1757
1758 if (AR_SREV_9280_10_OR_LATER(ah))
1759 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
1760 else
1761 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
1762
1763 if (nf & 0x100)
1764 nf = 0 - ((nf ^ 0x1ff) + 1);
1765 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
1766 "NF calibrated [ctl] [chain 0] is %d\n", nf);
1767 nfarray[0] = nf;
1768
1769 if (AR_SREV_9280_10_OR_LATER(ah))
1770 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
1771 AR9280_PHY_CH1_MINCCA_PWR);
1772 else
1773 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
1774 AR_PHY_CH1_MINCCA_PWR);
1775
1776 if (nf & 0x100)
1777 nf = 0 - ((nf ^ 0x1ff) + 1);
1778 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
1779 "NF calibrated [ctl] [chain 1] is %d\n", nf);
1780 nfarray[1] = nf;
1781
1782 if (!AR_SREV_9280(ah)) {
1783 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
1784 AR_PHY_CH2_MINCCA_PWR);
1785 if (nf & 0x100)
1786 nf = 0 - ((nf ^ 0x1ff) + 1);
1787 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
1788 "NF calibrated [ctl] [chain 2] is %d\n", nf);
1789 nfarray[2] = nf;
1790 }
1791
1792 if (AR_SREV_9280_10_OR_LATER(ah))
1793 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
1794 AR9280_PHY_EXT_MINCCA_PWR);
1795 else
1796 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
1797 AR_PHY_EXT_MINCCA_PWR);
1798
1799 if (nf & 0x100)
1800 nf = 0 - ((nf ^ 0x1ff) + 1);
1801 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
1802 "NF calibrated [ext] [chain 0] is %d\n", nf);
1803 nfarray[3] = nf;
1804
1805 if (AR_SREV_9280_10_OR_LATER(ah))
1806 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
1807 AR9280_PHY_CH1_EXT_MINCCA_PWR);
1808 else
1809 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
1810 AR_PHY_CH1_EXT_MINCCA_PWR);
1811
1812 if (nf & 0x100)
1813 nf = 0 - ((nf ^ 0x1ff) + 1);
1814 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
1815 "NF calibrated [ext] [chain 1] is %d\n", nf);
1816 nfarray[4] = nf;
1817
1818 if (!AR_SREV_9280(ah)) {
1819 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
1820 AR_PHY_CH2_EXT_MINCCA_PWR);
1821 if (nf & 0x100)
1822 nf = 0 - ((nf ^ 0x1ff) + 1);
1823 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
1824 "NF calibrated [ext] [chain 2] is %d\n", nf);
1825 nfarray[5] = nf;
1826 }
1827}
1828
1829static bool
1830getNoiseFloorThresh(struct ath_hal *ah,
1831 const struct ath9k_channel *chan,
1832 int16_t *nft)
1833{
1834 struct ath_hal_5416 *ahp = AH5416(ah); 543 struct ath_hal_5416 *ahp = AH5416(ah);
1835 544
1836 switch (chan->chanmode) { 545 sum = 0;
1837 case CHANNEL_A: 546 for (i = 0; i < 3; i++) {
1838 case CHANNEL_A_HT20: 547 eeval = ath9k_hw_get_eeprom(ah, AR_EEPROM_MAC(i));
1839 case CHANNEL_A_HT40PLUS: 548 sum += eeval;
1840 case CHANNEL_A_HT40MINUS: 549 ahp->ah_macaddr[2 * i] = eeval >> 8;
1841 *nft = (int16_t) ath9k_hw_get_eeprom(ahp, EEP_NFTHRESH_5); 550 ahp->ah_macaddr[2 * i + 1] = eeval & 0xff;
1842 break;
1843 case CHANNEL_B:
1844 case CHANNEL_G:
1845 case CHANNEL_G_HT20:
1846 case CHANNEL_G_HT40PLUS:
1847 case CHANNEL_G_HT40MINUS:
1848 *nft = (int16_t) ath9k_hw_get_eeprom(ahp, EEP_NFTHRESH_2);
1849 break;
1850 default:
1851 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1852 "%s: invalid channel flags 0x%x\n", __func__,
1853 chan->channelFlags);
1854 return false;
1855 }
1856 return true;
1857}
1858
1859static void ath9k_hw_start_nfcal(struct ath_hal *ah)
1860{
1861 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
1862 AR_PHY_AGC_CONTROL_ENABLE_NF);
1863 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
1864 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
1865 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
1866}
1867
1868static void
1869ath9k_hw_loadnf(struct ath_hal *ah, struct ath9k_channel *chan)
1870{
1871 struct ath9k_nfcal_hist *h;
1872 int i, j;
1873 int32_t val;
1874 const u32 ar5416_cca_regs[6] = {
1875 AR_PHY_CCA,
1876 AR_PHY_CH1_CCA,
1877 AR_PHY_CH2_CCA,
1878 AR_PHY_EXT_CCA,
1879 AR_PHY_CH1_EXT_CCA,
1880 AR_PHY_CH2_EXT_CCA
1881 };
1882 u8 chainmask;
1883
1884 if (AR_SREV_9280(ah))
1885 chainmask = 0x1B;
1886 else
1887 chainmask = 0x3F;
1888
1889#ifdef ATH_NF_PER_CHAN
1890 h = chan->nfCalHist;
1891#else
1892 h = ah->nfCalHist;
1893#endif
1894
1895 for (i = 0; i < NUM_NF_READINGS; i++) {
1896 if (chainmask & (1 << i)) {
1897 val = REG_READ(ah, ar5416_cca_regs[i]);
1898 val &= 0xFFFFFE00;
1899 val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
1900 REG_WRITE(ah, ar5416_cca_regs[i], val);
1901 }
1902 }
1903
1904 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1905 AR_PHY_AGC_CONTROL_ENABLE_NF);
1906 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1907 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
1908 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
1909
1910 for (j = 0; j < 1000; j++) {
1911 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
1912 AR_PHY_AGC_CONTROL_NF) == 0)
1913 break;
1914 udelay(10);
1915 }
1916
1917 for (i = 0; i < NUM_NF_READINGS; i++) {
1918 if (chainmask & (1 << i)) {
1919 val = REG_READ(ah, ar5416_cca_regs[i]);
1920 val &= 0xFFFFFE00;
1921 val |= (((u32) (-50) << 1) & 0x1ff);
1922 REG_WRITE(ah, ar5416_cca_regs[i], val);
1923 }
1924 }
1925}
1926
1927static int16_t ath9k_hw_getnf(struct ath_hal *ah,
1928 struct ath9k_channel *chan)
1929{
1930 int16_t nf, nfThresh;
1931 int16_t nfarray[NUM_NF_READINGS] = { 0 };
1932 struct ath9k_nfcal_hist *h;
1933 u8 chainmask;
1934
1935 if (AR_SREV_9280(ah))
1936 chainmask = 0x1B;
1937 else
1938 chainmask = 0x3F;
1939
1940 chan->channelFlags &= (~CHANNEL_CW_INT);
1941 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
1942 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
1943 "%s: NF did not complete in calibration window\n",
1944 __func__);
1945 nf = 0;
1946 chan->rawNoiseFloor = nf;
1947 return chan->rawNoiseFloor;
1948 } else {
1949 ar5416GetNoiseFloor(ah, nfarray);
1950 nf = nfarray[0];
1951 if (getNoiseFloorThresh(ah, chan, &nfThresh)
1952 && nf > nfThresh) {
1953 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
1954 "%s: noise floor failed detected; "
1955 "detected %d, threshold %d\n", __func__,
1956 nf, nfThresh);
1957 chan->channelFlags |= CHANNEL_CW_INT;
1958 }
1959 } 551 }
1960 552 if (sum == 0 || sum == 0xffff * 3) {
1961#ifdef ATH_NF_PER_CHAN 553 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1962 h = chan->nfCalHist; 554 "mac address read failed: %pM\n",
1963#else 555 ahp->ah_macaddr);
1964 h = ah->nfCalHist; 556 return -EADDRNOTAVAIL;
1965#endif
1966
1967 ath9k_hw_update_nfcal_hist_buffer(h, nfarray);
1968 chan->rawNoiseFloor = h[0].privNF;
1969
1970 return chan->rawNoiseFloor;
1971}
1972
1973static void ath9k_hw_update_mibstats(struct ath_hal *ah,
1974 struct ath9k_mib_stats *stats)
1975{
1976 stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL);
1977 stats->rts_bad += REG_READ(ah, AR_RTS_FAIL);
1978 stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL);
1979 stats->rts_good += REG_READ(ah, AR_RTS_OK);
1980 stats->beacons += REG_READ(ah, AR_BEACON_CNT);
1981}
1982
1983static void ath9k_enable_mib_counters(struct ath_hal *ah)
1984{
1985 struct ath_hal_5416 *ahp = AH5416(ah);
1986
1987 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Enable mib counters\n");
1988
1989 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
1990
1991 REG_WRITE(ah, AR_FILT_OFDM, 0);
1992 REG_WRITE(ah, AR_FILT_CCK, 0);
1993 REG_WRITE(ah, AR_MIBC,
1994 ~(AR_MIBC_COW | AR_MIBC_FMC | AR_MIBC_CMC | AR_MIBC_MCS)
1995 & 0x0f);
1996 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
1997 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
1998}
1999
2000static void ath9k_hw_disable_mib_counters(struct ath_hal *ah)
2001{
2002 struct ath_hal_5416 *ahp = AH5416(ah);
2003
2004 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disabling MIB counters\n");
2005
2006 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC | AR_MIBC_CMC);
2007
2008 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2009
2010 REG_WRITE(ah, AR_FILT_OFDM, 0);
2011 REG_WRITE(ah, AR_FILT_CCK, 0);
2012}
2013
2014static int ath9k_hw_get_ani_channel_idx(struct ath_hal *ah,
2015 struct ath9k_channel *chan)
2016{
2017 struct ath_hal_5416 *ahp = AH5416(ah);
2018 int i;
2019
2020 for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) {
2021 if (ahp->ah_ani[i].c.channel == chan->channel)
2022 return i;
2023 if (ahp->ah_ani[i].c.channel == 0) {
2024 ahp->ah_ani[i].c.channel = chan->channel;
2025 ahp->ah_ani[i].c.channelFlags = chan->channelFlags;
2026 return i;
2027 }
2028 } 557 }
2029 558
2030 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2031 "No more channel states left. Using channel 0\n");
2032 return 0; 559 return 0;
2033} 560}
2034 561
2035static void ath9k_hw_ani_attach(struct ath_hal *ah) 562static void ath9k_hw_init_rxgain_ini(struct ath_hal *ah)
2036{ 563{
564 u32 rxgain_type;
2037 struct ath_hal_5416 *ahp = AH5416(ah); 565 struct ath_hal_5416 *ahp = AH5416(ah);
2038 int i;
2039 566
2040 ahp->ah_hasHwPhyCounters = 1; 567 if (ath9k_hw_get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) {
2041 568 rxgain_type = ath9k_hw_get_eeprom(ah, EEP_RXGAIN_TYPE);
2042 memset(ahp->ah_ani, 0, sizeof(ahp->ah_ani));
2043 for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) {
2044 ahp->ah_ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH;
2045 ahp->ah_ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW;
2046 ahp->ah_ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH;
2047 ahp->ah_ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW;
2048 ahp->ah_ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
2049 ahp->ah_ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
2050 ahp->ah_ani[i].ofdmWeakSigDetectOff =
2051 !ATH9K_ANI_USE_OFDM_WEAK_SIG;
2052 ahp->ah_ani[i].cckWeakSigThreshold =
2053 ATH9K_ANI_CCK_WEAK_SIG_THR;
2054 ahp->ah_ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
2055 ahp->ah_ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
2056 if (ahp->ah_hasHwPhyCounters) {
2057 ahp->ah_ani[i].ofdmPhyErrBase =
2058 AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH;
2059 ahp->ah_ani[i].cckPhyErrBase =
2060 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
2061 }
2062 }
2063 if (ahp->ah_hasHwPhyCounters) {
2064 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2065 "Setting OfdmErrBase = 0x%08x\n",
2066 ahp->ah_ani[0].ofdmPhyErrBase);
2067 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
2068 ahp->ah_ani[0].cckPhyErrBase);
2069
2070 REG_WRITE(ah, AR_PHY_ERR_1, ahp->ah_ani[0].ofdmPhyErrBase);
2071 REG_WRITE(ah, AR_PHY_ERR_2, ahp->ah_ani[0].cckPhyErrBase);
2072 ath9k_enable_mib_counters(ah);
2073 }
2074 ahp->ah_aniPeriod = ATH9K_ANI_PERIOD;
2075 if (ah->ah_config.enable_ani)
2076 ahp->ah_procPhyErr |= HAL_PROCESS_ANI;
2077}
2078
2079static void ath9k_hw_ani_setup(struct ath_hal *ah)
2080{
2081 struct ath_hal_5416 *ahp = AH5416(ah);
2082 int i;
2083 569
2084 const int totalSizeDesired[] = { -55, -55, -55, -55, -62 }; 570 if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
2085 const int coarseHigh[] = { -14, -14, -14, -14, -12 }; 571 INIT_INI_ARRAY(&ahp->ah_iniModesRxGain,
2086 const int coarseLow[] = { -64, -64, -64, -64, -70 }; 572 ar9280Modes_backoff_13db_rxgain_9280_2,
2087 const int firpwr[] = { -78, -78, -78, -78, -80 }; 573 ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
2088 574 else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
2089 for (i = 0; i < 5; i++) { 575 INIT_INI_ARRAY(&ahp->ah_iniModesRxGain,
2090 ahp->ah_totalSizeDesired[i] = totalSizeDesired[i]; 576 ar9280Modes_backoff_23db_rxgain_9280_2,
2091 ahp->ah_coarseHigh[i] = coarseHigh[i]; 577 ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
2092 ahp->ah_coarseLow[i] = coarseLow[i];
2093 ahp->ah_firpwr[i] = firpwr[i];
2094 }
2095}
2096
2097static void ath9k_hw_ani_detach(struct ath_hal *ah)
2098{
2099 struct ath_hal_5416 *ahp = AH5416(ah);
2100
2101 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Detaching Ani\n");
2102 if (ahp->ah_hasHwPhyCounters) {
2103 ath9k_hw_disable_mib_counters(ah);
2104 REG_WRITE(ah, AR_PHY_ERR_1, 0);
2105 REG_WRITE(ah, AR_PHY_ERR_2, 0);
2106 }
2107}
2108
2109
2110static bool ath9k_hw_ani_control(struct ath_hal *ah,
2111 enum ath9k_ani_cmd cmd, int param)
2112{
2113 struct ath_hal_5416 *ahp = AH5416(ah);
2114 struct ar5416AniState *aniState = ahp->ah_curani;
2115
2116 switch (cmd & ahp->ah_ani_function) {
2117 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
2118 u32 level = param;
2119
2120 if (level >= ARRAY_SIZE(ahp->ah_totalSizeDesired)) {
2121 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2122 "%s: level out of range (%u > %u)\n",
2123 __func__, level,
2124 (unsigned) ARRAY_SIZE(ahp->
2125 ah_totalSizeDesired));
2126 return false;
2127 }
2128
2129 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
2130 AR_PHY_DESIRED_SZ_TOT_DES,
2131 ahp->ah_totalSizeDesired[level]);
2132 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
2133 AR_PHY_AGC_CTL1_COARSE_LOW,
2134 ahp->ah_coarseLow[level]);
2135 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
2136 AR_PHY_AGC_CTL1_COARSE_HIGH,
2137 ahp->ah_coarseHigh[level]);
2138 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
2139 AR_PHY_FIND_SIG_FIRPWR,
2140 ahp->ah_firpwr[level]);
2141
2142 if (level > aniState->noiseImmunityLevel)
2143 ahp->ah_stats.ast_ani_niup++;
2144 else if (level < aniState->noiseImmunityLevel)
2145 ahp->ah_stats.ast_ani_nidown++;
2146 aniState->noiseImmunityLevel = level;
2147 break;
2148 }
2149 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
2150 const int m1ThreshLow[] = { 127, 50 };
2151 const int m2ThreshLow[] = { 127, 40 };
2152 const int m1Thresh[] = { 127, 0x4d };
2153 const int m2Thresh[] = { 127, 0x40 };
2154 const int m2CountThr[] = { 31, 16 };
2155 const int m2CountThrLow[] = { 63, 48 };
2156 u32 on = param ? 1 : 0;
2157
2158 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
2159 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
2160 m1ThreshLow[on]);
2161 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
2162 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
2163 m2ThreshLow[on]);
2164 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
2165 AR_PHY_SFCORR_M1_THRESH,
2166 m1Thresh[on]);
2167 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
2168 AR_PHY_SFCORR_M2_THRESH,
2169 m2Thresh[on]);
2170 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
2171 AR_PHY_SFCORR_M2COUNT_THR,
2172 m2CountThr[on]);
2173 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
2174 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
2175 m2CountThrLow[on]);
2176
2177 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
2178 AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
2179 m1ThreshLow[on]);
2180 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
2181 AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
2182 m2ThreshLow[on]);
2183 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
2184 AR_PHY_SFCORR_EXT_M1_THRESH,
2185 m1Thresh[on]);
2186 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
2187 AR_PHY_SFCORR_EXT_M2_THRESH,
2188 m2Thresh[on]);
2189
2190 if (on)
2191 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
2192 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
2193 else 578 else
2194 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW, 579 INIT_INI_ARRAY(&ahp->ah_iniModesRxGain,
2195 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); 580 ar9280Modes_original_rxgain_9280_2,
2196 581 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
2197 if (!on != aniState->ofdmWeakSigDetectOff) { 582 } else
2198 if (on) 583 INIT_INI_ARRAY(&ahp->ah_iniModesRxGain,
2199 ahp->ah_stats.ast_ani_ofdmon++; 584 ar9280Modes_original_rxgain_9280_2,
2200 else 585 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
2201 ahp->ah_stats.ast_ani_ofdmoff++;
2202 aniState->ofdmWeakSigDetectOff = !on;
2203 }
2204 break;
2205 }
2206 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
2207 const int weakSigThrCck[] = { 8, 6 };
2208 u32 high = param ? 1 : 0;
2209
2210 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
2211 AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
2212 weakSigThrCck[high]);
2213 if (high != aniState->cckWeakSigThreshold) {
2214 if (high)
2215 ahp->ah_stats.ast_ani_cckhigh++;
2216 else
2217 ahp->ah_stats.ast_ani_ccklow++;
2218 aniState->cckWeakSigThreshold = high;
2219 }
2220 break;
2221 }
2222 case ATH9K_ANI_FIRSTEP_LEVEL:{
2223 const int firstep[] = { 0, 4, 8 };
2224 u32 level = param;
2225
2226 if (level >= ARRAY_SIZE(firstep)) {
2227 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2228 "%s: level out of range (%u > %u)\n",
2229 __func__, level,
2230 (unsigned) ARRAY_SIZE(firstep));
2231 return false;
2232 }
2233 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
2234 AR_PHY_FIND_SIG_FIRSTEP,
2235 firstep[level]);
2236 if (level > aniState->firstepLevel)
2237 ahp->ah_stats.ast_ani_stepup++;
2238 else if (level < aniState->firstepLevel)
2239 ahp->ah_stats.ast_ani_stepdown++;
2240 aniState->firstepLevel = level;
2241 break;
2242 }
2243 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
2244 const int cycpwrThr1[] =
2245 { 2, 4, 6, 8, 10, 12, 14, 16 };
2246 u32 level = param;
2247
2248 if (level >= ARRAY_SIZE(cycpwrThr1)) {
2249 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2250 "%s: level out of range (%u > %u)\n",
2251 __func__, level,
2252 (unsigned)
2253 ARRAY_SIZE(cycpwrThr1));
2254 return false;
2255 }
2256 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
2257 AR_PHY_TIMING5_CYCPWR_THR1,
2258 cycpwrThr1[level]);
2259 if (level > aniState->spurImmunityLevel)
2260 ahp->ah_stats.ast_ani_spurup++;
2261 else if (level < aniState->spurImmunityLevel)
2262 ahp->ah_stats.ast_ani_spurdown++;
2263 aniState->spurImmunityLevel = level;
2264 break;
2265 }
2266 case ATH9K_ANI_PRESENT:
2267 break;
2268 default:
2269 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2270 "%s: invalid cmd %u\n", __func__, cmd);
2271 return false;
2272 }
2273
2274 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "%s: ANI parameters:\n", __func__);
2275 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2276 "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
2277 "ofdmWeakSigDetectOff=%d\n",
2278 aniState->noiseImmunityLevel, aniState->spurImmunityLevel,
2279 !aniState->ofdmWeakSigDetectOff);
2280 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2281 "cckWeakSigThreshold=%d, "
2282 "firstepLevel=%d, listenTime=%d\n",
2283 aniState->cckWeakSigThreshold, aniState->firstepLevel,
2284 aniState->listenTime);
2285 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2286 "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
2287 aniState->cycleCount, aniState->ofdmPhyErrCount,
2288 aniState->cckPhyErrCount);
2289 return true;
2290}
2291
2292static void ath9k_ani_restart(struct ath_hal *ah)
2293{
2294 struct ath_hal_5416 *ahp = AH5416(ah);
2295 struct ar5416AniState *aniState;
2296
2297 if (!DO_ANI(ah))
2298 return;
2299
2300 aniState = ahp->ah_curani;
2301
2302 aniState->listenTime = 0;
2303 if (ahp->ah_hasHwPhyCounters) {
2304 if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) {
2305 aniState->ofdmPhyErrBase = 0;
2306 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2307 "OFDM Trigger is too high for hw counters\n");
2308 } else {
2309 aniState->ofdmPhyErrBase =
2310 AR_PHY_COUNTMAX - aniState->ofdmTrigHigh;
2311 }
2312 if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) {
2313 aniState->cckPhyErrBase = 0;
2314 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2315 "CCK Trigger is too high for hw counters\n");
2316 } else {
2317 aniState->cckPhyErrBase =
2318 AR_PHY_COUNTMAX - aniState->cckTrigHigh;
2319 }
2320 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2321 "%s: Writing ofdmbase=%u cckbase=%u\n",
2322 __func__, aniState->ofdmPhyErrBase,
2323 aniState->cckPhyErrBase);
2324 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
2325 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
2326 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
2327 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
2328
2329 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2330 }
2331 aniState->ofdmPhyErrCount = 0;
2332 aniState->cckPhyErrCount = 0;
2333}
2334
2335static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hal *ah)
2336{
2337 struct ath_hal_5416 *ahp = AH5416(ah);
2338 struct ath9k_channel *chan = ah->ah_curchan;
2339 struct ar5416AniState *aniState;
2340 enum wireless_mode mode;
2341 int32_t rssi;
2342
2343 if (!DO_ANI(ah))
2344 return;
2345
2346 aniState = ahp->ah_curani;
2347
2348 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
2349 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2350 aniState->noiseImmunityLevel + 1)) {
2351 return;
2352 }
2353 }
2354
2355 if (aniState->spurImmunityLevel < HAL_SPUR_IMMUNE_MAX) {
2356 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2357 aniState->spurImmunityLevel + 1)) {
2358 return;
2359 }
2360 }
2361
2362 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2363 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
2364 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2365 aniState->firstepLevel + 1);
2366 }
2367 return;
2368 }
2369 rssi = BEACON_RSSI(ahp);
2370 if (rssi > aniState->rssiThrHigh) {
2371 if (!aniState->ofdmWeakSigDetectOff) {
2372 if (ath9k_hw_ani_control(ah,
2373 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2374 false)) {
2375 ath9k_hw_ani_control(ah,
2376 ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2377 0);
2378 return;
2379 }
2380 }
2381 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
2382 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2383 aniState->firstepLevel + 1);
2384 return;
2385 }
2386 } else if (rssi > aniState->rssiThrLow) {
2387 if (aniState->ofdmWeakSigDetectOff)
2388 ath9k_hw_ani_control(ah,
2389 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2390 true);
2391 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
2392 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2393 aniState->firstepLevel + 1);
2394 return;
2395 } else {
2396 mode = ath9k_hw_chan2wmode(ah, chan);
2397 if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
2398 if (!aniState->ofdmWeakSigDetectOff)
2399 ath9k_hw_ani_control(ah,
2400 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2401 false);
2402 if (aniState->firstepLevel > 0)
2403 ath9k_hw_ani_control(ah,
2404 ATH9K_ANI_FIRSTEP_LEVEL,
2405 0);
2406 return;
2407 }
2408 }
2409}
2410
2411static void ath9k_hw_ani_cck_err_trigger(struct ath_hal *ah)
2412{
2413 struct ath_hal_5416 *ahp = AH5416(ah);
2414 struct ath9k_channel *chan = ah->ah_curchan;
2415 struct ar5416AniState *aniState;
2416 enum wireless_mode mode;
2417 int32_t rssi;
2418
2419 if (!DO_ANI(ah))
2420 return;
2421
2422 aniState = ahp->ah_curani;
2423 if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
2424 if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2425 aniState->noiseImmunityLevel + 1)) {
2426 return;
2427 }
2428 }
2429 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2430 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
2431 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2432 aniState->firstepLevel + 1);
2433 }
2434 return;
2435 }
2436 rssi = BEACON_RSSI(ahp);
2437 if (rssi > aniState->rssiThrLow) {
2438 if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
2439 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2440 aniState->firstepLevel + 1);
2441 } else {
2442 mode = ath9k_hw_chan2wmode(ah, chan);
2443 if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) {
2444 if (aniState->firstepLevel > 0)
2445 ath9k_hw_ani_control(ah,
2446 ATH9K_ANI_FIRSTEP_LEVEL,
2447 0);
2448 }
2449 }
2450}
2451
2452static void ath9k_ani_reset(struct ath_hal *ah)
2453{
2454 struct ath_hal_5416 *ahp = AH5416(ah);
2455 struct ar5416AniState *aniState;
2456 struct ath9k_channel *chan = ah->ah_curchan;
2457 int index;
2458
2459 if (!DO_ANI(ah))
2460 return;
2461
2462 index = ath9k_hw_get_ani_channel_idx(ah, chan);
2463 aniState = &ahp->ah_ani[index];
2464 ahp->ah_curani = aniState;
2465
2466 if (DO_ANI(ah) && ah->ah_opmode != ATH9K_M_STA
2467 && ah->ah_opmode != ATH9K_M_IBSS) {
2468 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2469 "%s: Reset ANI state opmode %u\n", __func__,
2470 ah->ah_opmode);
2471 ahp->ah_stats.ast_ani_reset++;
2472 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0);
2473 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
2474 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0);
2475 ath9k_hw_ani_control(ah,
2476 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2477 !ATH9K_ANI_USE_OFDM_WEAK_SIG);
2478 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
2479 ATH9K_ANI_CCK_WEAK_SIG_THR);
2480 ath9k_hw_setrxfilter(ah,
2481 ath9k_hw_getrxfilter(ah) |
2482 ATH9K_RX_FILTER_PHYERR);
2483 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2484 ahp->ah_curani->ofdmTrigHigh =
2485 ah->ah_config.ofdm_trig_high;
2486 ahp->ah_curani->ofdmTrigLow =
2487 ah->ah_config.ofdm_trig_low;
2488 ahp->ah_curani->cckTrigHigh =
2489 ah->ah_config.cck_trig_high;
2490 ahp->ah_curani->cckTrigLow =
2491 ah->ah_config.cck_trig_low;
2492 }
2493 ath9k_ani_restart(ah);
2494 return;
2495 }
2496
2497 if (aniState->noiseImmunityLevel != 0)
2498 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2499 aniState->noiseImmunityLevel);
2500 if (aniState->spurImmunityLevel != 0)
2501 ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2502 aniState->spurImmunityLevel);
2503 if (aniState->ofdmWeakSigDetectOff)
2504 ath9k_hw_ani_control(ah,
2505 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2506 !aniState->ofdmWeakSigDetectOff);
2507 if (aniState->cckWeakSigThreshold)
2508 ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
2509 aniState->cckWeakSigThreshold);
2510 if (aniState->firstepLevel != 0)
2511 ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2512 aniState->firstepLevel);
2513 if (ahp->ah_hasHwPhyCounters) {
2514 ath9k_hw_setrxfilter(ah,
2515 ath9k_hw_getrxfilter(ah) &
2516 ~ATH9K_RX_FILTER_PHYERR);
2517 ath9k_ani_restart(ah);
2518 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
2519 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
2520
2521 } else {
2522 ath9k_ani_restart(ah);
2523 ath9k_hw_setrxfilter(ah,
2524 ath9k_hw_getrxfilter(ah) |
2525 ATH9K_RX_FILTER_PHYERR);
2526 }
2527}
2528
2529/*
2530 * Process a MIB interrupt. We may potentially be invoked because
2531 * any of the MIB counters overflow/trigger so don't assume we're
2532 * here because a PHY error counter triggered.
2533 */
2534void ath9k_hw_procmibevent(struct ath_hal *ah,
2535 const struct ath9k_node_stats *stats)
2536{
2537 struct ath_hal_5416 *ahp = AH5416(ah);
2538 u32 phyCnt1, phyCnt2;
2539
2540 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Processing Mib Intr\n");
2541 /* Reset these counters regardless */
2542 REG_WRITE(ah, AR_FILT_OFDM, 0);
2543 REG_WRITE(ah, AR_FILT_CCK, 0);
2544 if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
2545 REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
2546
2547 /* Clear the mib counters and save them in the stats */
2548 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2549 ahp->ah_stats.ast_nodestats = *stats;
2550
2551 if (!DO_ANI(ah))
2552 return;
2553
2554 /* NB: these are not reset-on-read */
2555 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
2556 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
2557 if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
2558 ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) {
2559 struct ar5416AniState *aniState = ahp->ah_curani;
2560 u32 ofdmPhyErrCnt, cckPhyErrCnt;
2561
2562 /* NB: only use ast_ani_*errs with AH_PRIVATE_DIAG */
2563 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
2564 ahp->ah_stats.ast_ani_ofdmerrs +=
2565 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
2566 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
2567
2568 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
2569 ahp->ah_stats.ast_ani_cckerrs +=
2570 cckPhyErrCnt - aniState->cckPhyErrCount;
2571 aniState->cckPhyErrCount = cckPhyErrCnt;
2572
2573 /*
2574 * NB: figure out which counter triggered. If both
2575 * trigger we'll only deal with one as the processing
2576 * clobbers the error counter so the trigger threshold
2577 * check will never be true.
2578 */
2579 if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh)
2580 ath9k_hw_ani_ofdm_err_trigger(ah);
2581 if (aniState->cckPhyErrCount > aniState->cckTrigHigh)
2582 ath9k_hw_ani_cck_err_trigger(ah);
2583 /* NB: always restart to insure the h/w counters are reset */
2584 ath9k_ani_restart(ah);
2585 }
2586}
2587
2588static void ath9k_hw_ani_lower_immunity(struct ath_hal *ah)
2589{
2590 struct ath_hal_5416 *ahp = AH5416(ah);
2591 struct ar5416AniState *aniState;
2592 int32_t rssi;
2593
2594 aniState = ahp->ah_curani;
2595
2596 if (ah->ah_opmode == ATH9K_M_HOSTAP) {
2597 if (aniState->firstepLevel > 0) {
2598 if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
2599 aniState->firstepLevel - 1)) {
2600 return;
2601 }
2602 }
2603 } else {
2604 rssi = BEACON_RSSI(ahp);
2605 if (rssi > aniState->rssiThrHigh) {
2606 /* XXX: Handle me */
2607 } else if (rssi > aniState->rssiThrLow) {
2608 if (aniState->ofdmWeakSigDetectOff) {
2609 if (ath9k_hw_ani_control(ah,
2610 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
2611 true) ==
2612 true) {
2613 return;
2614 }
2615 }
2616 if (aniState->firstepLevel > 0) {
2617 if (ath9k_hw_ani_control
2618 (ah, ATH9K_ANI_FIRSTEP_LEVEL,
2619 aniState->firstepLevel - 1) ==
2620 true) {
2621 return;
2622 }
2623 }
2624 } else {
2625 if (aniState->firstepLevel > 0) {
2626 if (ath9k_hw_ani_control
2627 (ah, ATH9K_ANI_FIRSTEP_LEVEL,
2628 aniState->firstepLevel - 1) ==
2629 true) {
2630 return;
2631 }
2632 }
2633 }
2634 }
2635
2636 if (aniState->spurImmunityLevel > 0) {
2637 if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
2638 aniState->spurImmunityLevel - 1)) {
2639 return;
2640 }
2641 }
2642
2643 if (aniState->noiseImmunityLevel > 0) {
2644 ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
2645 aniState->noiseImmunityLevel - 1);
2646 return;
2647 }
2648}
2649
2650static int32_t ath9k_hw_ani_get_listen_time(struct ath_hal *ah)
2651{
2652 struct ath_hal_5416 *ahp = AH5416(ah);
2653 struct ar5416AniState *aniState;
2654 u32 txFrameCount, rxFrameCount, cycleCount;
2655 int32_t listenTime;
2656
2657 txFrameCount = REG_READ(ah, AR_TFCNT);
2658 rxFrameCount = REG_READ(ah, AR_RFCNT);
2659 cycleCount = REG_READ(ah, AR_CCCNT);
2660
2661 aniState = ahp->ah_curani;
2662 if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) {
2663
2664 listenTime = 0;
2665 ahp->ah_stats.ast_ani_lzero++;
2666 } else {
2667 int32_t ccdelta = cycleCount - aniState->cycleCount;
2668 int32_t rfdelta = rxFrameCount - aniState->rxFrameCount;
2669 int32_t tfdelta = txFrameCount - aniState->txFrameCount;
2670 listenTime = (ccdelta - rfdelta - tfdelta) / 44000;
2671 }
2672 aniState->cycleCount = cycleCount;
2673 aniState->txFrameCount = txFrameCount;
2674 aniState->rxFrameCount = rxFrameCount;
2675
2676 return listenTime;
2677} 586}
2678 587
2679void ath9k_hw_ani_monitor(struct ath_hal *ah, 588static void ath9k_hw_init_txgain_ini(struct ath_hal *ah)
2680 const struct ath9k_node_stats *stats,
2681 struct ath9k_channel *chan)
2682{ 589{
590 u32 txgain_type;
2683 struct ath_hal_5416 *ahp = AH5416(ah); 591 struct ath_hal_5416 *ahp = AH5416(ah);
2684 struct ar5416AniState *aniState;
2685 int32_t listenTime;
2686
2687 aniState = ahp->ah_curani;
2688 ahp->ah_stats.ast_nodestats = *stats;
2689
2690 listenTime = ath9k_hw_ani_get_listen_time(ah);
2691 if (listenTime < 0) {
2692 ahp->ah_stats.ast_ani_lneg++;
2693 ath9k_ani_restart(ah);
2694 return;
2695 }
2696
2697 aniState->listenTime += listenTime;
2698
2699 if (ahp->ah_hasHwPhyCounters) {
2700 u32 phyCnt1, phyCnt2;
2701 u32 ofdmPhyErrCnt, cckPhyErrCnt;
2702
2703 ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats);
2704
2705 phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
2706 phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
2707
2708 if (phyCnt1 < aniState->ofdmPhyErrBase ||
2709 phyCnt2 < aniState->cckPhyErrBase) {
2710 if (phyCnt1 < aniState->ofdmPhyErrBase) {
2711 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2712 "%s: phyCnt1 0x%x, resetting "
2713 "counter value to 0x%x\n",
2714 __func__, phyCnt1,
2715 aniState->ofdmPhyErrBase);
2716 REG_WRITE(ah, AR_PHY_ERR_1,
2717 aniState->ofdmPhyErrBase);
2718 REG_WRITE(ah, AR_PHY_ERR_MASK_1,
2719 AR_PHY_ERR_OFDM_TIMING);
2720 }
2721 if (phyCnt2 < aniState->cckPhyErrBase) {
2722 DPRINTF(ah->ah_sc, ATH_DBG_ANI,
2723 "%s: phyCnt2 0x%x, resetting "
2724 "counter value to 0x%x\n",
2725 __func__, phyCnt2,
2726 aniState->cckPhyErrBase);
2727 REG_WRITE(ah, AR_PHY_ERR_2,
2728 aniState->cckPhyErrBase);
2729 REG_WRITE(ah, AR_PHY_ERR_MASK_2,
2730 AR_PHY_ERR_CCK_TIMING);
2731 }
2732 return;
2733 }
2734
2735 ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase;
2736 ahp->ah_stats.ast_ani_ofdmerrs +=
2737 ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
2738 aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
2739
2740 cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase;
2741 ahp->ah_stats.ast_ani_cckerrs +=
2742 cckPhyErrCnt - aniState->cckPhyErrCount;
2743 aniState->cckPhyErrCount = cckPhyErrCnt;
2744 }
2745
2746 if (!DO_ANI(ah))
2747 return;
2748
2749 if (aniState->listenTime > 5 * ahp->ah_aniPeriod) {
2750 if (aniState->ofdmPhyErrCount <= aniState->listenTime *
2751 aniState->ofdmTrigLow / 1000 &&
2752 aniState->cckPhyErrCount <= aniState->listenTime *
2753 aniState->cckTrigLow / 1000)
2754 ath9k_hw_ani_lower_immunity(ah);
2755 ath9k_ani_restart(ah);
2756 } else if (aniState->listenTime > ahp->ah_aniPeriod) {
2757 if (aniState->ofdmPhyErrCount > aniState->listenTime *
2758 aniState->ofdmTrigHigh / 1000) {
2759 ath9k_hw_ani_ofdm_err_trigger(ah);
2760 ath9k_ani_restart(ah);
2761 } else if (aniState->cckPhyErrCount >
2762 aniState->listenTime * aniState->cckTrigHigh /
2763 1000) {
2764 ath9k_hw_ani_cck_err_trigger(ah);
2765 ath9k_ani_restart(ah);
2766 }
2767 }
2768}
2769
2770#ifndef ATH_NF_PER_CHAN
2771static void ath9k_init_nfcal_hist_buffer(struct ath_hal *ah)
2772{
2773 int i, j;
2774
2775 for (i = 0; i < NUM_NF_READINGS; i++) {
2776 ah->nfCalHist[i].currIndex = 0;
2777 ah->nfCalHist[i].privNF = AR_PHY_CCA_MAX_GOOD_VALUE;
2778 ah->nfCalHist[i].invalidNFcount =
2779 AR_PHY_CCA_FILTERWINDOW_LENGTH;
2780 for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
2781 ah->nfCalHist[i].nfCalBuffer[j] =
2782 AR_PHY_CCA_MAX_GOOD_VALUE;
2783 }
2784 }
2785 return;
2786}
2787#endif
2788
2789static void ath9k_hw_gpio_cfg_output_mux(struct ath_hal *ah,
2790 u32 gpio, u32 type)
2791{
2792 int addr;
2793 u32 gpio_shift, tmp;
2794
2795 if (gpio > 11)
2796 addr = AR_GPIO_OUTPUT_MUX3;
2797 else if (gpio > 5)
2798 addr = AR_GPIO_OUTPUT_MUX2;
2799 else
2800 addr = AR_GPIO_OUTPUT_MUX1;
2801
2802 gpio_shift = (gpio % 6) * 5;
2803
2804 if (AR_SREV_9280_20_OR_LATER(ah)
2805 || (addr != AR_GPIO_OUTPUT_MUX1)) {
2806 REG_RMW(ah, addr, (type << gpio_shift),
2807 (0x1f << gpio_shift));
2808 } else {
2809 tmp = REG_READ(ah, addr);
2810 tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0);
2811 tmp &= ~(0x1f << gpio_shift);
2812 tmp |= (type << gpio_shift);
2813 REG_WRITE(ah, addr, tmp);
2814 }
2815}
2816
2817void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
2818 u32 ah_signal_type)
2819{
2820 u32 gpio_shift;
2821
2822 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
2823
2824 gpio_shift = 2 * gpio;
2825
2826 REG_RMW(ah,
2827 AR_GPIO_OE_OUT,
2828 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
2829 (AR_GPIO_OE_OUT_DRV << gpio_shift));
2830}
2831
2832void ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, u32 val)
2833{
2834 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
2835 AR_GPIO_BIT(gpio));
2836}
2837
2838/*
2839 * Configure GPIO Input lines
2840 */
2841void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio)
2842{
2843 u32 gpio_shift;
2844
2845 ASSERT(gpio < ah->ah_caps.num_gpio_pins);
2846
2847 gpio_shift = gpio << 1;
2848
2849 REG_RMW(ah,
2850 AR_GPIO_OE_OUT,
2851 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
2852 (AR_GPIO_OE_OUT_DRV << gpio_shift));
2853}
2854
#ifdef CONFIG_RFKILL
/*
 * Route the rfkill (RF-silent) GPIO into the baseband: enable the
 * RF-silent input, route it through the input mux, configure the pin
 * as input and let the baseband honor it.
 */
static void ath9k_enable_rfkill(struct ath_hal *ah)
{
	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
		    AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);

	REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
		    AR_GPIO_INPUT_MUX2_RFSILENT);

	ath9k_hw_cfg_gpio_input(ah, ah->ah_rfkill_gpio);
	REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
}
#endif
2868
2869u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio)
2870{
2871 if (gpio >= ah->ah_caps.num_gpio_pins)
2872 return 0xffffffff;
2873 595
2874 if (AR_SREV_9280_10_OR_LATER(ah)) { 596 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
2875 return (MS 597 INIT_INI_ARRAY(&ahp->ah_iniModesTxGain,
2876 (REG_READ(ah, AR_GPIO_IN_OUT), 598 ar9280Modes_high_power_tx_gain_9280_2,
2877 AR928X_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) != 0; 599 ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
2878 } else { 600 else
2879 return (MS(REG_READ(ah, AR_GPIO_IN_OUT), AR_GPIO_IN_VAL) & 601 INIT_INI_ARRAY(&ahp->ah_iniModesTxGain,
2880 AR_GPIO_BIT(gpio)) != 0; 602 ar9280Modes_original_tx_gain_9280_2,
2881 } 603 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
604 } else
605 INIT_INI_ARRAY(&ahp->ah_iniModesTxGain,
606 ar9280Modes_original_tx_gain_9280_2,
607 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
2882} 608}
2883 609
2884static int ath9k_hw_post_attach(struct ath_hal *ah) 610static int ath9k_hw_post_attach(struct ath_hal *ah)
@@ -2887,7 +613,7 @@ static int ath9k_hw_post_attach(struct ath_hal *ah)
2887 613
2888 if (!ath9k_hw_chip_test(ah)) { 614 if (!ath9k_hw_chip_test(ah)) {
2889 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, 615 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
2890 "%s: hardware self-test failed\n", __func__); 616 "hardware self-test failed\n");
2891 return -ENODEV; 617 return -ENODEV;
2892 } 618 }
2893 619
@@ -2906,365 +632,17 @@ static int ath9k_hw_post_attach(struct ath_hal *ah)
2906 ath9k_hw_ani_setup(ah); 632 ath9k_hw_ani_setup(ah);
2907 ath9k_hw_ani_attach(ah); 633 ath9k_hw_ani_attach(ah);
2908 } 634 }
2909 return 0;
2910}
2911
2912static u32 ath9k_hw_ini_fixup(struct ath_hal *ah,
2913 struct ar5416_eeprom *pEepData,
2914 u32 reg, u32 value)
2915{
2916 struct base_eep_header *pBase = &(pEepData->baseEepHeader);
2917
2918 switch (ah->ah_devid) {
2919 case AR9280_DEVID_PCI:
2920 if (reg == 0x7894) {
2921 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2922 "ini VAL: %x EEPROM: %x\n", value,
2923 (pBase->version & 0xff));
2924
2925 if ((pBase->version & 0xff) > 0x0a) {
2926 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2927 "PWDCLKIND: %d\n",
2928 pBase->pwdclkind);
2929 value &= ~AR_AN_TOP2_PWDCLKIND;
2930 value |= AR_AN_TOP2_PWDCLKIND & (pBase->
2931 pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
2932 } else {
2933 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2934 "PWDCLKIND Earlier Rev\n");
2935 }
2936
2937 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
2938 "final ini VAL: %x\n", value);
2939 }
2940 break;
2941 }
2942 return value;
2943}
2944
2945static bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
2946{
2947 struct ath_hal_5416 *ahp = AH5416(ah);
2948 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
2949 u16 capField = 0, eeval;
2950
2951 eeval = ath9k_hw_get_eeprom(ahp, EEP_REG_0);
2952
2953 ah->ah_currentRD = eeval;
2954
2955 eeval = ath9k_hw_get_eeprom(ahp, EEP_REG_1);
2956 ah->ah_currentRDExt = eeval;
2957
2958 capField = ath9k_hw_get_eeprom(ahp, EEP_OP_CAP);
2959
2960 if (ah->ah_opmode != ATH9K_M_HOSTAP &&
2961 ah->ah_subvendorid == AR_SUBVENDOR_ID_NEW_A) {
2962 if (ah->ah_currentRD == 0x64 || ah->ah_currentRD == 0x65)
2963 ah->ah_currentRD += 5;
2964 else if (ah->ah_currentRD == 0x41)
2965 ah->ah_currentRD = 0x43;
2966 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
2967 "%s: regdomain mapped to 0x%x\n", __func__,
2968 ah->ah_currentRD);
2969 }
2970
2971 eeval = ath9k_hw_get_eeprom(ahp, EEP_OP_MODE);
2972 bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX);
2973
2974 if (eeval & AR5416_OPFLAGS_11A) {
2975 set_bit(ATH9K_MODE_11A, pCap->wireless_modes);
2976 if (ah->ah_config.ht_enable) {
2977 if (!(eeval & AR5416_OPFLAGS_N_5G_HT20))
2978 set_bit(ATH9K_MODE_11NA_HT20,
2979 pCap->wireless_modes);
2980 if (!(eeval & AR5416_OPFLAGS_N_5G_HT40)) {
2981 set_bit(ATH9K_MODE_11NA_HT40PLUS,
2982 pCap->wireless_modes);
2983 set_bit(ATH9K_MODE_11NA_HT40MINUS,
2984 pCap->wireless_modes);
2985 }
2986 }
2987 }
2988
2989 if (eeval & AR5416_OPFLAGS_11G) {
2990 set_bit(ATH9K_MODE_11B, pCap->wireless_modes);
2991 set_bit(ATH9K_MODE_11G, pCap->wireless_modes);
2992 if (ah->ah_config.ht_enable) {
2993 if (!(eeval & AR5416_OPFLAGS_N_2G_HT20))
2994 set_bit(ATH9K_MODE_11NG_HT20,
2995 pCap->wireless_modes);
2996 if (!(eeval & AR5416_OPFLAGS_N_2G_HT40)) {
2997 set_bit(ATH9K_MODE_11NG_HT40PLUS,
2998 pCap->wireless_modes);
2999 set_bit(ATH9K_MODE_11NG_HT40MINUS,
3000 pCap->wireless_modes);
3001 }
3002 }
3003 }
3004
3005 pCap->tx_chainmask = ath9k_hw_get_eeprom(ahp, EEP_TX_MASK);
3006 if ((ah->ah_isPciExpress)
3007 || (eeval & AR5416_OPFLAGS_11A)) {
3008 pCap->rx_chainmask =
3009 ath9k_hw_get_eeprom(ahp, EEP_RX_MASK);
3010 } else {
3011 pCap->rx_chainmask =
3012 (ath9k_hw_gpio_get(ah, 0)) ? 0x5 : 0x7;
3013 }
3014
3015 if (!(AR_SREV_9280(ah) && (ah->ah_macRev == 0)))
3016 ahp->ah_miscMode |= AR_PCU_MIC_NEW_LOC_ENA;
3017
3018 pCap->low_2ghz_chan = 2312;
3019 pCap->high_2ghz_chan = 2732;
3020
3021 pCap->low_5ghz_chan = 4920;
3022 pCap->high_5ghz_chan = 6100;
3023
3024 pCap->hw_caps &= ~ATH9K_HW_CAP_CIPHER_CKIP;
3025 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_TKIP;
3026 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_AESCCM;
3027
3028 pCap->hw_caps &= ~ATH9K_HW_CAP_MIC_CKIP;
3029 pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP;
3030 pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM;
3031
3032 pCap->hw_caps |= ATH9K_HW_CAP_CHAN_SPREAD;
3033
3034 if (ah->ah_config.ht_enable)
3035 pCap->hw_caps |= ATH9K_HW_CAP_HT;
3036 else
3037 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
3038
3039 pCap->hw_caps |= ATH9K_HW_CAP_GTT;
3040 pCap->hw_caps |= ATH9K_HW_CAP_VEOL;
3041 pCap->hw_caps |= ATH9K_HW_CAP_BSSIDMASK;
3042 pCap->hw_caps &= ~ATH9K_HW_CAP_MCAST_KEYSEARCH;
3043
3044 if (capField & AR_EEPROM_EEPCAP_MAXQCU)
3045 pCap->total_queues =
3046 MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
3047 else
3048 pCap->total_queues = ATH9K_NUM_TX_QUEUES;
3049
3050 if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES)
3051 pCap->keycache_size =
3052 1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES);
3053 else
3054 pCap->keycache_size = AR_KEYTABLE_SIZE;
3055
3056 pCap->hw_caps |= ATH9K_HW_CAP_FASTCC;
3057 pCap->num_mr_retries = 4;
3058 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
3059
3060 if (AR_SREV_9280_10_OR_LATER(ah))
3061 pCap->num_gpio_pins = AR928X_NUM_GPIO;
3062 else
3063 pCap->num_gpio_pins = AR_NUM_GPIO;
3064
3065 if (AR_SREV_9280_10_OR_LATER(ah)) {
3066 pCap->hw_caps |= ATH9K_HW_CAP_WOW;
3067 pCap->hw_caps |= ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
3068 } else {
3069 pCap->hw_caps &= ~ATH9K_HW_CAP_WOW;
3070 pCap->hw_caps &= ~ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
3071 }
3072
3073 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
3074 pCap->hw_caps |= ATH9K_HW_CAP_CST;
3075 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
3076 } else {
3077 pCap->rts_aggr_limit = (8 * 1024);
3078 }
3079
3080 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
3081
3082#ifdef CONFIG_RFKILL
3083 ah->ah_rfsilent = ath9k_hw_get_eeprom(ahp, EEP_RF_SILENT);
3084 if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) {
3085 ah->ah_rfkill_gpio =
3086 MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL);
3087 ah->ah_rfkill_polarity =
3088 MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY);
3089
3090 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
3091 }
3092#endif
3093
3094 if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) ||
3095 (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) ||
3096 (ah->ah_macVersion == AR_SREV_VERSION_9160) ||
3097 (ah->ah_macVersion == AR_SREV_VERSION_9100) ||
3098 (ah->ah_macVersion == AR_SREV_VERSION_9280))
3099 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
3100 else
3101 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
3102
3103 if (AR_SREV_9280(ah))
3104 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
3105 else
3106 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
3107
3108 if (ah->ah_currentRDExt & (1 << REG_EXT_JAPAN_MIDBAND)) {
3109 pCap->reg_cap =
3110 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
3111 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
3112 AR_EEPROM_EEREGCAP_EN_KK_U2 |
3113 AR_EEPROM_EEREGCAP_EN_KK_MIDBAND;
3114 } else {
3115 pCap->reg_cap =
3116 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
3117 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
3118 }
3119
3120 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
3121
3122 pCap->num_antcfg_5ghz =
3123 ath9k_hw_get_num_ant_config(ahp, IEEE80211_BAND_5GHZ);
3124 pCap->num_antcfg_2ghz =
3125 ath9k_hw_get_num_ant_config(ahp, IEEE80211_BAND_2GHZ);
3126
3127 return true;
3128}
3129
3130static void ar5416DisablePciePhy(struct ath_hal *ah)
3131{
3132 if (!AR_SREV_9100(ah))
3133 return;
3134
3135 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
3136 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
3137 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
3138 REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824);
3139 REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579);
3140 REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000);
3141 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
3142 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
3143 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
3144
3145 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
3146}
3147
3148static void ath9k_set_power_sleep(struct ath_hal *ah, int setChip)
3149{
3150 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
3151 if (setChip) {
3152 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
3153 AR_RTC_FORCE_WAKE_EN);
3154 if (!AR_SREV_9100(ah))
3155 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
3156
3157 REG_CLR_BIT(ah, (u16) (AR_RTC_RESET),
3158 AR_RTC_RESET_EN);
3159 }
3160}
3161
3162static void ath9k_set_power_network_sleep(struct ath_hal *ah, int setChip)
3163{
3164 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
3165 if (setChip) {
3166 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
3167
3168 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
3169 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
3170 AR_RTC_FORCE_WAKE_ON_INT);
3171 } else {
3172 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
3173 AR_RTC_FORCE_WAKE_EN);
3174 }
3175 }
3176}
3177
3178static bool ath9k_hw_set_power_awake(struct ath_hal *ah,
3179 int setChip)
3180{
3181 u32 val;
3182 int i;
3183
3184 if (setChip) {
3185 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) ==
3186 AR_RTC_STATUS_SHUTDOWN) {
3187 if (ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)
3188 != true) {
3189 return false;
3190 }
3191 }
3192 if (AR_SREV_9100(ah))
3193 REG_SET_BIT(ah, AR_RTC_RESET,
3194 AR_RTC_RESET_EN);
3195
3196 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
3197 AR_RTC_FORCE_WAKE_EN);
3198 udelay(50);
3199
3200 for (i = POWER_UP_TIME / 50; i > 0; i--) {
3201 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
3202 if (val == AR_RTC_STATUS_ON)
3203 break;
3204 udelay(50);
3205 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
3206 AR_RTC_FORCE_WAKE_EN);
3207 }
3208 if (i == 0) {
3209 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
3210 "%s: Failed to wakeup in %uus\n",
3211 __func__, POWER_UP_TIME / 20);
3212 return false;
3213 }
3214 }
3215
3216 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
3217 return true;
3218}
3219
3220bool ath9k_hw_setpower(struct ath_hal *ah,
3221 enum ath9k_power_mode mode)
3222{
3223 struct ath_hal_5416 *ahp = AH5416(ah);
3224 static const char *modes[] = {
3225 "AWAKE",
3226 "FULL-SLEEP",
3227 "NETWORK SLEEP",
3228 "UNDEFINED"
3229 };
3230 int status = true, setChip = true;
3231 635
3232 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, "%s: %s -> %s (%s)\n", __func__, 636 return 0;
3233 modes[ahp->ah_powerMode], modes[mode],
3234 setChip ? "set chip " : "");
3235
3236 switch (mode) {
3237 case ATH9K_PM_AWAKE:
3238 status = ath9k_hw_set_power_awake(ah, setChip);
3239 break;
3240 case ATH9K_PM_FULL_SLEEP:
3241 ath9k_set_power_sleep(ah, setChip);
3242 ahp->ah_chipFullSleep = true;
3243 break;
3244 case ATH9K_PM_NETWORK_SLEEP:
3245 ath9k_set_power_network_sleep(ah, setChip);
3246 break;
3247 default:
3248 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
3249 "%s: unknown power mode %u\n", __func__, mode);
3250 return false;
3251 }
3252 ahp->ah_powerMode = mode;
3253 return status;
3254} 637}
3255 638
3256static struct ath_hal *ath9k_hw_do_attach(u16 devid, 639static struct ath_hal *ath9k_hw_do_attach(u16 devid, struct ath_softc *sc,
3257 struct ath_softc *sc, 640 void __iomem *mem, int *status)
3258 void __iomem *mem,
3259 int *status)
3260{ 641{
3261 struct ath_hal_5416 *ahp; 642 struct ath_hal_5416 *ahp;
3262 struct ath_hal *ah; 643 struct ath_hal *ah;
3263 int ecode; 644 int ecode;
3264#ifndef CONFIG_SLOW_ANT_DIV 645 u32 i, j;
3265 u32 i;
3266 u32 j;
3267#endif
3268 646
3269 ahp = ath9k_hw_newstate(devid, sc, mem, status); 647 ahp = ath9k_hw_newstate(devid, sc, mem, status);
3270 if (ahp == NULL) 648 if (ahp == NULL)
@@ -3278,15 +656,13 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid,
3278 ahp->ah_intrMitigation = true; 656 ahp->ah_intrMitigation = true;
3279 657
3280 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 658 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
3281 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: couldn't reset chip\n", 659 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "Couldn't reset chip\n");
3282 __func__);
3283 ecode = -EIO; 660 ecode = -EIO;
3284 goto bad; 661 goto bad;
3285 } 662 }
3286 663
3287 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { 664 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
3288 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: couldn't wakeup chip\n", 665 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "Couldn't wakeup chip\n");
3289 __func__);
3290 ecode = -EIO; 666 ecode = -EIO;
3291 goto bad; 667 goto bad;
3292 } 668 }
@@ -3300,18 +676,18 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid,
3300 SER_REG_MODE_OFF; 676 SER_REG_MODE_OFF;
3301 } 677 }
3302 } 678 }
679
3303 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 680 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3304 "%s: serialize_regmode is %d\n", 681 "serialize_regmode is %d\n",
3305 __func__, ah->ah_config.serialize_regmode); 682 ah->ah_config.serialize_regmode);
3306 683
3307 if ((ah->ah_macVersion != AR_SREV_VERSION_5416_PCI) && 684 if ((ah->ah_macVersion != AR_SREV_VERSION_5416_PCI) &&
3308 (ah->ah_macVersion != AR_SREV_VERSION_5416_PCIE) && 685 (ah->ah_macVersion != AR_SREV_VERSION_5416_PCIE) &&
3309 (ah->ah_macVersion != AR_SREV_VERSION_9160) && 686 (ah->ah_macVersion != AR_SREV_VERSION_9160) &&
3310 (!AR_SREV_9100(ah)) && (!AR_SREV_9280(ah))) { 687 (!AR_SREV_9100(ah)) && (!AR_SREV_9280(ah)) && (!AR_SREV_9285(ah))) {
3311 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 688 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3312 "%s: Mac Chip Rev 0x%02x.%x is not supported by " 689 "Mac Chip Rev 0x%02x.%x is not supported by "
3313 "this driver\n", __func__, 690 "this driver\n", ah->ah_macVersion, ah->ah_macRev);
3314 ah->ah_macVersion, ah->ah_macRev);
3315 ecode = -EOPNOTSUPP; 691 ecode = -EOPNOTSUPP;
3316 goto bad; 692 goto bad;
3317 } 693 }
@@ -3341,8 +717,7 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid,
3341 ahp->ah_adcDcCalInitData.calData = 717 ahp->ah_adcDcCalInitData.calData =
3342 &adc_init_dc_cal; 718 &adc_init_dc_cal;
3343 } 719 }
3344 ahp->ah_suppCals = 720 ahp->ah_suppCals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
3345 ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
3346 } 721 }
3347 722
3348 if (AR_SREV_9160(ah)) { 723 if (AR_SREV_9160(ah)) {
@@ -3352,16 +727,46 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid,
3352 } else { 727 } else {
3353 ahp->ah_ani_function = ATH9K_ANI_ALL; 728 ahp->ah_ani_function = ATH9K_ANI_ALL;
3354 if (AR_SREV_9280_10_OR_LATER(ah)) { 729 if (AR_SREV_9280_10_OR_LATER(ah)) {
3355 ahp->ah_ani_function &= 730 ahp->ah_ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
3356 ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
3357 } 731 }
3358 } 732 }
3359 733
3360 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 734 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3361 "%s: This Mac Chip Rev 0x%02x.%x is \n", __func__, 735 "This Mac Chip Rev 0x%02x.%x is \n",
3362 ah->ah_macVersion, ah->ah_macRev); 736 ah->ah_macVersion, ah->ah_macRev);
3363 737
3364 if (AR_SREV_9280_20_OR_LATER(ah)) { 738 if (AR_SREV_9285_12_OR_LATER(ah)) {
739 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9285Modes_9285_1_2,
740 ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
741 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9285Common_9285_1_2,
742 ARRAY_SIZE(ar9285Common_9285_1_2), 2);
743
744 if (ah->ah_config.pcie_clock_req) {
745 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
746 ar9285PciePhy_clkreq_off_L1_9285_1_2,
747 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
748 } else {
749 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
750 ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
751 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
752 2);
753 }
754 } else if (AR_SREV_9285_10_OR_LATER(ah)) {
755 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9285Modes_9285,
756 ARRAY_SIZE(ar9285Modes_9285), 6);
757 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9285Common_9285,
758 ARRAY_SIZE(ar9285Common_9285), 2);
759
760 if (ah->ah_config.pcie_clock_req) {
761 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
762 ar9285PciePhy_clkreq_off_L1_9285,
763 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
764 } else {
765 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
766 ar9285PciePhy_clkreq_always_on_L1_9285,
767 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
768 }
769 } else if (AR_SREV_9280_20_OR_LATER(ah)) {
3365 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280_2, 770 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280_2,
3366 ARRAY_SIZE(ar9280Modes_9280_2), 6); 771 ARRAY_SIZE(ar9280Modes_9280_2), 6);
3367 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280_2, 772 INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280_2,
@@ -3369,21 +774,16 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid,
3369 774
3370 if (ah->ah_config.pcie_clock_req) { 775 if (ah->ah_config.pcie_clock_req) {
3371 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes, 776 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
3372 ar9280PciePhy_clkreq_off_L1_9280, 777 ar9280PciePhy_clkreq_off_L1_9280,
3373 ARRAY_SIZE 778 ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280),2);
3374 (ar9280PciePhy_clkreq_off_L1_9280),
3375 2);
3376 } else { 779 } else {
3377 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes, 780 INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes,
3378 ar9280PciePhy_clkreq_always_on_L1_9280, 781 ar9280PciePhy_clkreq_always_on_L1_9280,
3379 ARRAY_SIZE 782 ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
3380 (ar9280PciePhy_clkreq_always_on_L1_9280),
3381 2);
3382 } 783 }
3383 INIT_INI_ARRAY(&ahp->ah_iniModesAdditional, 784 INIT_INI_ARRAY(&ahp->ah_iniModesAdditional,
3384 ar9280Modes_fast_clock_9280_2, 785 ar9280Modes_fast_clock_9280_2,
3385 ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 786 ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
3386 3);
3387 } else if (AR_SREV_9280_10_OR_LATER(ah)) { 787 } else if (AR_SREV_9280_10_OR_LATER(ah)) {
3388 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280, 788 INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280,
3389 ARRAY_SIZE(ar9280Modes_9280), 6); 789 ARRAY_SIZE(ar9280Modes_9280), 6);
@@ -3469,13 +869,20 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid,
3469 if (ah->ah_isPciExpress) 869 if (ah->ah_isPciExpress)
3470 ath9k_hw_configpcipowersave(ah, 0); 870 ath9k_hw_configpcipowersave(ah, 0);
3471 else 871 else
3472 ar5416DisablePciePhy(ah); 872 ath9k_hw_disablepcie(ah);
3473 873
3474 ecode = ath9k_hw_post_attach(ah); 874 ecode = ath9k_hw_post_attach(ah);
3475 if (ecode != 0) 875 if (ecode != 0)
3476 goto bad; 876 goto bad;
3477 877
3478#ifndef CONFIG_SLOW_ANT_DIV 878 /* rxgain table */
879 if (AR_SREV_9280_20(ah))
880 ath9k_hw_init_rxgain_ini(ah);
881
882 /* txgain table */
883 if (AR_SREV_9280_20(ah))
884 ath9k_hw_init_txgain_ini(ah);
885
3479 if (ah->ah_devid == AR9280_DEVID_PCI) { 886 if (ah->ah_devid == AR9280_DEVID_PCI) {
3480 for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) { 887 for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) {
3481 u32 reg = INI_RA(&ahp->ah_iniModes, i, 0); 888 u32 reg = INI_RA(&ahp->ah_iniModes, i, 0);
@@ -3484,16 +891,16 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid,
3484 u32 val = INI_RA(&ahp->ah_iniModes, i, j); 891 u32 val = INI_RA(&ahp->ah_iniModes, i, j);
3485 892
3486 INI_RA(&ahp->ah_iniModes, i, j) = 893 INI_RA(&ahp->ah_iniModes, i, j) =
3487 ath9k_hw_ini_fixup(ah, &ahp->ah_eeprom, 894 ath9k_hw_ini_fixup(ah,
895 &ahp->ah_eeprom.def,
3488 reg, val); 896 reg, val);
3489 } 897 }
3490 } 898 }
3491 } 899 }
3492#endif
3493 900
3494 if (!ath9k_hw_fill_cap_info(ah)) { 901 if (!ath9k_hw_fill_cap_info(ah)) {
3495 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 902 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3496 "%s:failed ath9k_hw_fill_cap_info\n", __func__); 903 "failed ath9k_hw_fill_cap_info\n");
3497 ecode = -EINVAL; 904 ecode = -EINVAL;
3498 goto bad; 905 goto bad;
3499 } 906 }
@@ -3501,8 +908,7 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid,
3501 ecode = ath9k_hw_init_macaddr(ah); 908 ecode = ath9k_hw_init_macaddr(ah);
3502 if (ecode != 0) { 909 if (ecode != 0) {
3503 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 910 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3504 "%s: failed initializing mac address\n", 911 "failed initializing mac address\n");
3505 __func__);
3506 goto bad; 912 goto bad;
3507 } 913 }
3508 914
@@ -3511,1106 +917,569 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid,
3511 else 917 else
3512 ah->ah_txTrigLevel = (AR_FTRIG_512B >> AR_FTRIG_S); 918 ah->ah_txTrigLevel = (AR_FTRIG_512B >> AR_FTRIG_S);
3513 919
3514#ifndef ATH_NF_PER_CHAN
3515
3516 ath9k_init_nfcal_hist_buffer(ah); 920 ath9k_init_nfcal_hist_buffer(ah);
3517#endif
3518 921
3519 return ah; 922 return ah;
3520
3521bad: 923bad:
3522 if (ahp) 924 if (ahp)
3523 ath9k_hw_detach((struct ath_hal *) ahp); 925 ath9k_hw_detach((struct ath_hal *) ahp);
3524 if (status) 926 if (status)
3525 *status = ecode; 927 *status = ecode;
928
3526 return NULL; 929 return NULL;
3527} 930}
3528 931
3529void ath9k_hw_detach(struct ath_hal *ah) 932static void ath9k_hw_init_bb(struct ath_hal *ah,
933 struct ath9k_channel *chan)
3530{ 934{
3531 if (!AR_SREV_9100(ah)) 935 u32 synthDelay;
3532 ath9k_hw_ani_detach(ah);
3533 ath9k_hw_rfdetach(ah);
3534 936
3535 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); 937 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
3536 kfree(ah); 938 if (IS_CHAN_B(chan))
939 synthDelay = (4 * synthDelay) / 22;
940 else
941 synthDelay /= 10;
942
943 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
944
945 udelay(synthDelay + BASE_ACTIVATE_DELAY);
3537} 946}
3538 947
3539bool ath9k_get_channel_edges(struct ath_hal *ah, 948static void ath9k_hw_init_qos(struct ath_hal *ah)
3540 u16 flags, u16 *low,
3541 u16 *high)
3542{ 949{
3543 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 950 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
951 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
3544 952
3545 if (flags & CHANNEL_5GHZ) { 953 REG_WRITE(ah, AR_QOS_NO_ACK,
3546 *low = pCap->low_5ghz_chan; 954 SM(2, AR_QOS_NO_ACK_TWO_BIT) |
3547 *high = pCap->high_5ghz_chan; 955 SM(5, AR_QOS_NO_ACK_BIT_OFF) |
3548 return true; 956 SM(0, AR_QOS_NO_ACK_BYTE_OFF));
3549 }
3550 if ((flags & CHANNEL_2GHZ)) {
3551 *low = pCap->low_2ghz_chan;
3552 *high = pCap->high_2ghz_chan;
3553 957
3554 return true; 958 REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL);
3555 } 959 REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF);
3556 return false; 960 REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
961 REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
962 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
3557} 963}
3558 964
3559static inline bool ath9k_hw_fill_vpd_table(u8 pwrMin, 965static void ath9k_hw_init_pll(struct ath_hal *ah,
3560 u8 pwrMax, 966 struct ath9k_channel *chan)
3561 u8 *pPwrList,
3562 u8 *pVpdList,
3563 u16
3564 numIntercepts,
3565 u8 *pRetVpdList)
3566{ 967{
3567 u16 i, k; 968 u32 pll;
3568 u8 currPwr = pwrMin;
3569 u16 idxL = 0, idxR = 0;
3570
3571 for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) {
3572 ath9k_hw_get_lower_upper_index(currPwr, pPwrList,
3573 numIntercepts, &(idxL),
3574 &(idxR));
3575 if (idxR < 1)
3576 idxR = 1;
3577 if (idxL == numIntercepts - 1)
3578 idxL = (u16) (numIntercepts - 2);
3579 if (pPwrList[idxL] == pPwrList[idxR])
3580 k = pVpdList[idxL];
3581 else
3582 k = (u16) (((currPwr -
3583 pPwrList[idxL]) *
3584 pVpdList[idxR] +
3585 (pPwrList[idxR] -
3586 currPwr) * pVpdList[idxL]) /
3587 (pPwrList[idxR] -
3588 pPwrList[idxL]));
3589 pRetVpdList[i] = (u8) k;
3590 currPwr += 2;
3591 }
3592 969
3593 return true; 970 if (AR_SREV_9100(ah)) {
3594} 971 if (chan && IS_CHAN_5GHZ(chan))
972 pll = 0x1450;
973 else
974 pll = 0x1458;
975 } else {
976 if (AR_SREV_9280_10_OR_LATER(ah)) {
977 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
3595 978
3596static void 979 if (chan && IS_CHAN_HALF_RATE(chan))
3597ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah, 980 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
3598 struct ath9k_channel *chan, 981 else if (chan && IS_CHAN_QUARTER_RATE(chan))
3599 struct cal_data_per_freq *pRawDataSet, 982 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
3600 u8 *bChans,
3601 u16 availPiers,
3602 u16 tPdGainOverlap,
3603 int16_t *pMinCalPower,
3604 u16 *pPdGainBoundaries,
3605 u8 *pPDADCValues,
3606 u16 numXpdGains)
3607{
3608 int i, j, k;
3609 int16_t ss;
3610 u16 idxL = 0, idxR = 0, numPiers;
3611 static u8 vpdTableL[AR5416_NUM_PD_GAINS]
3612 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
3613 static u8 vpdTableR[AR5416_NUM_PD_GAINS]
3614 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
3615 static u8 vpdTableI[AR5416_NUM_PD_GAINS]
3616 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
3617
3618 u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
3619 u8 minPwrT4[AR5416_NUM_PD_GAINS];
3620 u8 maxPwrT4[AR5416_NUM_PD_GAINS];
3621 int16_t vpdStep;
3622 int16_t tmpVal;
3623 u16 sizeCurrVpdTable, maxIndex, tgtIndex;
3624 bool match;
3625 int16_t minDelta = 0;
3626 struct chan_centers centers;
3627 983
3628 ath9k_hw_get_channel_centers(ah, chan, &centers); 984 if (chan && IS_CHAN_5GHZ(chan)) {
985 pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
3629 986
3630 for (numPiers = 0; numPiers < availPiers; numPiers++) {
3631 if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
3632 break;
3633 }
3634 987
3635 match = ath9k_hw_get_lower_upper_index((u8) 988 if (AR_SREV_9280_20(ah)) {
3636 FREQ2FBIN(centers. 989 if (((chan->channel % 20) == 0)
3637 synth_center, 990 || ((chan->channel % 10) == 0))
3638 IS_CHAN_2GHZ 991 pll = 0x2850;
3639 (chan)), bChans, 992 else
3640 numPiers, &idxL, &idxR); 993 pll = 0x142c;
3641 994 }
3642 if (match) { 995 } else {
3643 for (i = 0; i < numXpdGains; i++) { 996 pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
3644 minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
3645 maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
3646 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
3647 pRawDataSet[idxL].
3648 pwrPdg[i],
3649 pRawDataSet[idxL].
3650 vpdPdg[i],
3651 AR5416_PD_GAIN_ICEPTS,
3652 vpdTableI[i]);
3653 }
3654 } else {
3655 for (i = 0; i < numXpdGains; i++) {
3656 pVpdL = pRawDataSet[idxL].vpdPdg[i];
3657 pPwrL = pRawDataSet[idxL].pwrPdg[i];
3658 pVpdR = pRawDataSet[idxR].vpdPdg[i];
3659 pPwrR = pRawDataSet[idxR].pwrPdg[i];
3660
3661 minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
3662
3663 maxPwrT4[i] =
3664 min(pPwrL[AR5416_PD_GAIN_ICEPTS - 1],
3665 pPwrR[AR5416_PD_GAIN_ICEPTS - 1]);
3666
3667
3668 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
3669 pPwrL, pVpdL,
3670 AR5416_PD_GAIN_ICEPTS,
3671 vpdTableL[i]);
3672 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
3673 pPwrR, pVpdR,
3674 AR5416_PD_GAIN_ICEPTS,
3675 vpdTableR[i]);
3676
3677 for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
3678 vpdTableI[i][j] =
3679 (u8) (ath9k_hw_interpolate
3680 ((u16)
3681 FREQ2FBIN(centers.
3682 synth_center,
3683 IS_CHAN_2GHZ
3684 (chan)),
3685 bChans[idxL],
3686 bChans[idxR], vpdTableL[i]
3687 [j], vpdTableR[i]
3688 [j]));
3689 } 997 }
3690 }
3691 }
3692 998
3693 *pMinCalPower = (int16_t) (minPwrT4[0] / 2); 999 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
3694
3695 k = 0;
3696 for (i = 0; i < numXpdGains; i++) {
3697 if (i == (numXpdGains - 1))
3698 pPdGainBoundaries[i] =
3699 (u16) (maxPwrT4[i] / 2);
3700 else
3701 pPdGainBoundaries[i] =
3702 (u16) ((maxPwrT4[i] +
3703 minPwrT4[i + 1]) / 4);
3704 1000
3705 pPdGainBoundaries[i] = 1001 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
3706 min((u16) AR5416_MAX_RATE_POWER,
3707 pPdGainBoundaries[i]);
3708 1002
3709 if ((i == 0) && !AR_SREV_5416_V20_OR_LATER(ah)) { 1003 if (chan && IS_CHAN_HALF_RATE(chan))
3710 minDelta = pPdGainBoundaries[0] - 23; 1004 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
3711 pPdGainBoundaries[0] = 23; 1005 else if (chan && IS_CHAN_QUARTER_RATE(chan))
3712 } else { 1006 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
3713 minDelta = 0;
3714 }
3715 1007
3716 if (i == 0) { 1008 if (chan && IS_CHAN_5GHZ(chan))
3717 if (AR_SREV_9280_10_OR_LATER(ah)) 1009 pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
3718 ss = (int16_t) (0 - (minPwrT4[i] / 2));
3719 else 1010 else
3720 ss = 0; 1011 pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
3721 } else { 1012 } else {
3722 ss = (int16_t) ((pPdGainBoundaries[i - 1] - 1013 pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
3723 (minPwrT4[i] / 2)) -
3724 tPdGainOverlap + 1 + minDelta);
3725 }
3726 vpdStep = (int16_t) (vpdTableI[i][1] - vpdTableI[i][0]);
3727 vpdStep = (int16_t) ((vpdStep < 1) ? 1 : vpdStep);
3728
3729 while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
3730 tmpVal = (int16_t) (vpdTableI[i][0] + ss * vpdStep);
3731 pPDADCValues[k++] =
3732 (u8) ((tmpVal < 0) ? 0 : tmpVal);
3733 ss++;
3734 }
3735
3736 sizeCurrVpdTable =
3737 (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
3738 tgtIndex = (u8) (pPdGainBoundaries[i] + tPdGainOverlap -
3739 (minPwrT4[i] / 2));
3740 maxIndex = (tgtIndex <
3741 sizeCurrVpdTable) ? tgtIndex : sizeCurrVpdTable;
3742 1014
3743 while ((ss < maxIndex) 1015 if (chan && IS_CHAN_HALF_RATE(chan))
3744 && (k < (AR5416_NUM_PDADC_VALUES - 1))) { 1016 pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
3745 pPDADCValues[k++] = vpdTableI[i][ss++]; 1017 else if (chan && IS_CHAN_QUARTER_RATE(chan))
3746 } 1018 pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
3747 1019
3748 vpdStep = (int16_t) (vpdTableI[i][sizeCurrVpdTable - 1] - 1020 if (chan && IS_CHAN_5GHZ(chan))
3749 vpdTableI[i][sizeCurrVpdTable - 2]); 1021 pll |= SM(0xa, AR_RTC_PLL_DIV);
3750 vpdStep = (int16_t) ((vpdStep < 1) ? 1 : vpdStep); 1022 else
3751 1023 pll |= SM(0xb, AR_RTC_PLL_DIV);
3752 if (tgtIndex > maxIndex) {
3753 while ((ss <= tgtIndex)
3754 && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
3755 tmpVal = (int16_t) ((vpdTableI[i]
3756 [sizeCurrVpdTable -
3757 1] + (ss - maxIndex +
3758 1) * vpdStep));
3759 pPDADCValues[k++] = (u8) ((tmpVal >
3760 255) ? 255 : tmpVal);
3761 ss++;
3762 }
3763 } 1024 }
3764 } 1025 }
1026 REG_WRITE(ah, (u16) (AR_RTC_PLL_CONTROL), pll);
3765 1027
3766 while (i < AR5416_PD_GAINS_IN_MASK) { 1028 udelay(RTC_PLL_SETTLE_DELAY);
3767 pPdGainBoundaries[i] = pPdGainBoundaries[i - 1];
3768 i++;
3769 }
3770 1029
3771 while (k < AR5416_NUM_PDADC_VALUES) { 1030 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
3772 pPDADCValues[k] = pPDADCValues[k - 1];
3773 k++;
3774 }
3775 return;
3776} 1031}
3777 1032
3778static bool 1033static void ath9k_hw_init_chain_masks(struct ath_hal *ah)
3779ath9k_hw_set_power_cal_table(struct ath_hal *ah,
3780 struct ar5416_eeprom *pEepData,
3781 struct ath9k_channel *chan,
3782 int16_t *pTxPowerIndexOffset)
3783{ 1034{
3784 struct cal_data_per_freq *pRawDataset;
3785 u8 *pCalBChans = NULL;
3786 u16 pdGainOverlap_t2;
3787 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
3788 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
3789 u16 numPiers, i, j;
3790 int16_t tMinCalPower;
3791 u16 numXpdGain, xpdMask;
3792 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
3793 u32 reg32, regOffset, regChainOffset;
3794 int16_t modalIdx;
3795 struct ath_hal_5416 *ahp = AH5416(ah); 1035 struct ath_hal_5416 *ahp = AH5416(ah);
1036 int rx_chainmask, tx_chainmask;
3796 1037
3797 modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0; 1038 rx_chainmask = ahp->ah_rxchainmask;
3798 xpdMask = pEepData->modalHeader[modalIdx].xpdGain; 1039 tx_chainmask = ahp->ah_txchainmask;
3799
3800 if ((pEepData->baseEepHeader.
3801 version & AR5416_EEP_VER_MINOR_MASK) >=
3802 AR5416_EEP_MINOR_VER_2) {
3803 pdGainOverlap_t2 =
3804 pEepData->modalHeader[modalIdx].pdGainOverlap;
3805 } else {
3806 pdGainOverlap_t2 =
3807 (u16) (MS
3808 (REG_READ(ah, AR_PHY_TPCRG5),
3809 AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
3810 }
3811
3812 if (IS_CHAN_2GHZ(chan)) {
3813 pCalBChans = pEepData->calFreqPier2G;
3814 numPiers = AR5416_NUM_2G_CAL_PIERS;
3815 } else {
3816 pCalBChans = pEepData->calFreqPier5G;
3817 numPiers = AR5416_NUM_5G_CAL_PIERS;
3818 }
3819
3820 numXpdGain = 0;
3821 1040
3822 for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) { 1041 switch (rx_chainmask) {
3823 if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) { 1042 case 0x5:
3824 if (numXpdGain >= AR5416_NUM_PD_GAINS) 1043 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
3825 break; 1044 AR_PHY_SWAP_ALT_CHAIN);
3826 xpdGainValues[numXpdGain] = 1045 case 0x3:
3827 (u16) (AR5416_PD_GAINS_IN_MASK - i); 1046 if (((ah)->ah_macVersion <= AR_SREV_VERSION_9160)) {
3828 numXpdGain++; 1047 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
1048 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
1049 break;
3829 } 1050 }
1051 case 0x1:
1052 case 0x2:
1053 case 0x7:
1054 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
1055 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
1056 break;
1057 default:
1058 break;
3830 } 1059 }
3831 1060
3832 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, 1061 REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
3833 (numXpdGain - 1) & 0x3); 1062 if (tx_chainmask == 0x5) {
3834 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1, 1063 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
3835 xpdGainValues[0]); 1064 AR_PHY_SWAP_ALT_CHAIN);
3836 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2,
3837 xpdGainValues[1]);
3838 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3,
3839 xpdGainValues[2]);
3840
3841 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
3842 if (AR_SREV_5416_V20_OR_LATER(ah) &&
3843 (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5)
3844 && (i != 0)) {
3845 regChainOffset = (i == 1) ? 0x2000 : 0x1000;
3846 } else
3847 regChainOffset = i * 0x1000;
3848 if (pEepData->baseEepHeader.txMask & (1 << i)) {
3849 if (IS_CHAN_2GHZ(chan))
3850 pRawDataset = pEepData->calPierData2G[i];
3851 else
3852 pRawDataset = pEepData->calPierData5G[i];
3853
3854 ath9k_hw_get_gain_boundaries_pdadcs(ah, chan,
3855 pRawDataset,
3856 pCalBChans,
3857 numPiers,
3858 pdGainOverlap_t2,
3859 &tMinCalPower,
3860 gainBoundaries,
3861 pdadcValues,
3862 numXpdGain);
3863
3864 if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) {
3865
3866 REG_WRITE(ah,
3867 AR_PHY_TPCRG5 + regChainOffset,
3868 SM(pdGainOverlap_t2,
3869 AR_PHY_TPCRG5_PD_GAIN_OVERLAP)
3870 | SM(gainBoundaries[0],
3871 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1)
3872 | SM(gainBoundaries[1],
3873 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2)
3874 | SM(gainBoundaries[2],
3875 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3)
3876 | SM(gainBoundaries[3],
3877 AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4));
3878 }
3879
3880 regOffset =
3881 AR_PHY_BASE + (672 << 2) + regChainOffset;
3882 for (j = 0; j < 32; j++) {
3883 reg32 =
3884 ((pdadcValues[4 * j + 0] & 0xFF) << 0)
3885 | ((pdadcValues[4 * j + 1] & 0xFF) <<
3886 8) | ((pdadcValues[4 * j + 2] &
3887 0xFF) << 16) |
3888 ((pdadcValues[4 * j + 3] & 0xFF) <<
3889 24);
3890 REG_WRITE(ah, regOffset, reg32);
3891
3892 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
3893 "PDADC (%d,%4x): %4.4x %8.8x\n",
3894 i, regChainOffset, regOffset,
3895 reg32);
3896 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
3897 "PDADC: Chain %d | PDADC %3d Value %3d | "
3898 "PDADC %3d Value %3d | PDADC %3d Value %3d | "
3899 "PDADC %3d Value %3d |\n",
3900 i, 4 * j, pdadcValues[4 * j],
3901 4 * j + 1, pdadcValues[4 * j + 1],
3902 4 * j + 2, pdadcValues[4 * j + 2],
3903 4 * j + 3,
3904 pdadcValues[4 * j + 3]);
3905
3906 regOffset += 4;
3907 }
3908 }
3909 } 1065 }
3910 *pTxPowerIndexOffset = 0; 1066 if (AR_SREV_9100(ah))
3911 1067 REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
3912 return true; 1068 REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
3913} 1069}
3914 1070
3915void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore) 1071static void ath9k_hw_init_interrupt_masks(struct ath_hal *ah,
1072 enum nl80211_iftype opmode)
3916{ 1073{
3917 struct ath_hal_5416 *ahp = AH5416(ah); 1074 struct ath_hal_5416 *ahp = AH5416(ah);
3918 u8 i;
3919
3920 if (ah->ah_isPciExpress != true)
3921 return;
3922 1075
3923 if (ah->ah_config.pcie_powersave_enable == 2) 1076 ahp->ah_maskReg = AR_IMR_TXERR |
3924 return; 1077 AR_IMR_TXURN |
1078 AR_IMR_RXERR |
1079 AR_IMR_RXORN |
1080 AR_IMR_BCNMISC;
3925 1081
3926 if (restore) 1082 if (ahp->ah_intrMitigation)
3927 return; 1083 ahp->ah_maskReg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
1084 else
1085 ahp->ah_maskReg |= AR_IMR_RXOK;
3928 1086
3929 if (AR_SREV_9280_20_OR_LATER(ah)) { 1087 ahp->ah_maskReg |= AR_IMR_TXOK;
3930 for (i = 0; i < ahp->ah_iniPcieSerdes.ia_rows; i++) {
3931 REG_WRITE(ah, INI_RA(&ahp->ah_iniPcieSerdes, i, 0),
3932 INI_RA(&ahp->ah_iniPcieSerdes, i, 1));
3933 }
3934 udelay(1000);
3935 } else if (AR_SREV_9280(ah)
3936 && (ah->ah_macRev == AR_SREV_REVISION_9280_10)) {
3937 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
3938 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
3939 1088
3940 REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019); 1089 if (opmode == NL80211_IFTYPE_AP)
3941 REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820); 1090 ahp->ah_maskReg |= AR_IMR_MIB;
3942 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
3943 1091
3944 if (ah->ah_config.pcie_clock_req) 1092 REG_WRITE(ah, AR_IMR, ahp->ah_maskReg);
3945 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc); 1093 REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT);
3946 else
3947 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
3948 1094
3949 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); 1095 if (!AR_SREV_9100(ah)) {
3950 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); 1096 REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
3951 REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007); 1097 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
1098 REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
1099 }
1100}
3952 1101
3953 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 1102static bool ath9k_hw_set_ack_timeout(struct ath_hal *ah, u32 us)
1103{
1104 struct ath_hal_5416 *ahp = AH5416(ah);
3954 1105
3955 udelay(1000); 1106 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
1107 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad ack timeout %u\n", us);
1108 ahp->ah_acktimeout = (u32) -1;
1109 return false;
3956 } else { 1110 } else {
3957 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); 1111 REG_RMW_FIELD(ah, AR_TIME_OUT,
3958 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); 1112 AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
3959 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039); 1113 ahp->ah_acktimeout = us;
3960 REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824); 1114 return true;
3961 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
3962 REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
3963 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
3964 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
3965 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
3966 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
3967 } 1115 }
1116}
3968 1117
3969 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); 1118static bool ath9k_hw_set_cts_timeout(struct ath_hal *ah, u32 us)
1119{
1120 struct ath_hal_5416 *ahp = AH5416(ah);
3970 1121
3971 if (ah->ah_config.pcie_waen) { 1122 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
3972 REG_WRITE(ah, AR_WA, ah->ah_config.pcie_waen); 1123 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad cts timeout %u\n", us);
1124 ahp->ah_ctstimeout = (u32) -1;
1125 return false;
3973 } else { 1126 } else {
3974 if (AR_SREV_9280(ah)) 1127 REG_RMW_FIELD(ah, AR_TIME_OUT,
3975 REG_WRITE(ah, AR_WA, 0x0040073f); 1128 AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us));
3976 else 1129 ahp->ah_ctstimeout = us;
3977 REG_WRITE(ah, AR_WA, 0x0000073f); 1130 return true;
3978 } 1131 }
3979} 1132}
3980 1133
3981static void 1134static bool ath9k_hw_set_global_txtimeout(struct ath_hal *ah, u32 tu)
3982ath9k_hw_get_legacy_target_powers(struct ath_hal *ah,
3983 struct ath9k_channel *chan,
3984 struct cal_target_power_leg *powInfo,
3985 u16 numChannels,
3986 struct cal_target_power_leg *pNewPower,
3987 u16 numRates,
3988 bool isExtTarget)
3989{ 1135{
3990 u16 clo, chi; 1136 struct ath_hal_5416 *ahp = AH5416(ah);
3991 int i;
3992 int matchIndex = -1, lowIndex = -1;
3993 u16 freq;
3994 struct chan_centers centers;
3995
3996 ath9k_hw_get_channel_centers(ah, chan, &centers);
3997 freq = (isExtTarget) ? centers.ext_center : centers.ctl_center;
3998 1137
3999 if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, 1138 if (tu > 0xFFFF) {
4000 IS_CHAN_2GHZ(chan))) { 1139 DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
4001 matchIndex = 0; 1140 "bad global tx timeout %u\n", tu);
1141 ahp->ah_globaltxtimeout = (u32) -1;
1142 return false;
4002 } else { 1143 } else {
4003 for (i = 0; (i < numChannels) 1144 REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
4004 && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) { 1145 ahp->ah_globaltxtimeout = tu;
4005 if (freq == 1146 return true;
4006 ath9k_hw_fbin2freq(powInfo[i].bChannel,
4007 IS_CHAN_2GHZ(chan))) {
4008 matchIndex = i;
4009 break;
4010 } else if ((freq <
4011 ath9k_hw_fbin2freq(powInfo[i].bChannel,
4012 IS_CHAN_2GHZ(chan)))
4013 && (freq >
4014 ath9k_hw_fbin2freq(powInfo[i - 1].
4015 bChannel,
4016 IS_CHAN_2GHZ
4017 (chan)))) {
4018 lowIndex = i - 1;
4019 break;
4020 }
4021 }
4022 if ((matchIndex == -1) && (lowIndex == -1))
4023 matchIndex = i - 1;
4024 } 1147 }
1148}
4025 1149
4026 if (matchIndex != -1) { 1150static void ath9k_hw_init_user_settings(struct ath_hal *ah)
4027 *pNewPower = powInfo[matchIndex]; 1151{
4028 } else { 1152 struct ath_hal_5416 *ahp = AH5416(ah);
4029 clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel, 1153
4030 IS_CHAN_2GHZ(chan)); 1154 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "ahp->ah_miscMode 0x%x\n",
4031 chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel, 1155 ahp->ah_miscMode);
4032 IS_CHAN_2GHZ(chan)); 1156
4033 1157 if (ahp->ah_miscMode != 0)
4034 for (i = 0; i < numRates; i++) { 1158 REG_WRITE(ah, AR_PCU_MISC,
4035 pNewPower->tPow2x[i] = 1159 REG_READ(ah, AR_PCU_MISC) | ahp->ah_miscMode);
4036 (u8) ath9k_hw_interpolate(freq, clo, chi, 1160 if (ahp->ah_slottime != (u32) -1)
4037 powInfo 1161 ath9k_hw_setslottime(ah, ahp->ah_slottime);
4038 [lowIndex]. 1162 if (ahp->ah_acktimeout != (u32) -1)
4039 tPow2x[i], 1163 ath9k_hw_set_ack_timeout(ah, ahp->ah_acktimeout);
4040 powInfo 1164 if (ahp->ah_ctstimeout != (u32) -1)
4041 [lowIndex + 1165 ath9k_hw_set_cts_timeout(ah, ahp->ah_ctstimeout);
4042 1].tPow2x[i]); 1166 if (ahp->ah_globaltxtimeout != (u32) -1)
4043 } 1167 ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout);
4044 }
4045} 1168}
4046 1169
4047static void 1170const char *ath9k_hw_probe(u16 vendorid, u16 devid)
4048ath9k_hw_get_target_powers(struct ath_hal *ah,
4049 struct ath9k_channel *chan,
4050 struct cal_target_power_ht *powInfo,
4051 u16 numChannels,
4052 struct cal_target_power_ht *pNewPower,
4053 u16 numRates,
4054 bool isHt40Target)
4055{ 1171{
4056 u16 clo, chi; 1172 return vendorid == ATHEROS_VENDOR_ID ?
4057 int i; 1173 ath9k_hw_devname(devid) : NULL;
4058 int matchIndex = -1, lowIndex = -1; 1174}
4059 u16 freq;
4060 struct chan_centers centers;
4061 1175
4062 ath9k_hw_get_channel_centers(ah, chan, &centers); 1176void ath9k_hw_detach(struct ath_hal *ah)
4063 freq = isHt40Target ? centers.synth_center : centers.ctl_center; 1177{
1178 if (!AR_SREV_9100(ah))
1179 ath9k_hw_ani_detach(ah);
4064 1180
4065 if (freq <= 1181 ath9k_hw_rfdetach(ah);
4066 ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) { 1182 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
4067 matchIndex = 0; 1183 kfree(ah);
4068 } else { 1184}
4069 for (i = 0; (i < numChannels)
4070 && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) {
4071 if (freq ==
4072 ath9k_hw_fbin2freq(powInfo[i].bChannel,
4073 IS_CHAN_2GHZ(chan))) {
4074 matchIndex = i;
4075 break;
4076 } else
4077 if ((freq <
4078 ath9k_hw_fbin2freq(powInfo[i].bChannel,
4079 IS_CHAN_2GHZ(chan)))
4080 && (freq >
4081 ath9k_hw_fbin2freq(powInfo[i - 1].
4082 bChannel,
4083 IS_CHAN_2GHZ
4084 (chan)))) {
4085 lowIndex = i - 1;
4086 break;
4087 }
4088 }
4089 if ((matchIndex == -1) && (lowIndex == -1))
4090 matchIndex = i - 1;
4091 }
4092 1185
4093 if (matchIndex != -1) { 1186struct ath_hal *ath9k_hw_attach(u16 devid, struct ath_softc *sc,
4094 *pNewPower = powInfo[matchIndex]; 1187 void __iomem *mem, int *error)
4095 } else { 1188{
4096 clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel, 1189 struct ath_hal *ah = NULL;
4097 IS_CHAN_2GHZ(chan)); 1190
4098 chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel, 1191 switch (devid) {
4099 IS_CHAN_2GHZ(chan)); 1192 case AR5416_DEVID_PCI:
4100 1193 case AR5416_DEVID_PCIE:
4101 for (i = 0; i < numRates; i++) { 1194 case AR9160_DEVID_PCI:
4102 pNewPower->tPow2x[i] = 1195 case AR9280_DEVID_PCI:
4103 (u8) ath9k_hw_interpolate(freq, clo, chi, 1196 case AR9280_DEVID_PCIE:
4104 powInfo 1197 case AR9285_DEVID_PCIE:
4105 [lowIndex]. 1198 ah = ath9k_hw_do_attach(devid, sc, mem, error);
4106 tPow2x[i], 1199 break;
4107 powInfo 1200 default:
4108 [lowIndex + 1201 *error = -ENXIO;
4109 1].tPow2x[i]); 1202 break;
4110 }
4111 } 1203 }
1204
1205 return ah;
4112} 1206}
4113 1207
4114static u16 1208/*******/
4115ath9k_hw_get_max_edge_power(u16 freq, 1209/* INI */
4116 struct cal_ctl_edges *pRdEdgesPower, 1210/*******/
4117 bool is2GHz) 1211
1212static void ath9k_hw_override_ini(struct ath_hal *ah,
1213 struct ath9k_channel *chan)
4118{ 1214{
4119 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; 1215 /*
4120 int i; 1216 * Set the RX_ABORT and RX_DIS and clear if off only after
1217 * RXE is set for MAC. This prevents frames with corrupted
1218 * descriptor status.
1219 */
1220 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
4121 1221
4122 for (i = 0; (i < AR5416_NUM_BAND_EDGES) 1222
4123 && (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) { 1223 if (!AR_SREV_5416_V20_OR_LATER(ah) ||
4124 if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, 1224 AR_SREV_9280_10_OR_LATER(ah))
4125 is2GHz)) { 1225 return;
4126 twiceMaxEdgePower = pRdEdgesPower[i].tPower; 1226
4127 break; 1227 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
4128 } else if ((i > 0) 1228}
4129 && (freq < 1229
4130 ath9k_hw_fbin2freq(pRdEdgesPower[i]. 1230static u32 ath9k_hw_def_ini_fixup(struct ath_hal *ah,
4131 bChannel, is2GHz))) { 1231 struct ar5416_eeprom_def *pEepData,
4132 if (ath9k_hw_fbin2freq 1232 u32 reg, u32 value)
4133 (pRdEdgesPower[i - 1].bChannel, is2GHz) < freq 1233{
4134 && pRdEdgesPower[i - 1].flag) { 1234 struct base_eep_header *pBase = &(pEepData->baseEepHeader);
4135 twiceMaxEdgePower = 1235
4136 pRdEdgesPower[i - 1].tPower; 1236 switch (ah->ah_devid) {
1237 case AR9280_DEVID_PCI:
1238 if (reg == 0x7894) {
1239 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
1240 "ini VAL: %x EEPROM: %x\n", value,
1241 (pBase->version & 0xff));
1242
1243 if ((pBase->version & 0xff) > 0x0a) {
1244 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
1245 "PWDCLKIND: %d\n",
1246 pBase->pwdclkind);
1247 value &= ~AR_AN_TOP2_PWDCLKIND;
1248 value |= AR_AN_TOP2_PWDCLKIND &
1249 (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
1250 } else {
1251 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
1252 "PWDCLKIND Earlier Rev\n");
4137 } 1253 }
4138 break; 1254
1255 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
1256 "final ini VAL: %x\n", value);
4139 } 1257 }
1258 break;
4140 } 1259 }
4141 return twiceMaxEdgePower; 1260
1261 return value;
4142} 1262}
4143 1263
4144static bool 1264static u32 ath9k_hw_ini_fixup(struct ath_hal *ah,
4145ath9k_hw_set_power_per_rate_table(struct ath_hal *ah, 1265 struct ar5416_eeprom_def *pEepData,
4146 struct ar5416_eeprom *pEepData, 1266 u32 reg, u32 value)
4147 struct ath9k_channel *chan,
4148 int16_t *ratesArray,
4149 u16 cfgCtl,
4150 u8 AntennaReduction,
4151 u8 twiceMaxRegulatoryPower,
4152 u8 powerLimit)
4153{ 1267{
4154 u8 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; 1268 struct ath_hal_5416 *ahp = AH5416(ah);
4155 static const u16 tpScaleReductionTable[5] =
4156 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER };
4157 1269
4158 int i; 1270 if (ahp->ah_eep_map == EEP_MAP_4KBITS)
4159 int8_t twiceLargestAntenna; 1271 return value;
4160 struct cal_ctl_data *rep; 1272 else
4161 struct cal_target_power_leg targetPowerOfdm, targetPowerCck = { 1273 return ath9k_hw_def_ini_fixup(ah, pEepData, reg, value);
4162 0, { 0, 0, 0, 0} 1274}
4163 }; 1275
4164 struct cal_target_power_leg targetPowerOfdmExt = { 1276static int ath9k_hw_process_ini(struct ath_hal *ah,
4165 0, { 0, 0, 0, 0} }, targetPowerCckExt = { 1277 struct ath9k_channel *chan,
4166 0, { 0, 0, 0, 0 } 1278 enum ath9k_ht_macmode macmode)
4167 }; 1279{
4168 struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = { 1280 int i, regWrites = 0;
4169 0, {0, 0, 0, 0}
4170 };
4171 u8 scaledPower = 0, minCtlPower, maxRegAllowedPower;
4172 u16 ctlModesFor11a[] =
4173 { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 };
4174 u16 ctlModesFor11g[] =
4175 { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT,
4176 CTL_2GHT40
4177 };
4178 u16 numCtlModes, *pCtlMode, ctlMode, freq;
4179 struct chan_centers centers;
4180 int tx_chainmask;
4181 u8 twiceMinEdgePower;
4182 struct ath_hal_5416 *ahp = AH5416(ah); 1281 struct ath_hal_5416 *ahp = AH5416(ah);
1282 u32 modesIndex, freqIndex;
1283 int status;
4183 1284
4184 tx_chainmask = ahp->ah_txchainmask; 1285 switch (chan->chanmode) {
1286 case CHANNEL_A:
1287 case CHANNEL_A_HT20:
1288 modesIndex = 1;
1289 freqIndex = 1;
1290 break;
1291 case CHANNEL_A_HT40PLUS:
1292 case CHANNEL_A_HT40MINUS:
1293 modesIndex = 2;
1294 freqIndex = 1;
1295 break;
1296 case CHANNEL_G:
1297 case CHANNEL_G_HT20:
1298 case CHANNEL_B:
1299 modesIndex = 4;
1300 freqIndex = 2;
1301 break;
1302 case CHANNEL_G_HT40PLUS:
1303 case CHANNEL_G_HT40MINUS:
1304 modesIndex = 3;
1305 freqIndex = 2;
1306 break;
4185 1307
4186 ath9k_hw_get_channel_centers(ah, chan, &centers); 1308 default:
1309 return -EINVAL;
1310 }
4187 1311
4188 twiceLargestAntenna = max( 1312 REG_WRITE(ah, AR_PHY(0), 0x00000007);
4189 pEepData->modalHeader
4190 [IS_CHAN_2GHZ(chan)].antennaGainCh[0],
4191 pEepData->modalHeader
4192 [IS_CHAN_2GHZ(chan)].antennaGainCh[1]);
4193 1313
4194 twiceLargestAntenna = max((u8) twiceLargestAntenna, 1314 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
4195 pEepData->modalHeader
4196 [IS_CHAN_2GHZ(chan)].antennaGainCh[2]);
4197 1315
4198 twiceLargestAntenna = 1316 ath9k_hw_set_addac(ah, chan);
4199 (int8_t) min(AntennaReduction - twiceLargestAntenna, 0);
4200 1317
4201 maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna; 1318 if (AR_SREV_5416_V22_OR_LATER(ah)) {
1319 REG_WRITE_ARRAY(&ahp->ah_iniAddac, 1, regWrites);
1320 } else {
1321 struct ar5416IniArray temp;
1322 u32 addacSize =
1323 sizeof(u32) * ahp->ah_iniAddac.ia_rows *
1324 ahp->ah_iniAddac.ia_columns;
4202 1325
4203 if (ah->ah_tpScale != ATH9K_TP_SCALE_MAX) { 1326 memcpy(ahp->ah_addac5416_21,
4204 maxRegAllowedPower -= 1327 ahp->ah_iniAddac.ia_array, addacSize);
4205 (tpScaleReductionTable[(ah->ah_tpScale)] * 2);
4206 }
4207 1328
4208 scaledPower = min(powerLimit, maxRegAllowedPower); 1329 (ahp->ah_addac5416_21)[31 * ahp->ah_iniAddac.ia_columns + 1] = 0;
4209 1330
4210 switch (ar5416_get_ntxchains(tx_chainmask)) { 1331 temp.ia_array = ahp->ah_addac5416_21;
4211 case 1: 1332 temp.ia_columns = ahp->ah_iniAddac.ia_columns;
4212 break; 1333 temp.ia_rows = ahp->ah_iniAddac.ia_rows;
4213 case 2: 1334 REG_WRITE_ARRAY(&temp, 1, regWrites);
4214 scaledPower -=
4215 pEepData->modalHeader[IS_CHAN_2GHZ(chan)].
4216 pwrDecreaseFor2Chain;
4217 break;
4218 case 3:
4219 scaledPower -=
4220 pEepData->modalHeader[IS_CHAN_2GHZ(chan)].
4221 pwrDecreaseFor3Chain;
4222 break;
4223 } 1335 }
4224 1336
4225 scaledPower = max(0, (int32_t) scaledPower); 1337 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
4226
4227 if (IS_CHAN_2GHZ(chan)) {
4228 numCtlModes =
4229 ARRAY_SIZE(ctlModesFor11g) -
4230 SUB_NUM_CTL_MODES_AT_2G_40;
4231 pCtlMode = ctlModesFor11g;
4232
4233 ath9k_hw_get_legacy_target_powers(ah, chan,
4234 pEepData->
4235 calTargetPowerCck,
4236 AR5416_NUM_2G_CCK_TARGET_POWERS,
4237 &targetPowerCck, 4,
4238 false);
4239 ath9k_hw_get_legacy_target_powers(ah, chan,
4240 pEepData->
4241 calTargetPower2G,
4242 AR5416_NUM_2G_20_TARGET_POWERS,
4243 &targetPowerOfdm, 4,
4244 false);
4245 ath9k_hw_get_target_powers(ah, chan,
4246 pEepData->calTargetPower2GHT20,
4247 AR5416_NUM_2G_20_TARGET_POWERS,
4248 &targetPowerHt20, 8, false);
4249 1338
4250 if (IS_CHAN_HT40(chan)) { 1339 for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) {
4251 numCtlModes = ARRAY_SIZE(ctlModesFor11g); 1340 u32 reg = INI_RA(&ahp->ah_iniModes, i, 0);
4252 ath9k_hw_get_target_powers(ah, chan, 1341 u32 val = INI_RA(&ahp->ah_iniModes, i, modesIndex);
4253 pEepData->
4254 calTargetPower2GHT40,
4255 AR5416_NUM_2G_40_TARGET_POWERS,
4256 &targetPowerHt40, 8,
4257 true);
4258 ath9k_hw_get_legacy_target_powers(ah, chan,
4259 pEepData->
4260 calTargetPowerCck,
4261 AR5416_NUM_2G_CCK_TARGET_POWERS,
4262 &targetPowerCckExt,
4263 4, true);
4264 ath9k_hw_get_legacy_target_powers(ah, chan,
4265 pEepData->
4266 calTargetPower2G,
4267 AR5416_NUM_2G_20_TARGET_POWERS,
4268 &targetPowerOfdmExt,
4269 4, true);
4270 }
4271 } else {
4272 1342
4273 numCtlModes = 1343 REG_WRITE(ah, reg, val);
4274 ARRAY_SIZE(ctlModesFor11a) -
4275 SUB_NUM_CTL_MODES_AT_5G_40;
4276 pCtlMode = ctlModesFor11a;
4277
4278 ath9k_hw_get_legacy_target_powers(ah, chan,
4279 pEepData->
4280 calTargetPower5G,
4281 AR5416_NUM_5G_20_TARGET_POWERS,
4282 &targetPowerOfdm, 4,
4283 false);
4284 ath9k_hw_get_target_powers(ah, chan,
4285 pEepData->calTargetPower5GHT20,
4286 AR5416_NUM_5G_20_TARGET_POWERS,
4287 &targetPowerHt20, 8, false);
4288 1344
4289 if (IS_CHAN_HT40(chan)) { 1345 if (reg >= 0x7800 && reg < 0x78a0
4290 numCtlModes = ARRAY_SIZE(ctlModesFor11a); 1346 && ah->ah_config.analog_shiftreg) {
4291 ath9k_hw_get_target_powers(ah, chan, 1347 udelay(100);
4292 pEepData->
4293 calTargetPower5GHT40,
4294 AR5416_NUM_5G_40_TARGET_POWERS,
4295 &targetPowerHt40, 8,
4296 true);
4297 ath9k_hw_get_legacy_target_powers(ah, chan,
4298 pEepData->
4299 calTargetPower5G,
4300 AR5416_NUM_5G_20_TARGET_POWERS,
4301 &targetPowerOfdmExt,
4302 4, true);
4303 } 1348 }
1349
1350 DO_DELAY(regWrites);
4304 } 1351 }
4305 1352
4306 for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { 1353 if (AR_SREV_9280(ah))
4307 bool isHt40CtlMode = 1354 REG_WRITE_ARRAY(&ahp->ah_iniModesRxGain, modesIndex, regWrites);
4308 (pCtlMode[ctlMode] == CTL_5GHT40)
4309 || (pCtlMode[ctlMode] == CTL_2GHT40);
4310 if (isHt40CtlMode)
4311 freq = centers.synth_center;
4312 else if (pCtlMode[ctlMode] & EXT_ADDITIVE)
4313 freq = centers.ext_center;
4314 else
4315 freq = centers.ctl_center;
4316 1355
4317 if (ar5416_get_eep_ver(ahp) == 14 1356 if (AR_SREV_9280(ah))
4318 && ar5416_get_eep_rev(ahp) <= 2) 1357 REG_WRITE_ARRAY(&ahp->ah_iniModesTxGain, modesIndex, regWrites);
4319 twiceMaxEdgePower = AR5416_MAX_RATE_POWER;
4320 1358
4321 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, 1359 for (i = 0; i < ahp->ah_iniCommon.ia_rows; i++) {
4322 "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, " 1360 u32 reg = INI_RA(&ahp->ah_iniCommon, i, 0);
4323 "EXT_ADDITIVE %d\n", 1361 u32 val = INI_RA(&ahp->ah_iniCommon, i, 1);
4324 ctlMode, numCtlModes, isHt40CtlMode,
4325 (pCtlMode[ctlMode] & EXT_ADDITIVE));
4326 1362
4327 for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; 1363 REG_WRITE(ah, reg, val);
4328 i++) { 1364
4329 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, 1365 if (reg >= 0x7800 && reg < 0x78a0
4330 " LOOP-Ctlidx %d: cfgCtl 0x%2.2x " 1366 && ah->ah_config.analog_shiftreg) {
4331 "pCtlMode 0x%2.2x ctlIndex 0x%2.2x " 1367 udelay(100);
4332 "chan %d\n",
4333 i, cfgCtl, pCtlMode[ctlMode],
4334 pEepData->ctlIndex[i], chan->channel);
4335
4336 if ((((cfgCtl & ~CTL_MODE_M) |
4337 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
4338 pEepData->ctlIndex[i])
4339 ||
4340 (((cfgCtl & ~CTL_MODE_M) |
4341 (pCtlMode[ctlMode] & CTL_MODE_M)) ==
4342 ((pEepData->
4343 ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) {
4344 rep = &(pEepData->ctlData[i]);
4345
4346 twiceMinEdgePower =
4347 ath9k_hw_get_max_edge_power(freq,
4348 rep->
4349 ctlEdges
4350 [ar5416_get_ntxchains
4351 (tx_chainmask)
4352 - 1],
4353 IS_CHAN_2GHZ
4354 (chan));
4355
4356 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
4357 " MATCH-EE_IDX %d: ch %d is2 %d "
4358 "2xMinEdge %d chainmask %d chains %d\n",
4359 i, freq, IS_CHAN_2GHZ(chan),
4360 twiceMinEdgePower, tx_chainmask,
4361 ar5416_get_ntxchains
4362 (tx_chainmask));
4363 if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) {
4364 twiceMaxEdgePower =
4365 min(twiceMaxEdgePower,
4366 twiceMinEdgePower);
4367 } else {
4368 twiceMaxEdgePower =
4369 twiceMinEdgePower;
4370 break;
4371 }
4372 }
4373 } 1368 }
4374 1369
4375 minCtlPower = min(twiceMaxEdgePower, scaledPower); 1370 DO_DELAY(regWrites);
1371 }
1372
1373 ath9k_hw_write_regs(ah, modesIndex, freqIndex, regWrites);
4376 1374
4377 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, 1375 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
4378 " SEL-Min ctlMode %d pCtlMode %d " 1376 REG_WRITE_ARRAY(&ahp->ah_iniModesAdditional, modesIndex,
4379 "2xMaxEdge %d sP %d minCtlPwr %d\n", 1377 regWrites);
4380 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
4381 scaledPower, minCtlPower);
4382
4383 switch (pCtlMode[ctlMode]) {
4384 case CTL_11B:
4385 for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x);
4386 i++) {
4387 targetPowerCck.tPow2x[i] =
4388 min(targetPowerCck.tPow2x[i],
4389 minCtlPower);
4390 }
4391 break;
4392 case CTL_11A:
4393 case CTL_11G:
4394 for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x);
4395 i++) {
4396 targetPowerOfdm.tPow2x[i] =
4397 min(targetPowerOfdm.tPow2x[i],
4398 minCtlPower);
4399 }
4400 break;
4401 case CTL_5GHT20:
4402 case CTL_2GHT20:
4403 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x);
4404 i++) {
4405 targetPowerHt20.tPow2x[i] =
4406 min(targetPowerHt20.tPow2x[i],
4407 minCtlPower);
4408 }
4409 break;
4410 case CTL_11B_EXT:
4411 targetPowerCckExt.tPow2x[0] =
4412 min(targetPowerCckExt.tPow2x[0], minCtlPower);
4413 break;
4414 case CTL_11A_EXT:
4415 case CTL_11G_EXT:
4416 targetPowerOfdmExt.tPow2x[0] =
4417 min(targetPowerOfdmExt.tPow2x[0], minCtlPower);
4418 break;
4419 case CTL_5GHT40:
4420 case CTL_2GHT40:
4421 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x);
4422 i++) {
4423 targetPowerHt40.tPow2x[i] =
4424 min(targetPowerHt40.tPow2x[i],
4425 minCtlPower);
4426 }
4427 break;
4428 default:
4429 break;
4430 }
4431 } 1378 }
4432 1379
4433 ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] = 1380 ath9k_hw_override_ini(ah, chan);
4434 ratesArray[rate18mb] = ratesArray[rate24mb] = 1381 ath9k_hw_set_regs(ah, chan, macmode);
4435 targetPowerOfdm.tPow2x[0]; 1382 ath9k_hw_init_chain_masks(ah);
4436 ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1]; 1383
4437 ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2]; 1384 status = ath9k_hw_set_txpower(ah, chan,
4438 ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3]; 1385 ath9k_regd_get_ctl(ah, chan),
4439 ratesArray[rateXr] = targetPowerOfdm.tPow2x[0]; 1386 ath9k_regd_get_antenna_allowed(ah,
4440 1387 chan),
4441 for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) 1388 chan->maxRegTxPower * 2,
4442 ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i]; 1389 min((u32) MAX_RATE_POWER,
4443 1390 (u32) ah->ah_powerLimit));
4444 if (IS_CHAN_2GHZ(chan)) { 1391 if (status != 0) {
4445 ratesArray[rate1l] = targetPowerCck.tPow2x[0]; 1392 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
4446 ratesArray[rate2s] = ratesArray[rate2l] = 1393 "error init'ing transmit power\n");
4447 targetPowerCck.tPow2x[1]; 1394 return -EIO;
4448 ratesArray[rate5_5s] = ratesArray[rate5_5l] =
4449 targetPowerCck.tPow2x[2];
4450 ;
4451 ratesArray[rate11s] = ratesArray[rate11l] =
4452 targetPowerCck.tPow2x[3];
4453 ;
4454 } 1395 }
4455 if (IS_CHAN_HT40(chan)) { 1396
4456 for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { 1397 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
4457 ratesArray[rateHt40_0 + i] = 1398 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
4458 targetPowerHt40.tPow2x[i]; 1399 "ar5416SetRfRegs failed\n");
4459 } 1400 return -EIO;
4460 ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0];
4461 ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0];
4462 ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0];
4463 if (IS_CHAN_2GHZ(chan)) {
4464 ratesArray[rateExtCck] =
4465 targetPowerCckExt.tPow2x[0];
4466 }
4467 } 1401 }
4468 return true; 1402
1403 return 0;
4469} 1404}
4470 1405
4471static int 1406/****************************************/
4472ath9k_hw_set_txpower(struct ath_hal *ah, 1407/* Reset and Channel Switching Routines */
4473 struct ar5416_eeprom *pEepData, 1408/****************************************/
4474 struct ath9k_channel *chan, 1409
4475 u16 cfgCtl, 1410static void ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan)
4476 u8 twiceAntennaReduction,
4477 u8 twiceMaxRegulatoryPower,
4478 u8 powerLimit)
4479{ 1411{
4480 struct modal_eep_header *pModal = 1412 u32 rfMode = 0;
4481 &(pEepData->modalHeader[IS_CHAN_2GHZ(chan)]);
4482 int16_t ratesArray[Ar5416RateSize];
4483 int16_t txPowerIndexOffset = 0;
4484 u8 ht40PowerIncForPdadc = 2;
4485 int i;
4486 1413
4487 memset(ratesArray, 0, sizeof(ratesArray)); 1414 if (chan == NULL)
1415 return;
4488 1416
4489 if ((pEepData->baseEepHeader. 1417 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
4490 version & AR5416_EEP_VER_MINOR_MASK) >= 1418 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
4491 AR5416_EEP_MINOR_VER_2) {
4492 ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
4493 }
4494 1419
4495 if (!ath9k_hw_set_power_per_rate_table(ah, pEepData, chan, 1420 if (!AR_SREV_9280_10_OR_LATER(ah))
4496 &ratesArray[0], cfgCtl, 1421 rfMode |= (IS_CHAN_5GHZ(chan)) ?
4497 twiceAntennaReduction, 1422 AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
4498 twiceMaxRegulatoryPower,
4499 powerLimit)) {
4500 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
4501 "ath9k_hw_set_txpower: unable to set "
4502 "tx power per rate table\n");
4503 return -EIO;
4504 }
4505 1423
4506 if (!ath9k_hw_set_power_cal_table 1424 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
4507 (ah, pEepData, chan, &txPowerIndexOffset)) { 1425 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
4508 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
4509 "ath9k_hw_set_txpower: unable to set power table\n");
4510 return -EIO;
4511 }
4512 1426
4513 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { 1427 REG_WRITE(ah, AR_PHY_MODE, rfMode);
4514 ratesArray[i] = 1428}
4515 (int16_t) (txPowerIndexOffset + ratesArray[i]);
4516 if (ratesArray[i] > AR5416_MAX_RATE_POWER)
4517 ratesArray[i] = AR5416_MAX_RATE_POWER;
4518 }
4519 1429
4520 if (AR_SREV_9280_10_OR_LATER(ah)) { 1430static void ath9k_hw_mark_phy_inactive(struct ath_hal *ah)
4521 for (i = 0; i < Ar5416RateSize; i++) 1431{
4522 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2; 1432 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
4523 } 1433}
4524 1434
4525 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, 1435static inline void ath9k_hw_set_dma(struct ath_hal *ah)
4526 ATH9K_POW_SM(ratesArray[rate18mb], 24) 1436{
4527 | ATH9K_POW_SM(ratesArray[rate12mb], 16) 1437 u32 regval;
4528 | ATH9K_POW_SM(ratesArray[rate9mb], 8)
4529 | ATH9K_POW_SM(ratesArray[rate6mb], 0)
4530 );
4531 REG_WRITE(ah, AR_PHY_POWER_TX_RATE2,
4532 ATH9K_POW_SM(ratesArray[rate54mb], 24)
4533 | ATH9K_POW_SM(ratesArray[rate48mb], 16)
4534 | ATH9K_POW_SM(ratesArray[rate36mb], 8)
4535 | ATH9K_POW_SM(ratesArray[rate24mb], 0)
4536 );
4537
4538 if (IS_CHAN_2GHZ(chan)) {
4539 REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
4540 ATH9K_POW_SM(ratesArray[rate2s], 24)
4541 | ATH9K_POW_SM(ratesArray[rate2l], 16)
4542 | ATH9K_POW_SM(ratesArray[rateXr], 8)
4543 | ATH9K_POW_SM(ratesArray[rate1l], 0)
4544 );
4545 REG_WRITE(ah, AR_PHY_POWER_TX_RATE4,
4546 ATH9K_POW_SM(ratesArray[rate11s], 24)
4547 | ATH9K_POW_SM(ratesArray[rate11l], 16)
4548 | ATH9K_POW_SM(ratesArray[rate5_5s], 8)
4549 | ATH9K_POW_SM(ratesArray[rate5_5l], 0)
4550 );
4551 }
4552 1438
4553 REG_WRITE(ah, AR_PHY_POWER_TX_RATE5, 1439 regval = REG_READ(ah, AR_AHB_MODE);
4554 ATH9K_POW_SM(ratesArray[rateHt20_3], 24) 1440 REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
4555 | ATH9K_POW_SM(ratesArray[rateHt20_2], 16)
4556 | ATH9K_POW_SM(ratesArray[rateHt20_1], 8)
4557 | ATH9K_POW_SM(ratesArray[rateHt20_0], 0)
4558 );
4559 REG_WRITE(ah, AR_PHY_POWER_TX_RATE6,
4560 ATH9K_POW_SM(ratesArray[rateHt20_7], 24)
4561 | ATH9K_POW_SM(ratesArray[rateHt20_6], 16)
4562 | ATH9K_POW_SM(ratesArray[rateHt20_5], 8)
4563 | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)
4564 );
4565 1441
4566 if (IS_CHAN_HT40(chan)) { 1442 regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
4567 REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, 1443 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
4568 ATH9K_POW_SM(ratesArray[rateHt40_3] +
4569 ht40PowerIncForPdadc, 24)
4570 | ATH9K_POW_SM(ratesArray[rateHt40_2] +
4571 ht40PowerIncForPdadc, 16)
4572 | ATH9K_POW_SM(ratesArray[rateHt40_1] +
4573 ht40PowerIncForPdadc, 8)
4574 | ATH9K_POW_SM(ratesArray[rateHt40_0] +
4575 ht40PowerIncForPdadc, 0)
4576 );
4577 REG_WRITE(ah, AR_PHY_POWER_TX_RATE8,
4578 ATH9K_POW_SM(ratesArray[rateHt40_7] +
4579 ht40PowerIncForPdadc, 24)
4580 | ATH9K_POW_SM(ratesArray[rateHt40_6] +
4581 ht40PowerIncForPdadc, 16)
4582 | ATH9K_POW_SM(ratesArray[rateHt40_5] +
4583 ht40PowerIncForPdadc, 8)
4584 | ATH9K_POW_SM(ratesArray[rateHt40_4] +
4585 ht40PowerIncForPdadc, 0)
4586 );
4587
4588 REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
4589 ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
4590 | ATH9K_POW_SM(ratesArray[rateExtCck], 16)
4591 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
4592 | ATH9K_POW_SM(ratesArray[rateDupCck], 0)
4593 );
4594 }
4595 1444
4596 REG_WRITE(ah, AR_PHY_POWER_TX_SUB, 1445 REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->ah_txTrigLevel);
4597 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
4598 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0)
4599 );
4600 1446
4601 i = rate6mb; 1447 regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK;
4602 if (IS_CHAN_HT40(chan)) 1448 REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B);
4603 i = rateHt40_0;
4604 else if (IS_CHAN_HT20(chan))
4605 i = rateHt20_0;
4606 1449
4607 if (AR_SREV_9280_10_OR_LATER(ah)) 1450 REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
4608 ah->ah_maxPowerLevel =
4609 ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2;
4610 else
4611 ah->ah_maxPowerLevel = ratesArray[i];
4612 1451
4613 return 0; 1452 if (AR_SREV_9285(ah)) {
1453 REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
1454 AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE);
1455 } else {
1456 REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
1457 AR_PCU_TXBUF_CTRL_USABLE_SIZE);
1458 }
1459}
1460
1461static void ath9k_hw_set_operating_mode(struct ath_hal *ah, int opmode)
1462{
1463 u32 val;
1464
1465 val = REG_READ(ah, AR_STA_ID1);
1466 val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC);
1467 switch (opmode) {
1468 case NL80211_IFTYPE_AP:
1469 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP
1470 | AR_STA_ID1_KSRCH_MODE);
1471 REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1472 break;
1473 case NL80211_IFTYPE_ADHOC:
1474 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC
1475 | AR_STA_ID1_KSRCH_MODE);
1476 REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
1477 break;
1478 case NL80211_IFTYPE_STATION:
1479 case NL80211_IFTYPE_MONITOR:
1480 REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE);
1481 break;
1482 }
4614} 1483}
4615 1484
4616static inline void ath9k_hw_get_delta_slope_vals(struct ath_hal *ah, 1485static inline void ath9k_hw_get_delta_slope_vals(struct ath_hal *ah,
@@ -4632,9 +1501,8 @@ static inline void ath9k_hw_get_delta_slope_vals(struct ath_hal *ah,
4632 *coef_exponent = coef_exp - 16; 1501 *coef_exponent = coef_exp - 16;
4633} 1502}
4634 1503
4635static void 1504static void ath9k_hw_set_delta_slope(struct ath_hal *ah,
4636ath9k_hw_set_delta_slope(struct ath_hal *ah, 1505 struct ath9k_channel *chan)
4637 struct ath9k_channel *chan)
4638{ 1506{
4639 u32 coef_scaled, ds_coef_exp, ds_coef_man; 1507 u32 coef_scaled, ds_coef_exp, ds_coef_man;
4640 u32 clockMhzScaled = 0x64000000; 1508 u32 clockMhzScaled = 0x64000000;
@@ -4667,8 +1535,243 @@ ath9k_hw_set_delta_slope(struct ath_hal *ah,
4667 AR_PHY_HALFGI_DSC_EXP, ds_coef_exp); 1535 AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
4668} 1536}
4669 1537
4670static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah, 1538static bool ath9k_hw_set_reset(struct ath_hal *ah, int type)
4671 struct ath9k_channel *chan) 1539{
1540 u32 rst_flags;
1541 u32 tmpReg;
1542
1543 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1544 AR_RTC_FORCE_WAKE_ON_INT);
1545
1546 if (AR_SREV_9100(ah)) {
1547 rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
1548 AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
1549 } else {
1550 tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
1551 if (tmpReg &
1552 (AR_INTR_SYNC_LOCAL_TIMEOUT |
1553 AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
1554 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
1555 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
1556 } else {
1557 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1558 }
1559
1560 rst_flags = AR_RTC_RC_MAC_WARM;
1561 if (type == ATH9K_RESET_COLD)
1562 rst_flags |= AR_RTC_RC_MAC_COLD;
1563 }
1564
1565 REG_WRITE(ah, (u16) (AR_RTC_RC), rst_flags);
1566 udelay(50);
1567
1568 REG_WRITE(ah, (u16) (AR_RTC_RC), 0);
1569 if (!ath9k_hw_wait(ah, (u16) (AR_RTC_RC), AR_RTC_RC_M, 0)) {
1570 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
1571 "RTC stuck in MAC reset\n");
1572 return false;
1573 }
1574
1575 if (!AR_SREV_9100(ah))
1576 REG_WRITE(ah, AR_RC, 0);
1577
1578 ath9k_hw_init_pll(ah, NULL);
1579
1580 if (AR_SREV_9100(ah))
1581 udelay(50);
1582
1583 return true;
1584}
1585
1586static bool ath9k_hw_set_reset_power_on(struct ath_hal *ah)
1587{
1588 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1589 AR_RTC_FORCE_WAKE_ON_INT);
1590
1591 REG_WRITE(ah, (u16) (AR_RTC_RESET), 0);
1592 REG_WRITE(ah, (u16) (AR_RTC_RESET), 1);
1593
1594 if (!ath9k_hw_wait(ah,
1595 AR_RTC_STATUS,
1596 AR_RTC_STATUS_M,
1597 AR_RTC_STATUS_ON)) {
1598 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "RTC not waking up\n");
1599 return false;
1600 }
1601
1602 ath9k_hw_read_revisions(ah);
1603
1604 return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
1605}
1606
1607static bool ath9k_hw_set_reset_reg(struct ath_hal *ah, u32 type)
1608{
1609 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
1610 AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
1611
1612 switch (type) {
1613 case ATH9K_RESET_POWER_ON:
1614 return ath9k_hw_set_reset_power_on(ah);
1615 break;
1616 case ATH9K_RESET_WARM:
1617 case ATH9K_RESET_COLD:
1618 return ath9k_hw_set_reset(ah, type);
1619 break;
1620 default:
1621 return false;
1622 }
1623}
1624
1625static void ath9k_hw_set_regs(struct ath_hal *ah, struct ath9k_channel *chan,
1626 enum ath9k_ht_macmode macmode)
1627{
1628 u32 phymode;
1629 u32 enableDacFifo = 0;
1630 struct ath_hal_5416 *ahp = AH5416(ah);
1631
1632 if (AR_SREV_9285_10_OR_LATER(ah))
1633 enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
1634 AR_PHY_FC_ENABLE_DAC_FIFO);
1635
1636 phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
1637 | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
1638
1639 if (IS_CHAN_HT40(chan)) {
1640 phymode |= AR_PHY_FC_DYN2040_EN;
1641
1642 if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
1643 (chan->chanmode == CHANNEL_G_HT40PLUS))
1644 phymode |= AR_PHY_FC_DYN2040_PRI_CH;
1645
1646 if (ahp->ah_extprotspacing == ATH9K_HT_EXTPROTSPACING_25)
1647 phymode |= AR_PHY_FC_DYN2040_EXT_CH;
1648 }
1649 REG_WRITE(ah, AR_PHY_TURBO, phymode);
1650
1651 ath9k_hw_set11nmac2040(ah, macmode);
1652
1653 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
1654 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
1655}
1656
1657static bool ath9k_hw_chip_reset(struct ath_hal *ah,
1658 struct ath9k_channel *chan)
1659{
1660 struct ath_hal_5416 *ahp = AH5416(ah);
1661
1662 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
1663 return false;
1664
1665 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
1666 return false;
1667
1668 ahp->ah_chipFullSleep = false;
1669
1670 ath9k_hw_init_pll(ah, chan);
1671
1672 ath9k_hw_set_rfmode(ah, chan);
1673
1674 return true;
1675}
1676
1677static struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah,
1678 struct ath9k_channel *chan)
1679{
1680 if (!(IS_CHAN_2GHZ(chan) ^ IS_CHAN_5GHZ(chan))) {
1681 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1682 "invalid channel %u/0x%x; not marked as "
1683 "2GHz or 5GHz\n", chan->channel, chan->channelFlags);
1684 return NULL;
1685 }
1686
1687 if (!IS_CHAN_OFDM(chan) &&
1688 !IS_CHAN_B(chan) &&
1689 !IS_CHAN_HT20(chan) &&
1690 !IS_CHAN_HT40(chan)) {
1691 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1692 "invalid channel %u/0x%x; not marked as "
1693 "OFDM or CCK or HT20 or HT40PLUS or HT40MINUS\n",
1694 chan->channel, chan->channelFlags);
1695 return NULL;
1696 }
1697
1698 return ath9k_regd_check_channel(ah, chan);
1699}
1700
1701static bool ath9k_hw_channel_change(struct ath_hal *ah,
1702 struct ath9k_channel *chan,
1703 enum ath9k_ht_macmode macmode)
1704{
1705 u32 synthDelay, qnum;
1706
1707 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
1708 if (ath9k_hw_numtxpending(ah, qnum)) {
1709 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
1710 "Transmit frames pending on queue %d\n", qnum);
1711 return false;
1712 }
1713 }
1714
1715 REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
1716 if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
1717 AR_PHY_RFBUS_GRANT_EN)) {
1718 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
1719 "Could not kill baseband RX\n");
1720 return false;
1721 }
1722
1723 ath9k_hw_set_regs(ah, chan, macmode);
1724
1725 if (AR_SREV_9280_10_OR_LATER(ah)) {
1726 if (!(ath9k_hw_ar9280_set_channel(ah, chan))) {
1727 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1728 "failed to set channel\n");
1729 return false;
1730 }
1731 } else {
1732 if (!(ath9k_hw_set_channel(ah, chan))) {
1733 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
1734 "failed to set channel\n");
1735 return false;
1736 }
1737 }
1738
1739 if (ath9k_hw_set_txpower(ah, chan,
1740 ath9k_regd_get_ctl(ah, chan),
1741 ath9k_regd_get_antenna_allowed(ah, chan),
1742 chan->maxRegTxPower * 2,
1743 min((u32) MAX_RATE_POWER,
1744 (u32) ah->ah_powerLimit)) != 0) {
1745 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
1746 "error init'ing transmit power\n");
1747 return false;
1748 }
1749
1750 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
1751 if (IS_CHAN_B(chan))
1752 synthDelay = (4 * synthDelay) / 22;
1753 else
1754 synthDelay /= 10;
1755
1756 udelay(synthDelay + BASE_ACTIVATE_DELAY);
1757
1758 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
1759
1760 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
1761 ath9k_hw_set_delta_slope(ah, chan);
1762
1763 if (AR_SREV_9280_10_OR_LATER(ah))
1764 ath9k_hw_9280_spur_mitigate(ah, chan);
1765 else
1766 ath9k_hw_spur_mitigate(ah, chan);
1767
1768 if (!chan->oneTimeCalsDone)
1769 chan->oneTimeCalsDone = true;
1770
1771 return true;
1772}
1773
1774static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah, struct ath9k_channel *chan)
4672{ 1775{
4673 int bb_spur = AR_NO_SPUR; 1776 int bb_spur = AR_NO_SPUR;
4674 int freq; 1777 int freq;
@@ -4918,8 +2021,7 @@ static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah,
4918 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); 2021 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
4919} 2022}
4920 2023
4921static void ath9k_hw_spur_mitigate(struct ath_hal *ah, 2024static void ath9k_hw_spur_mitigate(struct ath_hal *ah, struct ath9k_channel *chan)
4922 struct ath9k_channel *chan)
4923{ 2025{
4924 int bb_spur = AR_NO_SPUR; 2026 int bb_spur = AR_NO_SPUR;
4925 int bin, cur_bin; 2027 int bin, cur_bin;
@@ -5120,752 +2222,11 @@ static void ath9k_hw_spur_mitigate(struct ath_hal *ah,
5120 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); 2222 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
5121} 2223}
5122 2224
5123static void ath9k_hw_init_chain_masks(struct ath_hal *ah) 2225bool ath9k_hw_reset(struct ath_hal *ah, struct ath9k_channel *chan,
5124{
5125 struct ath_hal_5416 *ahp = AH5416(ah);
5126 int rx_chainmask, tx_chainmask;
5127
5128 rx_chainmask = ahp->ah_rxchainmask;
5129 tx_chainmask = ahp->ah_txchainmask;
5130
5131 switch (rx_chainmask) {
5132 case 0x5:
5133 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
5134 AR_PHY_SWAP_ALT_CHAIN);
5135 case 0x3:
5136 if (((ah)->ah_macVersion <= AR_SREV_VERSION_9160)) {
5137 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
5138 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
5139 break;
5140 }
5141 case 0x1:
5142 case 0x2:
5143 if (!AR_SREV_9280(ah))
5144 break;
5145 case 0x7:
5146 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
5147 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
5148 break;
5149 default:
5150 break;
5151 }
5152
5153 REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
5154 if (tx_chainmask == 0x5) {
5155 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
5156 AR_PHY_SWAP_ALT_CHAIN);
5157 }
5158 if (AR_SREV_9100(ah))
5159 REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
5160 REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
5161}
5162
5163static void ath9k_hw_set_addac(struct ath_hal *ah,
5164 struct ath9k_channel *chan)
5165{
5166 struct modal_eep_header *pModal;
5167 struct ath_hal_5416 *ahp = AH5416(ah);
5168 struct ar5416_eeprom *eep = &ahp->ah_eeprom;
5169 u8 biaslevel;
5170
5171 if (ah->ah_macVersion != AR_SREV_VERSION_9160)
5172 return;
5173
5174 if (ar5416_get_eep_rev(ahp) < AR5416_EEP_MINOR_VER_7)
5175 return;
5176
5177 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
5178
5179 if (pModal->xpaBiasLvl != 0xff) {
5180 biaslevel = pModal->xpaBiasLvl;
5181 } else {
5182
5183 u16 resetFreqBin, freqBin, freqCount = 0;
5184 struct chan_centers centers;
5185
5186 ath9k_hw_get_channel_centers(ah, chan, &centers);
5187
5188 resetFreqBin =
5189 FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan));
5190 freqBin = pModal->xpaBiasLvlFreq[0] & 0xff;
5191 biaslevel = (u8) (pModal->xpaBiasLvlFreq[0] >> 14);
5192
5193 freqCount++;
5194
5195 while (freqCount < 3) {
5196 if (pModal->xpaBiasLvlFreq[freqCount] == 0x0)
5197 break;
5198
5199 freqBin = pModal->xpaBiasLvlFreq[freqCount] & 0xff;
5200 if (resetFreqBin >= freqBin) {
5201 biaslevel =
5202 (u8) (pModal->
5203 xpaBiasLvlFreq[freqCount]
5204 >> 14);
5205 } else {
5206 break;
5207 }
5208 freqCount++;
5209 }
5210 }
5211
5212 if (IS_CHAN_2GHZ(chan)) {
5213 INI_RA(&ahp->ah_iniAddac, 7, 1) =
5214 (INI_RA(&ahp->ah_iniAddac, 7, 1) & (~0x18)) | biaslevel
5215 << 3;
5216 } else {
5217 INI_RA(&ahp->ah_iniAddac, 6, 1) =
5218 (INI_RA(&ahp->ah_iniAddac, 6, 1) & (~0xc0)) | biaslevel
5219 << 6;
5220 }
5221}
5222
5223static u32 ath9k_hw_mac_usec(struct ath_hal *ah, u32 clks)
5224{
5225 if (ah->ah_curchan != NULL)
5226 return clks /
5227 CLOCK_RATE[ath9k_hw_chan2wmode(ah, ah->ah_curchan)];
5228 else
5229 return clks / CLOCK_RATE[ATH9K_MODE_11B];
5230}
5231
5232static u32 ath9k_hw_mac_to_usec(struct ath_hal *ah, u32 clks)
5233{
5234 struct ath9k_channel *chan = ah->ah_curchan;
5235
5236 if (chan && IS_CHAN_HT40(chan))
5237 return ath9k_hw_mac_usec(ah, clks) / 2;
5238 else
5239 return ath9k_hw_mac_usec(ah, clks);
5240}
5241
5242static u32 ath9k_hw_mac_clks(struct ath_hal *ah, u32 usecs)
5243{
5244 if (ah->ah_curchan != NULL)
5245 return usecs * CLOCK_RATE[ath9k_hw_chan2wmode(ah,
5246 ah->ah_curchan)];
5247 else
5248 return usecs * CLOCK_RATE[ATH9K_MODE_11B];
5249}
5250
5251static u32 ath9k_hw_mac_to_clks(struct ath_hal *ah, u32 usecs)
5252{
5253 struct ath9k_channel *chan = ah->ah_curchan;
5254
5255 if (chan && IS_CHAN_HT40(chan))
5256 return ath9k_hw_mac_clks(ah, usecs) * 2;
5257 else
5258 return ath9k_hw_mac_clks(ah, usecs);
5259}
5260
5261static bool ath9k_hw_set_ack_timeout(struct ath_hal *ah, u32 us)
5262{
5263 struct ath_hal_5416 *ahp = AH5416(ah);
5264
5265 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
5266 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad ack timeout %u\n",
5267 __func__, us);
5268 ahp->ah_acktimeout = (u32) -1;
5269 return false;
5270 } else {
5271 REG_RMW_FIELD(ah, AR_TIME_OUT,
5272 AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
5273 ahp->ah_acktimeout = us;
5274 return true;
5275 }
5276}
5277
5278static bool ath9k_hw_set_cts_timeout(struct ath_hal *ah, u32 us)
5279{
5280 struct ath_hal_5416 *ahp = AH5416(ah);
5281
5282 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
5283 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad cts timeout %u\n",
5284 __func__, us);
5285 ahp->ah_ctstimeout = (u32) -1;
5286 return false;
5287 } else {
5288 REG_RMW_FIELD(ah, AR_TIME_OUT,
5289 AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us));
5290 ahp->ah_ctstimeout = us;
5291 return true;
5292 }
5293}
5294static bool ath9k_hw_set_global_txtimeout(struct ath_hal *ah,
5295 u32 tu)
5296{
5297 struct ath_hal_5416 *ahp = AH5416(ah);
5298
5299 if (tu > 0xFFFF) {
5300 DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
5301 "%s: bad global tx timeout %u\n", __func__, tu);
5302 ahp->ah_globaltxtimeout = (u32) -1;
5303 return false;
5304 } else {
5305 REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
5306 ahp->ah_globaltxtimeout = tu;
5307 return true;
5308 }
5309}
5310
5311bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us)
5312{
5313 struct ath_hal_5416 *ahp = AH5416(ah);
5314
5315 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
5316 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad slot time %u\n",
5317 __func__, us);
5318 ahp->ah_slottime = (u32) -1;
5319 return false;
5320 } else {
5321 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
5322 ahp->ah_slottime = us;
5323 return true;
5324 }
5325}
5326
5327static void ath9k_hw_init_user_settings(struct ath_hal *ah)
5328{
5329 struct ath_hal_5416 *ahp = AH5416(ah);
5330
5331 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "--AP %s ahp->ah_miscMode 0x%x\n",
5332 __func__, ahp->ah_miscMode);
5333 if (ahp->ah_miscMode != 0)
5334 REG_WRITE(ah, AR_PCU_MISC,
5335 REG_READ(ah, AR_PCU_MISC) | ahp->ah_miscMode);
5336 if (ahp->ah_slottime != (u32) -1)
5337 ath9k_hw_setslottime(ah, ahp->ah_slottime);
5338 if (ahp->ah_acktimeout != (u32) -1)
5339 ath9k_hw_set_ack_timeout(ah, ahp->ah_acktimeout);
5340 if (ahp->ah_ctstimeout != (u32) -1)
5341 ath9k_hw_set_cts_timeout(ah, ahp->ah_ctstimeout);
5342 if (ahp->ah_globaltxtimeout != (u32) -1)
5343 ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout);
5344}
5345
5346static int
5347ath9k_hw_process_ini(struct ath_hal *ah,
5348 struct ath9k_channel *chan,
5349 enum ath9k_ht_macmode macmode)
5350{
5351 int i, regWrites = 0;
5352 struct ath_hal_5416 *ahp = AH5416(ah);
5353 u32 modesIndex, freqIndex;
5354 int status;
5355
5356 switch (chan->chanmode) {
5357 case CHANNEL_A:
5358 case CHANNEL_A_HT20:
5359 modesIndex = 1;
5360 freqIndex = 1;
5361 break;
5362 case CHANNEL_A_HT40PLUS:
5363 case CHANNEL_A_HT40MINUS:
5364 modesIndex = 2;
5365 freqIndex = 1;
5366 break;
5367 case CHANNEL_G:
5368 case CHANNEL_G_HT20:
5369 case CHANNEL_B:
5370 modesIndex = 4;
5371 freqIndex = 2;
5372 break;
5373 case CHANNEL_G_HT40PLUS:
5374 case CHANNEL_G_HT40MINUS:
5375 modesIndex = 3;
5376 freqIndex = 2;
5377 break;
5378
5379 default:
5380 return -EINVAL;
5381 }
5382
5383 REG_WRITE(ah, AR_PHY(0), 0x00000007);
5384
5385 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
5386
5387 ath9k_hw_set_addac(ah, chan);
5388
5389 if (AR_SREV_5416_V22_OR_LATER(ah)) {
5390 REG_WRITE_ARRAY(&ahp->ah_iniAddac, 1, regWrites);
5391 } else {
5392 struct ar5416IniArray temp;
5393 u32 addacSize =
5394 sizeof(u32) * ahp->ah_iniAddac.ia_rows *
5395 ahp->ah_iniAddac.ia_columns;
5396
5397 memcpy(ahp->ah_addac5416_21,
5398 ahp->ah_iniAddac.ia_array, addacSize);
5399
5400 (ahp->ah_addac5416_21)[31 *
5401 ahp->ah_iniAddac.ia_columns + 1] = 0;
5402
5403 temp.ia_array = ahp->ah_addac5416_21;
5404 temp.ia_columns = ahp->ah_iniAddac.ia_columns;
5405 temp.ia_rows = ahp->ah_iniAddac.ia_rows;
5406 REG_WRITE_ARRAY(&temp, 1, regWrites);
5407 }
5408 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
5409
5410 for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) {
5411 u32 reg = INI_RA(&ahp->ah_iniModes, i, 0);
5412 u32 val = INI_RA(&ahp->ah_iniModes, i, modesIndex);
5413
5414#ifdef CONFIG_SLOW_ANT_DIV
5415 if (ah->ah_devid == AR9280_DEVID_PCI)
5416 val = ath9k_hw_ini_fixup(ah, &ahp->ah_eeprom, reg,
5417 val);
5418#endif
5419
5420 REG_WRITE(ah, reg, val);
5421
5422 if (reg >= 0x7800 && reg < 0x78a0
5423 && ah->ah_config.analog_shiftreg) {
5424 udelay(100);
5425 }
5426
5427 DO_DELAY(regWrites);
5428 }
5429
5430 for (i = 0; i < ahp->ah_iniCommon.ia_rows; i++) {
5431 u32 reg = INI_RA(&ahp->ah_iniCommon, i, 0);
5432 u32 val = INI_RA(&ahp->ah_iniCommon, i, 1);
5433
5434 REG_WRITE(ah, reg, val);
5435
5436 if (reg >= 0x7800 && reg < 0x78a0
5437 && ah->ah_config.analog_shiftreg) {
5438 udelay(100);
5439 }
5440
5441 DO_DELAY(regWrites);
5442 }
5443
5444 ath9k_hw_write_regs(ah, modesIndex, freqIndex, regWrites);
5445
5446 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
5447 REG_WRITE_ARRAY(&ahp->ah_iniModesAdditional, modesIndex,
5448 regWrites);
5449 }
5450
5451 ath9k_hw_override_ini(ah, chan);
5452 ath9k_hw_set_regs(ah, chan, macmode);
5453 ath9k_hw_init_chain_masks(ah);
5454
5455 status = ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
5456 ath9k_regd_get_ctl(ah, chan),
5457 ath9k_regd_get_antenna_allowed(ah,
5458 chan),
5459 chan->maxRegTxPower * 2,
5460 min((u32) MAX_RATE_POWER,
5461 (u32) ah->ah_powerLimit));
5462 if (status != 0) {
5463 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
5464 "%s: error init'ing transmit power\n", __func__);
5465 return -EIO;
5466 }
5467
5468 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
5469 DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
5470 "%s: ar5416SetRfRegs failed\n", __func__);
5471 return -EIO;
5472 }
5473
5474 return 0;
5475}
5476
5477static void ath9k_hw_setup_calibration(struct ath_hal *ah,
5478 struct hal_cal_list *currCal)
5479{
5480 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
5481 AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
5482 currCal->calData->calCountMax);
5483
5484 switch (currCal->calData->calType) {
5485 case IQ_MISMATCH_CAL:
5486 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
5487 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5488 "%s: starting IQ Mismatch Calibration\n",
5489 __func__);
5490 break;
5491 case ADC_GAIN_CAL:
5492 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
5493 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5494 "%s: starting ADC Gain Calibration\n", __func__);
5495 break;
5496 case ADC_DC_CAL:
5497 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
5498 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5499 "%s: starting ADC DC Calibration\n", __func__);
5500 break;
5501 case ADC_DC_INIT_CAL:
5502 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
5503 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5504 "%s: starting Init ADC DC Calibration\n",
5505 __func__);
5506 break;
5507 }
5508
5509 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
5510 AR_PHY_TIMING_CTRL4_DO_CAL);
5511}
5512
5513static void ath9k_hw_reset_calibration(struct ath_hal *ah,
5514 struct hal_cal_list *currCal)
5515{
5516 struct ath_hal_5416 *ahp = AH5416(ah);
5517 int i;
5518
5519 ath9k_hw_setup_calibration(ah, currCal);
5520
5521 currCal->calState = CAL_RUNNING;
5522
5523 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
5524 ahp->ah_Meas0.sign[i] = 0;
5525 ahp->ah_Meas1.sign[i] = 0;
5526 ahp->ah_Meas2.sign[i] = 0;
5527 ahp->ah_Meas3.sign[i] = 0;
5528 }
5529
5530 ahp->ah_CalSamples = 0;
5531}
5532
5533static void
5534ath9k_hw_per_calibration(struct ath_hal *ah,
5535 struct ath9k_channel *ichan,
5536 u8 rxchainmask,
5537 struct hal_cal_list *currCal,
5538 bool *isCalDone)
5539{
5540 struct ath_hal_5416 *ahp = AH5416(ah);
5541
5542 *isCalDone = false;
5543
5544 if (currCal->calState == CAL_RUNNING) {
5545 if (!(REG_READ(ah,
5546 AR_PHY_TIMING_CTRL4(0)) &
5547 AR_PHY_TIMING_CTRL4_DO_CAL)) {
5548
5549 currCal->calData->calCollect(ah);
5550
5551 ahp->ah_CalSamples++;
5552
5553 if (ahp->ah_CalSamples >=
5554 currCal->calData->calNumSamples) {
5555 int i, numChains = 0;
5556 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
5557 if (rxchainmask & (1 << i))
5558 numChains++;
5559 }
5560
5561 currCal->calData->calPostProc(ah,
5562 numChains);
5563
5564 ichan->CalValid |=
5565 currCal->calData->calType;
5566 currCal->calState = CAL_DONE;
5567 *isCalDone = true;
5568 } else {
5569 ath9k_hw_setup_calibration(ah, currCal);
5570 }
5571 }
5572 } else if (!(ichan->CalValid & currCal->calData->calType)) {
5573 ath9k_hw_reset_calibration(ah, currCal);
5574 }
5575}
5576
5577static inline bool ath9k_hw_run_init_cals(struct ath_hal *ah,
5578 int init_cal_count)
5579{
5580 struct ath_hal_5416 *ahp = AH5416(ah);
5581 struct ath9k_channel ichan;
5582 bool isCalDone;
5583 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
5584 const struct hal_percal_data *calData = currCal->calData;
5585 int i;
5586
5587 if (currCal == NULL)
5588 return false;
5589
5590 ichan.CalValid = 0;
5591
5592 for (i = 0; i < init_cal_count; i++) {
5593 ath9k_hw_reset_calibration(ah, currCal);
5594
5595 if (!ath9k_hw_wait(ah, AR_PHY_TIMING_CTRL4(0),
5596 AR_PHY_TIMING_CTRL4_DO_CAL, 0)) {
5597 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5598 "%s: Cal %d failed to complete in 100ms.\n",
5599 __func__, calData->calType);
5600
5601 ahp->ah_cal_list = ahp->ah_cal_list_last =
5602 ahp->ah_cal_list_curr = NULL;
5603 return false;
5604 }
5605
5606 ath9k_hw_per_calibration(ah, &ichan, ahp->ah_rxchainmask,
5607 currCal, &isCalDone);
5608 if (!isCalDone) {
5609 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5610 "%s: Not able to run Init Cal %d.\n",
5611 __func__, calData->calType);
5612 }
5613 if (currCal->calNext) {
5614 currCal = currCal->calNext;
5615 calData = currCal->calData;
5616 }
5617 }
5618
5619 ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr = NULL;
5620 return true;
5621}
5622
5623static bool
5624ath9k_hw_channel_change(struct ath_hal *ah,
5625 struct ath9k_channel *chan,
5626 enum ath9k_ht_macmode macmode)
5627{
5628 u32 synthDelay, qnum;
5629 struct ath_hal_5416 *ahp = AH5416(ah);
5630
5631 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
5632 if (ath9k_hw_numtxpending(ah, qnum)) {
5633 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
5634 "%s: Transmit frames pending on queue %d\n",
5635 __func__, qnum);
5636 return false;
5637 }
5638 }
5639
5640 REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
5641 if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
5642 AR_PHY_RFBUS_GRANT_EN)) {
5643 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
5644 "%s: Could not kill baseband RX\n", __func__);
5645 return false;
5646 }
5647
5648 ath9k_hw_set_regs(ah, chan, macmode);
5649
5650 if (AR_SREV_9280_10_OR_LATER(ah)) {
5651 if (!(ath9k_hw_ar9280_set_channel(ah, chan))) {
5652 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
5653 "%s: failed to set channel\n", __func__);
5654 return false;
5655 }
5656 } else {
5657 if (!(ath9k_hw_set_channel(ah, chan))) {
5658 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
5659 "%s: failed to set channel\n", __func__);
5660 return false;
5661 }
5662 }
5663
5664 if (ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan,
5665 ath9k_regd_get_ctl(ah, chan),
5666 ath9k_regd_get_antenna_allowed(ah, chan),
5667 chan->maxRegTxPower * 2,
5668 min((u32) MAX_RATE_POWER,
5669 (u32) ah->ah_powerLimit)) != 0) {
5670 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
5671 "%s: error init'ing transmit power\n", __func__);
5672 return false;
5673 }
5674
5675 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
5676 if (IS_CHAN_CCK(chan))
5677 synthDelay = (4 * synthDelay) / 22;
5678 else
5679 synthDelay /= 10;
5680
5681 udelay(synthDelay + BASE_ACTIVATE_DELAY);
5682
5683 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
5684
5685 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
5686 ath9k_hw_set_delta_slope(ah, chan);
5687
5688 if (AR_SREV_9280_10_OR_LATER(ah))
5689 ath9k_hw_9280_spur_mitigate(ah, chan);
5690 else
5691 ath9k_hw_spur_mitigate(ah, chan);
5692
5693 if (!chan->oneTimeCalsDone)
5694 chan->oneTimeCalsDone = true;
5695
5696 return true;
5697}
5698
5699static bool ath9k_hw_chip_reset(struct ath_hal *ah,
5700 struct ath9k_channel *chan)
5701{
5702 struct ath_hal_5416 *ahp = AH5416(ah);
5703
5704 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
5705 return false;
5706
5707 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
5708 return false;
5709
5710 ahp->ah_chipFullSleep = false;
5711
5712 ath9k_hw_init_pll(ah, chan);
5713
5714 ath9k_hw_set_rfmode(ah, chan);
5715
5716 return true;
5717}
5718
5719static inline void ath9k_hw_set_dma(struct ath_hal *ah)
5720{
5721 u32 regval;
5722
5723 regval = REG_READ(ah, AR_AHB_MODE);
5724 REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
5725
5726 regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
5727 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
5728
5729 REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->ah_txTrigLevel);
5730
5731 regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK;
5732 REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B);
5733
5734 REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
5735
5736 if (AR_SREV_9285(ah)) {
5737 REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
5738 AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE);
5739 } else {
5740 REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
5741 AR_PCU_TXBUF_CTRL_USABLE_SIZE);
5742 }
5743}
5744
5745bool ath9k_hw_stopdmarecv(struct ath_hal *ah)
5746{
5747 REG_WRITE(ah, AR_CR, AR_CR_RXD);
5748 if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0)) {
5749 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
5750 "%s: dma failed to stop in 10ms\n"
5751 "AR_CR=0x%08x\nAR_DIAG_SW=0x%08x\n",
5752 __func__,
5753 REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
5754 return false;
5755 } else {
5756 return true;
5757 }
5758}
5759
5760void ath9k_hw_startpcureceive(struct ath_hal *ah)
5761{
5762 REG_CLR_BIT(ah, AR_DIAG_SW,
5763 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
5764
5765 ath9k_enable_mib_counters(ah);
5766
5767 ath9k_ani_reset(ah);
5768}
5769
5770void ath9k_hw_stoppcurecv(struct ath_hal *ah)
5771{
5772 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
5773
5774 ath9k_hw_disable_mib_counters(ah);
5775}
5776
5777static bool ath9k_hw_iscal_supported(struct ath_hal *ah,
5778 struct ath9k_channel *chan,
5779 enum hal_cal_types calType)
5780{
5781 struct ath_hal_5416 *ahp = AH5416(ah);
5782 bool retval = false;
5783
5784 switch (calType & ahp->ah_suppCals) {
5785 case IQ_MISMATCH_CAL:
5786 if (!IS_CHAN_B(chan))
5787 retval = true;
5788 break;
5789 case ADC_GAIN_CAL:
5790 case ADC_DC_CAL:
5791 if (!IS_CHAN_B(chan)
5792 && !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan)))
5793 retval = true;
5794 break;
5795 }
5796
5797 return retval;
5798}
5799
5800static bool ath9k_hw_init_cal(struct ath_hal *ah,
5801 struct ath9k_channel *chan)
5802{
5803 struct ath_hal_5416 *ahp = AH5416(ah);
5804 struct ath9k_channel *ichan =
5805 ath9k_regd_check_channel(ah, chan);
5806
5807 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
5808 REG_READ(ah, AR_PHY_AGC_CONTROL) |
5809 AR_PHY_AGC_CONTROL_CAL);
5810
5811 if (!ath9k_hw_wait
5812 (ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0)) {
5813 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5814 "%s: offset calibration failed to complete in 1ms; "
5815 "noisy environment?\n", __func__);
5816 return false;
5817 }
5818
5819 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
5820 REG_READ(ah, AR_PHY_AGC_CONTROL) |
5821 AR_PHY_AGC_CONTROL_NF);
5822
5823 ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr =
5824 NULL;
5825
5826 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
5827 if (ath9k_hw_iscal_supported(ah, chan, ADC_GAIN_CAL)) {
5828 INIT_CAL(&ahp->ah_adcGainCalData);
5829 INSERT_CAL(ahp, &ahp->ah_adcGainCalData);
5830 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5831 "%s: enabling ADC Gain Calibration.\n",
5832 __func__);
5833 }
5834 if (ath9k_hw_iscal_supported(ah, chan, ADC_DC_CAL)) {
5835 INIT_CAL(&ahp->ah_adcDcCalData);
5836 INSERT_CAL(ahp, &ahp->ah_adcDcCalData);
5837 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5838 "%s: enabling ADC DC Calibration.\n",
5839 __func__);
5840 }
5841 if (ath9k_hw_iscal_supported(ah, chan, IQ_MISMATCH_CAL)) {
5842 INIT_CAL(&ahp->ah_iqCalData);
5843 INSERT_CAL(ahp, &ahp->ah_iqCalData);
5844 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
5845 "%s: enabling IQ Calibration.\n",
5846 __func__);
5847 }
5848
5849 ahp->ah_cal_list_curr = ahp->ah_cal_list;
5850
5851 if (ahp->ah_cal_list_curr)
5852 ath9k_hw_reset_calibration(ah,
5853 ahp->ah_cal_list_curr);
5854 }
5855
5856 ichan->CalValid = 0;
5857
5858 return true;
5859}
5860
5861
5862bool ath9k_hw_reset(struct ath_hal *ah,
5863 struct ath9k_channel *chan,
5864 enum ath9k_ht_macmode macmode, 2226 enum ath9k_ht_macmode macmode,
5865 u8 txchainmask, u8 rxchainmask, 2227 u8 txchainmask, u8 rxchainmask,
5866 enum ath9k_ht_extprotspacing extprotspacing, 2228 enum ath9k_ht_extprotspacing extprotspacing,
5867 bool bChannelChange, 2229 bool bChannelChange, int *status)
5868 int *status)
5869{ 2230{
5870 u32 saveLedState; 2231 u32 saveLedState;
5871 struct ath_hal_5416 *ahp = AH5416(ah); 2232 struct ath_hal_5416 *ahp = AH5416(ah);
@@ -5886,8 +2247,8 @@ bool ath9k_hw_reset(struct ath_hal *ah,
5886 2247
5887 if (ath9k_hw_check_chan(ah, chan) == NULL) { 2248 if (ath9k_hw_check_chan(ah, chan) == NULL) {
5888 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, 2249 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
5889 "%s: invalid channel %u/0x%x; no mapping\n", 2250 "invalid channel %u/0x%x; no mapping\n",
5890 __func__, chan->channel, chan->channelFlags); 2251 chan->channel, chan->channelFlags);
5891 ecode = -EINVAL; 2252 ecode = -EINVAL;
5892 goto bad; 2253 goto bad;
5893 } 2254 }
@@ -5907,8 +2268,7 @@ bool ath9k_hw_reset(struct ath_hal *ah,
5907 ((chan->channelFlags & CHANNEL_ALL) == 2268 ((chan->channelFlags & CHANNEL_ALL) ==
5908 (ah->ah_curchan->channelFlags & CHANNEL_ALL)) && 2269 (ah->ah_curchan->channelFlags & CHANNEL_ALL)) &&
5909 (!AR_SREV_9280(ah) || (!IS_CHAN_A_5MHZ_SPACED(chan) && 2270 (!AR_SREV_9280(ah) || (!IS_CHAN_A_5MHZ_SPACED(chan) &&
5910 !IS_CHAN_A_5MHZ_SPACED(ah-> 2271 !IS_CHAN_A_5MHZ_SPACED(ah->ah_curchan)))) {
5911 ah_curchan)))) {
5912 2272
5913 if (ath9k_hw_channel_change(ah, chan, macmode)) { 2273 if (ath9k_hw_channel_change(ah, chan, macmode)) {
5914 ath9k_hw_loadnf(ah, ah->ah_curchan); 2274 ath9k_hw_loadnf(ah, ah->ah_curchan);
@@ -5930,8 +2290,7 @@ bool ath9k_hw_reset(struct ath_hal *ah,
5930 ath9k_hw_mark_phy_inactive(ah); 2290 ath9k_hw_mark_phy_inactive(ah);
5931 2291
5932 if (!ath9k_hw_chip_reset(ah, chan)) { 2292 if (!ath9k_hw_chip_reset(ah, chan)) {
5933 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: chip reset failed\n", 2293 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "chip reset failed\n");
5934 __func__);
5935 ecode = -EINVAL; 2294 ecode = -EINVAL;
5936 goto bad; 2295 goto bad;
5937 } 2296 }
@@ -5965,7 +2324,7 @@ bool ath9k_hw_reset(struct ath_hal *ah,
5965 2324
5966 if (!ath9k_hw_eeprom_set_board_values(ah, chan)) { 2325 if (!ath9k_hw_eeprom_set_board_values(ah, chan)) {
5967 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 2326 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
5968 "%s: error setting board options\n", __func__); 2327 "error setting board options\n");
5969 ecode = -EIO; 2328 ecode = -EIO;
5970 goto bad; 2329 goto bad;
5971 } 2330 }
@@ -6016,7 +2375,7 @@ bool ath9k_hw_reset(struct ath_hal *ah,
6016 ath9k_hw_init_interrupt_masks(ah, ah->ah_opmode); 2375 ath9k_hw_init_interrupt_masks(ah, ah->ah_opmode);
6017 ath9k_hw_init_qos(ah); 2376 ath9k_hw_init_qos(ah);
6018 2377
6019#ifdef CONFIG_RFKILL 2378#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
6020 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 2379 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
6021 ath9k_enable_rfkill(ah); 2380 ath9k_enable_rfkill(ah);
6022#endif 2381#endif
@@ -6055,15 +2414,13 @@ bool ath9k_hw_reset(struct ath_hal *ah,
6055 mask = REG_READ(ah, AR_CFG); 2414 mask = REG_READ(ah, AR_CFG);
6056 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) { 2415 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
6057 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 2416 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
6058 "%s CFG Byte Swap Set 0x%x\n", __func__, 2417 "CFG Byte Swap Set 0x%x\n", mask);
6059 mask);
6060 } else { 2418 } else {
6061 mask = 2419 mask =
6062 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB; 2420 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
6063 REG_WRITE(ah, AR_CFG, mask); 2421 REG_WRITE(ah, AR_CFG, mask);
6064 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 2422 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
6065 "%s Setting CFG 0x%x\n", __func__, 2423 "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
6066 REG_READ(ah, AR_CFG));
6067 } 2424 }
6068 } else { 2425 } else {
6069#ifdef __BIG_ENDIAN 2426#ifdef __BIG_ENDIAN
@@ -6078,693 +2435,402 @@ bad:
6078 return false; 2435 return false;
6079} 2436}
6080 2437
6081bool ath9k_hw_phy_disable(struct ath_hal *ah) 2438/************************/
6082{ 2439/* Key Cache Management */
6083 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM); 2440/************************/
6084}
6085
6086bool ath9k_hw_disable(struct ath_hal *ah)
6087{
6088 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
6089 return false;
6090
6091 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD);
6092}
6093 2441
6094bool 2442bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry)
6095ath9k_hw_calibrate(struct ath_hal *ah, struct ath9k_channel *chan,
6096 u8 rxchainmask, bool longcal,
6097 bool *isCalDone)
6098{ 2443{
6099 struct ath_hal_5416 *ahp = AH5416(ah); 2444 u32 keyType;
6100 struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
6101 struct ath9k_channel *ichan =
6102 ath9k_regd_check_channel(ah, chan);
6103
6104 *isCalDone = true;
6105 2445
6106 if (ichan == NULL) { 2446 if (entry >= ah->ah_caps.keycache_size) {
6107 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, 2447 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
6108 "%s: invalid channel %u/0x%x; no mapping\n", 2448 "entry %u out of range\n", entry);
6109 __func__, chan->channel, chan->channelFlags);
6110 return false; 2449 return false;
6111 } 2450 }
6112 2451
6113 if (currCal && 2452 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
6114 (currCal->calState == CAL_RUNNING ||
6115 currCal->calState == CAL_WAITING)) {
6116 ath9k_hw_per_calibration(ah, ichan, rxchainmask, currCal,
6117 isCalDone);
6118 if (*isCalDone) {
6119 ahp->ah_cal_list_curr = currCal = currCal->calNext;
6120
6121 if (currCal->calState == CAL_WAITING) {
6122 *isCalDone = false;
6123 ath9k_hw_reset_calibration(ah, currCal);
6124 }
6125 }
6126 }
6127 2453
6128 if (longcal) { 2454 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0);
6129 ath9k_hw_getnf(ah, ichan); 2455 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
6130 ath9k_hw_loadnf(ah, ah->ah_curchan); 2456 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
6131 ath9k_hw_start_nfcal(ah); 2457 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
2458 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
2459 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
2460 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
2461 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
6132 2462
6133 if ((ichan->channelFlags & CHANNEL_CW_INT) != 0) { 2463 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
2464 u16 micentry = entry + 64;
2465
2466 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0);
2467 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
2468 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
2469 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
6134 2470
6135 chan->channelFlags |= CHANNEL_CW_INT;
6136 ichan->channelFlags &= ~CHANNEL_CW_INT;
6137 }
6138 } 2471 }
6139 2472
2473 if (ah->ah_curchan == NULL)
2474 return true;
2475
6140 return true; 2476 return true;
6141} 2477}
6142 2478
6143static void ath9k_hw_iqcal_collect(struct ath_hal *ah) 2479bool ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry, const u8 *mac)
6144{ 2480{
6145 struct ath_hal_5416 *ahp = AH5416(ah); 2481 u32 macHi, macLo;
6146 int i;
6147 2482
6148 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 2483 if (entry >= ah->ah_caps.keycache_size) {
6149 ahp->ah_totalPowerMeasI[i] += 2484 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
6150 REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); 2485 "entry %u out of range\n", entry);
6151 ahp->ah_totalPowerMeasQ[i] += 2486 return false;
6152 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
6153 ahp->ah_totalIqCorrMeas[i] +=
6154 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
6155 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6156 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
6157 ahp->ah_CalSamples, i, ahp->ah_totalPowerMeasI[i],
6158 ahp->ah_totalPowerMeasQ[i],
6159 ahp->ah_totalIqCorrMeas[i]);
6160 } 2487 }
6161}
6162 2488
6163static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah) 2489 if (mac != NULL) {
6164{ 2490 macHi = (mac[5] << 8) | mac[4];
6165 struct ath_hal_5416 *ahp = AH5416(ah); 2491 macLo = (mac[3] << 24) |
6166 int i; 2492 (mac[2] << 16) |
6167 2493 (mac[1] << 8) |
6168 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 2494 mac[0];
6169 ahp->ah_totalAdcIOddPhase[i] += 2495 macLo >>= 1;
6170 REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); 2496 macLo |= (macHi & 1) << 31;
6171 ahp->ah_totalAdcIEvenPhase[i] += 2497 macHi >>= 1;
6172 REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); 2498 } else {
6173 ahp->ah_totalAdcQOddPhase[i] += 2499 macLo = macHi = 0;
6174 REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
6175 ahp->ah_totalAdcQEvenPhase[i] +=
6176 REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
6177
6178 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6179 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
6180 "oddq=0x%08x; evenq=0x%08x;\n",
6181 ahp->ah_CalSamples, i,
6182 ahp->ah_totalAdcIOddPhase[i],
6183 ahp->ah_totalAdcIEvenPhase[i],
6184 ahp->ah_totalAdcQOddPhase[i],
6185 ahp->ah_totalAdcQEvenPhase[i]);
6186 } 2500 }
6187} 2501 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
6188 2502 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID);
6189static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah)
6190{
6191 struct ath_hal_5416 *ahp = AH5416(ah);
6192 int i;
6193 2503
6194 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 2504 return true;
6195 ahp->ah_totalAdcDcOffsetIOddPhase[i] +=
6196 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
6197 ahp->ah_totalAdcDcOffsetIEvenPhase[i] +=
6198 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
6199 ahp->ah_totalAdcDcOffsetQOddPhase[i] +=
6200 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
6201 ahp->ah_totalAdcDcOffsetQEvenPhase[i] +=
6202 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
6203
6204 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6205 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
6206 "oddq=0x%08x; evenq=0x%08x;\n",
6207 ahp->ah_CalSamples, i,
6208 ahp->ah_totalAdcDcOffsetIOddPhase[i],
6209 ahp->ah_totalAdcDcOffsetIEvenPhase[i],
6210 ahp->ah_totalAdcDcOffsetQOddPhase[i],
6211 ahp->ah_totalAdcDcOffsetQEvenPhase[i]);
6212 }
6213} 2505}
6214 2506
6215static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains) 2507bool ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry,
2508 const struct ath9k_keyval *k,
2509 const u8 *mac, int xorKey)
6216{ 2510{
2511 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
2512 u32 key0, key1, key2, key3, key4;
2513 u32 keyType;
2514 u32 xorMask = xorKey ?
2515 (ATH9K_KEY_XOR << 24 | ATH9K_KEY_XOR << 16 | ATH9K_KEY_XOR << 8
2516 | ATH9K_KEY_XOR) : 0;
6217 struct ath_hal_5416 *ahp = AH5416(ah); 2517 struct ath_hal_5416 *ahp = AH5416(ah);
6218 u32 powerMeasQ, powerMeasI, iqCorrMeas;
6219 u32 qCoffDenom, iCoffDenom;
6220 int32_t qCoff, iCoff;
6221 int iqCorrNeg, i;
6222
6223 for (i = 0; i < numChains; i++) {
6224 powerMeasI = ahp->ah_totalPowerMeasI[i];
6225 powerMeasQ = ahp->ah_totalPowerMeasQ[i];
6226 iqCorrMeas = ahp->ah_totalIqCorrMeas[i];
6227
6228 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6229 "Starting IQ Cal and Correction for Chain %d\n",
6230 i);
6231
6232 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6233 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
6234 i, ahp->ah_totalIqCorrMeas[i]);
6235
6236 iqCorrNeg = 0;
6237 2518
2519 if (entry >= pCap->keycache_size) {
2520 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
2521 "entry %u out of range\n", entry);
2522 return false;
2523 }
6238 2524
6239 if (iqCorrMeas > 0x80000000) { 2525 switch (k->kv_type) {
6240 iqCorrMeas = (0xffffffff - iqCorrMeas) + 1; 2526 case ATH9K_CIPHER_AES_OCB:
6241 iqCorrNeg = 1; 2527 keyType = AR_KEYTABLE_TYPE_AES;
2528 break;
2529 case ATH9K_CIPHER_AES_CCM:
2530 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
2531 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
2532 "AES-CCM not supported by mac rev 0x%x\n",
2533 ah->ah_macRev);
2534 return false;
6242 } 2535 }
6243 2536 keyType = AR_KEYTABLE_TYPE_CCM;
6244 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 2537 break;
6245 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI); 2538 case ATH9K_CIPHER_TKIP:
6246 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 2539 keyType = AR_KEYTABLE_TYPE_TKIP;
6247 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ); 2540 if (ATH9K_IS_MIC_ENABLED(ah)
6248 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n", 2541 && entry + 64 >= pCap->keycache_size) {
6249 iqCorrNeg); 2542 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
6250 2543 "entry %u inappropriate for TKIP\n", entry);
6251 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128; 2544 return false;
6252 qCoffDenom = powerMeasQ / 64;
6253
6254 if (powerMeasQ != 0) {
6255
6256 iCoff = iqCorrMeas / iCoffDenom;
6257 qCoff = powerMeasI / qCoffDenom - 64;
6258 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6259 "Chn %d iCoff = 0x%08x\n", i, iCoff);
6260 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6261 "Chn %d qCoff = 0x%08x\n", i, qCoff);
6262
6263
6264 iCoff = iCoff & 0x3f;
6265 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6266 "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
6267 if (iqCorrNeg == 0x0)
6268 iCoff = 0x40 - iCoff;
6269
6270 if (qCoff > 15)
6271 qCoff = 15;
6272 else if (qCoff <= -16)
6273 qCoff = 16;
6274
6275 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6276 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
6277 i, iCoff, qCoff);
6278
6279 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
6280 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
6281 iCoff);
6282 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
6283 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
6284 qCoff);
6285 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6286 "IQ Cal and Correction done for Chain %d\n",
6287 i);
6288 } 2545 }
6289 } 2546 break;
6290 2547 case ATH9K_CIPHER_WEP:
6291 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0), 2548 if (k->kv_len < LEN_WEP40) {
6292 AR_PHY_TIMING_CTRL4_IQCORR_ENABLE); 2549 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
6293} 2550 "WEP key length %u too small\n", k->kv_len);
6294 2551 return false;
6295static void
6296ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah, u8 numChains)
6297{
6298 struct ath_hal_5416 *ahp = AH5416(ah);
6299 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset,
6300 qEvenMeasOffset;
6301 u32 qGainMismatch, iGainMismatch, val, i;
6302
6303 for (i = 0; i < numChains; i++) {
6304 iOddMeasOffset = ahp->ah_totalAdcIOddPhase[i];
6305 iEvenMeasOffset = ahp->ah_totalAdcIEvenPhase[i];
6306 qOddMeasOffset = ahp->ah_totalAdcQOddPhase[i];
6307 qEvenMeasOffset = ahp->ah_totalAdcQEvenPhase[i];
6308
6309 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6310 "Starting ADC Gain Cal for Chain %d\n", i);
6311
6312 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6313 "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
6314 iOddMeasOffset);
6315 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6316 "Chn %d pwr_meas_even_i = 0x%08x\n", i,
6317 iEvenMeasOffset);
6318 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6319 "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
6320 qOddMeasOffset);
6321 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6322 "Chn %d pwr_meas_even_q = 0x%08x\n", i,
6323 qEvenMeasOffset);
6324
6325 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
6326 iGainMismatch =
6327 ((iEvenMeasOffset * 32) /
6328 iOddMeasOffset) & 0x3f;
6329 qGainMismatch =
6330 ((qOddMeasOffset * 32) /
6331 qEvenMeasOffset) & 0x3f;
6332
6333 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6334 "Chn %d gain_mismatch_i = 0x%08x\n", i,
6335 iGainMismatch);
6336 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6337 "Chn %d gain_mismatch_q = 0x%08x\n", i,
6338 qGainMismatch);
6339
6340 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
6341 val &= 0xfffff000;
6342 val |= (qGainMismatch) | (iGainMismatch << 6);
6343 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
6344
6345 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6346 "ADC Gain Cal done for Chain %d\n", i);
6347 } 2552 }
2553 if (k->kv_len <= LEN_WEP40)
2554 keyType = AR_KEYTABLE_TYPE_40;
2555 else if (k->kv_len <= LEN_WEP104)
2556 keyType = AR_KEYTABLE_TYPE_104;
2557 else
2558 keyType = AR_KEYTABLE_TYPE_128;
2559 break;
2560 case ATH9K_CIPHER_CLR:
2561 keyType = AR_KEYTABLE_TYPE_CLR;
2562 break;
2563 default:
2564 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
2565 "cipher %u not supported\n", k->kv_type);
2566 return false;
6348 } 2567 }
6349 2568
6350 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0), 2569 key0 = get_unaligned_le32(k->kv_val + 0) ^ xorMask;
6351 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) | 2570 key1 = (get_unaligned_le16(k->kv_val + 4) ^ xorMask) & 0xffff;
6352 AR_PHY_NEW_ADC_GAIN_CORR_ENABLE); 2571 key2 = get_unaligned_le32(k->kv_val + 6) ^ xorMask;
6353} 2572 key3 = (get_unaligned_le16(k->kv_val + 10) ^ xorMask) & 0xffff;
6354 2573 key4 = get_unaligned_le32(k->kv_val + 12) ^ xorMask;
6355static void 2574 if (k->kv_len <= LEN_WEP104)
6356ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah, u8 numChains) 2575 key4 &= 0xff;
6357{
6358 struct ath_hal_5416 *ahp = AH5416(ah);
6359 u32 iOddMeasOffset, iEvenMeasOffset, val, i;
6360 int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
6361 const struct hal_percal_data *calData =
6362 ahp->ah_cal_list_curr->calData;
6363 u32 numSamples =
6364 (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
6365
6366 for (i = 0; i < numChains; i++) {
6367 iOddMeasOffset = ahp->ah_totalAdcDcOffsetIOddPhase[i];
6368 iEvenMeasOffset = ahp->ah_totalAdcDcOffsetIEvenPhase[i];
6369 qOddMeasOffset = ahp->ah_totalAdcDcOffsetQOddPhase[i];
6370 qEvenMeasOffset = ahp->ah_totalAdcDcOffsetQEvenPhase[i];
6371
6372 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6373 "Starting ADC DC Offset Cal for Chain %d\n", i);
6374
6375 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6376 "Chn %d pwr_meas_odd_i = %d\n", i,
6377 iOddMeasOffset);
6378 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6379 "Chn %d pwr_meas_even_i = %d\n", i,
6380 iEvenMeasOffset);
6381 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6382 "Chn %d pwr_meas_odd_q = %d\n", i,
6383 qOddMeasOffset);
6384 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6385 "Chn %d pwr_meas_even_q = %d\n", i,
6386 qEvenMeasOffset);
6387
6388 iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
6389 numSamples) & 0x1ff;
6390 qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
6391 numSamples) & 0x1ff;
6392
6393 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6394 "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
6395 iDcMismatch);
6396 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6397 "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
6398 qDcMismatch);
6399
6400 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
6401 val &= 0xc0000fff;
6402 val |= (qDcMismatch << 12) | (iDcMismatch << 21);
6403 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
6404
6405 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6406 "ADC DC Offset Cal done for Chain %d\n", i);
6407 }
6408
6409 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
6410 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
6411 AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
6412}
6413
6414bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit)
6415{
6416 struct ath_hal_5416 *ahp = AH5416(ah);
6417 struct ath9k_channel *chan = ah->ah_curchan;
6418 2576
6419 ah->ah_powerLimit = min(limit, (u32) MAX_RATE_POWER); 2577 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
2578 u16 micentry = entry + 64;
6420 2579
6421 if (ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan, 2580 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
6422 ath9k_regd_get_ctl(ah, chan), 2581 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);
6423 ath9k_regd_get_antenna_allowed(ah, 2582 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
6424 chan), 2583 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
6425 chan->maxRegTxPower * 2, 2584 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
6426 min((u32) MAX_RATE_POWER, 2585 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
6427 (u32) ah->ah_powerLimit)) != 0) 2586 (void) ath9k_hw_keysetmac(ah, entry, mac);
6428 return false;
6429 2587
6430 return true; 2588 if (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) {
6431} 2589 u32 mic0, mic1, mic2, mic3, mic4;
6432 2590
6433void 2591 mic0 = get_unaligned_le32(k->kv_mic + 0);
6434ath9k_hw_get_channel_centers(struct ath_hal *ah, 2592 mic2 = get_unaligned_le32(k->kv_mic + 4);
6435 struct ath9k_channel *chan, 2593 mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
6436 struct chan_centers *centers) 2594 mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
6437{ 2595 mic4 = get_unaligned_le32(k->kv_txmic + 4);
6438 int8_t extoff; 2596 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
6439 struct ath_hal_5416 *ahp = AH5416(ah); 2597 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
2598 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
2599 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);
2600 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
2601 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
2602 AR_KEYTABLE_TYPE_CLR);
6440 2603
6441 if (!IS_CHAN_HT40(chan)) { 2604 } else {
6442 centers->ctl_center = centers->ext_center = 2605 u32 mic0, mic2;
6443 centers->synth_center = chan->channel;
6444 return;
6445 }
6446 2606
6447 if ((chan->chanmode == CHANNEL_A_HT40PLUS) || 2607 mic0 = get_unaligned_le32(k->kv_mic + 0);
6448 (chan->chanmode == CHANNEL_G_HT40PLUS)) { 2608 mic2 = get_unaligned_le32(k->kv_mic + 4);
6449 centers->synth_center = 2609 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
6450 chan->channel + HT40_CHANNEL_CENTER_SHIFT; 2610 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
6451 extoff = 1; 2611 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
2612 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
2613 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
2614 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
2615 AR_KEYTABLE_TYPE_CLR);
2616 }
2617 REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
2618 REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
2619 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
2620 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
6452 } else { 2621 } else {
6453 centers->synth_center = 2622 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
6454 chan->channel - HT40_CHANNEL_CENTER_SHIFT; 2623 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
6455 extoff = -1; 2624 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
2625 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
2626 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
2627 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
2628
2629 (void) ath9k_hw_keysetmac(ah, entry, mac);
6456 } 2630 }
6457 2631
6458 centers->ctl_center = centers->synth_center - (extoff * 2632 if (ah->ah_curchan == NULL)
6459 HT40_CHANNEL_CENTER_SHIFT); 2633 return true;
6460 centers->ext_center = centers->synth_center + (extoff *
6461 ((ahp->
6462 ah_extprotspacing
6463 ==
6464 ATH9K_HT_EXTPROTSPACING_20)
6465 ?
6466 HT40_CHANNEL_CENTER_SHIFT
6467 : 15));
6468 2634
2635 return true;
6469} 2636}
6470 2637
6471void 2638bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry)
6472ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan,
6473 bool *isCalDone)
6474{ 2639{
6475 struct ath_hal_5416 *ahp = AH5416(ah); 2640 if (entry < ah->ah_caps.keycache_size) {
6476 struct ath9k_channel *ichan = 2641 u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry));
6477 ath9k_regd_check_channel(ah, chan); 2642 if (val & AR_KEYTABLE_VALID)
6478 struct hal_cal_list *currCal = ahp->ah_cal_list_curr; 2643 return true;
6479
6480 *isCalDone = true;
6481
6482 if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
6483 return;
6484
6485 if (currCal == NULL)
6486 return;
6487
6488 if (ichan == NULL) {
6489 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6490 "%s: invalid channel %u/0x%x; no mapping\n",
6491 __func__, chan->channel, chan->channelFlags);
6492 return;
6493 }
6494
6495
6496 if (currCal->calState != CAL_DONE) {
6497 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6498 "%s: Calibration state incorrect, %d\n",
6499 __func__, currCal->calState);
6500 return;
6501 } 2644 }
6502 2645 return false;
6503
6504 if (!ath9k_hw_iscal_supported(ah, chan, currCal->calData->calType))
6505 return;
6506
6507 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
6508 "%s: Resetting Cal %d state for channel %u/0x%x\n",
6509 __func__, currCal->calData->calType, chan->channel,
6510 chan->channelFlags);
6511
6512 ichan->CalValid &= ~currCal->calData->calType;
6513 currCal->calState = CAL_WAITING;
6514
6515 *isCalDone = false;
6516} 2646}
6517 2647
6518void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac) 2648/******************************/
2649/* Power Management (Chipset) */
2650/******************************/
2651
2652static void ath9k_set_power_sleep(struct ath_hal *ah, int setChip)
6519{ 2653{
6520 struct ath_hal_5416 *ahp = AH5416(ah); 2654 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2655 if (setChip) {
2656 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
2657 AR_RTC_FORCE_WAKE_EN);
2658 if (!AR_SREV_9100(ah))
2659 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
6521 2660
6522 memcpy(mac, ahp->ah_macaddr, ETH_ALEN); 2661 REG_CLR_BIT(ah, (u16) (AR_RTC_RESET),
2662 AR_RTC_RESET_EN);
2663 }
6523} 2664}
6524 2665
6525bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac) 2666static void ath9k_set_power_network_sleep(struct ath_hal *ah, int setChip)
6526{ 2667{
6527 struct ath_hal_5416 *ahp = AH5416(ah); 2668 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2669 if (setChip) {
2670 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6528 2671
6529 memcpy(ahp->ah_macaddr, mac, ETH_ALEN); 2672 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
6530 return true; 2673 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
2674 AR_RTC_FORCE_WAKE_ON_INT);
2675 } else {
2676 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
2677 AR_RTC_FORCE_WAKE_EN);
2678 }
2679 }
6531} 2680}
6532 2681
6533void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask) 2682static bool ath9k_hw_set_power_awake(struct ath_hal *ah,
2683 int setChip)
6534{ 2684{
6535 struct ath_hal_5416 *ahp = AH5416(ah); 2685 u32 val;
2686 int i;
6536 2687
6537 memcpy(mask, ahp->ah_bssidmask, ETH_ALEN); 2688 if (setChip) {
6538} 2689 if ((REG_READ(ah, AR_RTC_STATUS) &
2690 AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
2691 if (ath9k_hw_set_reset_reg(ah,
2692 ATH9K_RESET_POWER_ON) != true) {
2693 return false;
2694 }
2695 }
2696 if (AR_SREV_9100(ah))
2697 REG_SET_BIT(ah, AR_RTC_RESET,
2698 AR_RTC_RESET_EN);
6539 2699
6540bool 2700 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
6541ath9k_hw_setbssidmask(struct ath_hal *ah, const u8 *mask) 2701 AR_RTC_FORCE_WAKE_EN);
6542{ 2702 udelay(50);
6543 struct ath_hal_5416 *ahp = AH5416(ah);
6544 2703
6545 memcpy(ahp->ah_bssidmask, mask, ETH_ALEN); 2704 for (i = POWER_UP_TIME / 50; i > 0; i--) {
2705 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
2706 if (val == AR_RTC_STATUS_ON)
2707 break;
2708 udelay(50);
2709 REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
2710 AR_RTC_FORCE_WAKE_EN);
2711 }
2712 if (i == 0) {
2713 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
2714 "Failed to wakeup in %uus\n", POWER_UP_TIME / 20);
2715 return false;
2716 }
2717 }
6546 2718
6547 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask)); 2719 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
6548 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
6549 2720
6550 return true; 2721 return true;
6551} 2722}
6552 2723
6553void 2724bool ath9k_hw_setpower(struct ath_hal *ah,
6554ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid, 2725 enum ath9k_power_mode mode)
6555 u16 assocId)
6556{ 2726{
6557 struct ath_hal_5416 *ahp = AH5416(ah); 2727 struct ath_hal_5416 *ahp = AH5416(ah);
2728 static const char *modes[] = {
2729 "AWAKE",
2730 "FULL-SLEEP",
2731 "NETWORK SLEEP",
2732 "UNDEFINED"
2733 };
2734 int status = true, setChip = true;
6558 2735
6559 memcpy(ahp->ah_bssid, bssid, ETH_ALEN); 2736 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, "%s -> %s (%s)\n",
6560 ahp->ah_assocId = assocId; 2737 modes[ahp->ah_powerMode], modes[mode],
6561 2738 setChip ? "set chip " : "");
6562 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid));
6563 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) |
6564 ((assocId & 0x3fff) << AR_BSS_ID1_AID_S));
6565}
6566
6567u64 ath9k_hw_gettsf64(struct ath_hal *ah)
6568{
6569 u64 tsf;
6570
6571 tsf = REG_READ(ah, AR_TSF_U32);
6572 tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
6573 return tsf;
6574}
6575
6576void ath9k_hw_reset_tsf(struct ath_hal *ah)
6577{
6578 int count;
6579 2739
6580 count = 0; 2740 switch (mode) {
6581 while (REG_READ(ah, AR_SLP32_MODE) & AR_SLP32_TSF_WRITE_STATUS) { 2741 case ATH9K_PM_AWAKE:
6582 count++; 2742 status = ath9k_hw_set_power_awake(ah, setChip);
6583 if (count > 10) { 2743 break;
6584 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 2744 case ATH9K_PM_FULL_SLEEP:
6585 "%s: AR_SLP32_TSF_WRITE_STATUS limit exceeded\n", 2745 ath9k_set_power_sleep(ah, setChip);
6586 __func__); 2746 ahp->ah_chipFullSleep = true;
6587 break; 2747 break;
6588 } 2748 case ATH9K_PM_NETWORK_SLEEP:
6589 udelay(10); 2749 ath9k_set_power_network_sleep(ah, setChip);
2750 break;
2751 default:
2752 DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT,
2753 "Unknown power mode %u\n", mode);
2754 return false;
6590 } 2755 }
6591 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE); 2756 ahp->ah_powerMode = mode;
6592}
6593
6594u32 ath9k_hw_getdefantenna(struct ath_hal *ah)
6595{
6596 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
6597}
6598 2757
6599void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna) 2758 return status;
6600{
6601 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
6602} 2759}
6603 2760
6604bool 2761void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore)
6605ath9k_hw_setantennaswitch(struct ath_hal *ah,
6606 enum ath9k_ant_setting settings,
6607 struct ath9k_channel *chan,
6608 u8 *tx_chainmask,
6609 u8 *rx_chainmask,
6610 u8 *antenna_cfgd)
6611{ 2762{
6612 struct ath_hal_5416 *ahp = AH5416(ah); 2763 struct ath_hal_5416 *ahp = AH5416(ah);
6613 static u8 tx_chainmask_cfg, rx_chainmask_cfg; 2764 u8 i;
6614 2765
6615 if (AR_SREV_9280(ah)) { 2766 if (ah->ah_isPciExpress != true)
6616 if (!tx_chainmask_cfg) { 2767 return;
6617 2768
6618 tx_chainmask_cfg = *tx_chainmask; 2769 if (ah->ah_config.pcie_powersave_enable == 2)
6619 rx_chainmask_cfg = *rx_chainmask; 2770 return;
6620 }
6621 2771
6622 switch (settings) { 2772 if (restore)
6623 case ATH9K_ANT_FIXED_A: 2773 return;
6624 *tx_chainmask = ATH9K_ANTENNA0_CHAINMASK; 2774
6625 *rx_chainmask = ATH9K_ANTENNA0_CHAINMASK; 2775 if (AR_SREV_9280_20_OR_LATER(ah)) {
6626 *antenna_cfgd = true; 2776 for (i = 0; i < ahp->ah_iniPcieSerdes.ia_rows; i++) {
6627 break; 2777 REG_WRITE(ah, INI_RA(&ahp->ah_iniPcieSerdes, i, 0),
6628 case ATH9K_ANT_FIXED_B: 2778 INI_RA(&ahp->ah_iniPcieSerdes, i, 1));
6629 if (ah->ah_caps.tx_chainmask >
6630 ATH9K_ANTENNA1_CHAINMASK) {
6631 *tx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
6632 }
6633 *rx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
6634 *antenna_cfgd = true;
6635 break;
6636 case ATH9K_ANT_VARIABLE:
6637 *tx_chainmask = tx_chainmask_cfg;
6638 *rx_chainmask = rx_chainmask_cfg;
6639 *antenna_cfgd = true;
6640 break;
6641 default:
6642 break;
6643 } 2779 }
6644 } else { 2780 udelay(1000);
6645 ahp->ah_diversityControl = settings; 2781 } else if (AR_SREV_9280(ah) &&
6646 } 2782 (ah->ah_macRev == AR_SREV_REVISION_9280_10)) {
2783 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
2784 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
6647 2785
6648 return true; 2786 REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
6649} 2787 REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
2788 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
6650 2789
6651void ath9k_hw_setopmode(struct ath_hal *ah) 2790 if (ah->ah_config.pcie_clock_req)
6652{ 2791 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
6653 ath9k_hw_set_operating_mode(ah, ah->ah_opmode); 2792 else
6654} 2793 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
6655 2794
6656bool 2795 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
6657ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type, 2796 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
6658 u32 capability, u32 *result) 2797 REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
6659{
6660 struct ath_hal_5416 *ahp = AH5416(ah);
6661 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6662 2798
6663 switch (type) { 2799 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
6664 case ATH9K_CAP_CIPHER:
6665 switch (capability) {
6666 case ATH9K_CIPHER_AES_CCM:
6667 case ATH9K_CIPHER_AES_OCB:
6668 case ATH9K_CIPHER_TKIP:
6669 case ATH9K_CIPHER_WEP:
6670 case ATH9K_CIPHER_MIC:
6671 case ATH9K_CIPHER_CLR:
6672 return true;
6673 default:
6674 return false;
6675 }
6676 case ATH9K_CAP_TKIP_MIC:
6677 switch (capability) {
6678 case 0:
6679 return true;
6680 case 1:
6681 return (ahp->ah_staId1Defaults &
6682 AR_STA_ID1_CRPT_MIC_ENABLE) ? true :
6683 false;
6684 }
6685 case ATH9K_CAP_TKIP_SPLIT:
6686 return (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) ?
6687 false : true;
6688 case ATH9K_CAP_WME_TKIPMIC:
6689 return 0;
6690 case ATH9K_CAP_PHYCOUNTERS:
6691 return ahp->ah_hasHwPhyCounters ? 0 : -ENXIO;
6692 case ATH9K_CAP_DIVERSITY:
6693 return (REG_READ(ah, AR_PHY_CCK_DETECT) &
6694 AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
6695 true : false;
6696 case ATH9K_CAP_PHYDIAG:
6697 return true;
6698 case ATH9K_CAP_MCAST_KEYSRCH:
6699 switch (capability) {
6700 case 0:
6701 return true;
6702 case 1:
6703 if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) {
6704 return false;
6705 } else {
6706 return (ahp->ah_staId1Defaults &
6707 AR_STA_ID1_MCAST_KSRCH) ? true :
6708 false;
6709 }
6710 }
6711 return false;
6712 case ATH9K_CAP_TSF_ADJUST:
6713 return (ahp->ah_miscMode & AR_PCU_TX_ADD_TSF) ?
6714 true : false;
6715 case ATH9K_CAP_RFSILENT:
6716 if (capability == 3)
6717 return false;
6718 case ATH9K_CAP_ANT_CFG_2GHZ:
6719 *result = pCap->num_antcfg_2ghz;
6720 return true;
6721 case ATH9K_CAP_ANT_CFG_5GHZ:
6722 *result = pCap->num_antcfg_5ghz;
6723 return true;
6724 case ATH9K_CAP_TXPOW:
6725 switch (capability) {
6726 case 0:
6727 return 0;
6728 case 1:
6729 *result = ah->ah_powerLimit;
6730 return 0;
6731 case 2:
6732 *result = ah->ah_maxPowerLevel;
6733 return 0;
6734 case 3:
6735 *result = ah->ah_tpScale;
6736 return 0;
6737 }
6738 return false;
6739 default:
6740 return false;
6741 }
6742}
6743 2800
6744int 2801 udelay(1000);
6745ath9k_hw_select_antconfig(struct ath_hal *ah, u32 cfg) 2802 } else {
6746{ 2803 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
6747 struct ath_hal_5416 *ahp = AH5416(ah); 2804 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
6748 struct ath9k_channel *chan = ah->ah_curchan; 2805 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
6749 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 2806 REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
6750 u16 ant_config; 2807 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
6751 u32 halNumAntConfig; 2808 REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
2809 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
2810 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
2811 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
2812 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
2813 }
6752 2814
6753 halNumAntConfig = 2815 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
6754 IS_CHAN_2GHZ(chan) ? pCap->num_antcfg_2ghz : pCap->
6755 num_antcfg_5ghz;
6756 2816
6757 if (cfg < halNumAntConfig) { 2817 if (ah->ah_config.pcie_waen) {
6758 if (!ath9k_hw_get_eeprom_antenna_cfg(ahp, chan, 2818 REG_WRITE(ah, AR_WA, ah->ah_config.pcie_waen);
6759 cfg, &ant_config)) { 2819 } else {
6760 REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config); 2820 if (AR_SREV_9285(ah))
6761 return 0; 2821 REG_WRITE(ah, AR_WA, AR9285_WA_DEFAULT);
6762 } 2822 else if (AR_SREV_9280(ah))
2823 REG_WRITE(ah, AR_WA, AR9280_WA_DEFAULT);
2824 else
2825 REG_WRITE(ah, AR_WA, AR_WA_DEFAULT);
6763 } 2826 }
6764 2827
6765 return -EINVAL;
6766} 2828}
6767 2829
2830/**********************/
2831/* Interrupt Handling */
2832/**********************/
2833
6768bool ath9k_hw_intrpend(struct ath_hal *ah) 2834bool ath9k_hw_intrpend(struct ath_hal *ah)
6769{ 2835{
6770 u32 host_isr; 2836 u32 host_isr;
@@ -6791,6 +2857,7 @@ bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
6791 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 2857 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6792 u32 sync_cause = 0; 2858 u32 sync_cause = 0;
6793 bool fatal_int = false; 2859 bool fatal_int = false;
2860 struct ath_hal_5416 *ahp = AH5416(ah);
6794 2861
6795 if (!AR_SREV_9100(ah)) { 2862 if (!AR_SREV_9100(ah)) {
6796 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) { 2863 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
@@ -6800,9 +2867,8 @@ bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
6800 } 2867 }
6801 } 2868 }
6802 2869
6803 sync_cause = 2870 sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
6804 REG_READ(ah, 2871 AR_INTR_SYNC_DEFAULT;
6805 AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
6806 2872
6807 *masked = 0; 2873 *masked = 0;
6808 2874
@@ -6814,8 +2880,6 @@ bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
6814 } 2880 }
6815 2881
6816 if (isr) { 2882 if (isr) {
6817 struct ath_hal_5416 *ahp = AH5416(ah);
6818
6819 if (isr & AR_ISR_BCNMISC) { 2883 if (isr & AR_ISR_BCNMISC) {
6820 u32 isr2; 2884 u32 isr2;
6821 isr2 = REG_READ(ah, AR_ISR_S2); 2885 isr2 = REG_READ(ah, AR_ISR_S2);
@@ -6842,7 +2906,6 @@ bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
6842 *masked = isr & ATH9K_INT_COMMON; 2906 *masked = isr & ATH9K_INT_COMMON;
6843 2907
6844 if (ahp->ah_intrMitigation) { 2908 if (ahp->ah_intrMitigation) {
6845
6846 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) 2909 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
6847 *masked |= ATH9K_INT_RX; 2910 *masked |= ATH9K_INT_RX;
6848 } 2911 }
@@ -6867,8 +2930,7 @@ bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
6867 2930
6868 if (isr & AR_ISR_RXORN) { 2931 if (isr & AR_ISR_RXORN) {
6869 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 2932 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
6870 "%s: receive FIFO overrun interrupt\n", 2933 "receive FIFO overrun interrupt\n");
6871 __func__);
6872 } 2934 }
6873 2935
6874 if (!AR_SREV_9100(ah)) { 2936 if (!AR_SREV_9100(ah)) {
@@ -6881,8 +2943,10 @@ bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
6881 2943
6882 *masked |= mask2; 2944 *masked |= mask2;
6883 } 2945 }
2946
6884 if (AR_SREV_9100(ah)) 2947 if (AR_SREV_9100(ah))
6885 return true; 2948 return true;
2949
6886 if (sync_cause) { 2950 if (sync_cause) {
6887 fatal_int = 2951 fatal_int =
6888 (sync_cause & 2952 (sync_cause &
@@ -6892,32 +2956,29 @@ bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked)
6892 if (fatal_int) { 2956 if (fatal_int) {
6893 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) { 2957 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
6894 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 2958 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
6895 "%s: received PCI FATAL interrupt\n", 2959 "received PCI FATAL interrupt\n");
6896 __func__);
6897 } 2960 }
6898 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) { 2961 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
6899 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 2962 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
6900 "%s: received PCI PERR interrupt\n", 2963 "received PCI PERR interrupt\n");
6901 __func__);
6902 } 2964 }
6903 } 2965 }
6904 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { 2966 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
6905 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 2967 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
6906 "%s: AR_INTR_SYNC_RADM_CPL_TIMEOUT\n", 2968 "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
6907 __func__);
6908 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); 2969 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
6909 REG_WRITE(ah, AR_RC, 0); 2970 REG_WRITE(ah, AR_RC, 0);
6910 *masked |= ATH9K_INT_FATAL; 2971 *masked |= ATH9K_INT_FATAL;
6911 } 2972 }
6912 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) { 2973 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
6913 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 2974 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
6914 "%s: AR_INTR_SYNC_LOCAL_TIMEOUT\n", 2975 "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
6915 __func__);
6916 } 2976 }
6917 2977
6918 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); 2978 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
6919 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR); 2979 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
6920 } 2980 }
2981
6921 return true; 2982 return true;
6922} 2983}
6923 2984
@@ -6933,12 +2994,10 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints)
6933 u32 mask, mask2; 2994 u32 mask, mask2;
6934 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 2995 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
6935 2996
6936 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: 0x%x => 0x%x\n", __func__, 2997 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
6937 omask, ints);
6938 2998
6939 if (omask & ATH9K_INT_GLOBAL) { 2999 if (omask & ATH9K_INT_GLOBAL) {
6940 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: disable IER\n", 3000 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "disable IER\n");
6941 __func__);
6942 REG_WRITE(ah, AR_IER, AR_IER_DISABLE); 3001 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
6943 (void) REG_READ(ah, AR_IER); 3002 (void) REG_READ(ah, AR_IER);
6944 if (!AR_SREV_9100(ah)) { 3003 if (!AR_SREV_9100(ah)) {
@@ -6993,8 +3052,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints)
6993 mask2 |= AR_IMR_S2_CST; 3052 mask2 |= AR_IMR_S2_CST;
6994 } 3053 }
6995 3054
6996 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: new IMR 0x%x\n", __func__, 3055 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
6997 mask);
6998 REG_WRITE(ah, AR_IMR, mask); 3056 REG_WRITE(ah, AR_IMR, mask);
6999 mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM | 3057 mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
7000 AR_IMR_S2_DTIM | 3058 AR_IMR_S2_DTIM |
@@ -7014,8 +3072,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints)
7014 } 3072 }
7015 3073
7016 if (ints & ATH9K_INT_GLOBAL) { 3074 if (ints & ATH9K_INT_GLOBAL) {
7017 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: enable IER\n", 3075 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "enable IER\n");
7018 __func__);
7019 REG_WRITE(ah, AR_IER, AR_IER_ENABLE); 3076 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
7020 if (!AR_SREV_9100(ah)) { 3077 if (!AR_SREV_9100(ah)) {
7021 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 3078 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
@@ -7035,9 +3092,11 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints)
7035 return omask; 3092 return omask;
7036} 3093}
7037 3094
7038void 3095/*******************/
7039ath9k_hw_beaconinit(struct ath_hal *ah, 3096/* Beacon Handling */
7040 u32 next_beacon, u32 beacon_period) 3097/*******************/
3098
3099void ath9k_hw_beaconinit(struct ath_hal *ah, u32 next_beacon, u32 beacon_period)
7041{ 3100{
7042 struct ath_hal_5416 *ahp = AH5416(ah); 3101 struct ath_hal_5416 *ahp = AH5416(ah);
7043 int flags = 0; 3102 int flags = 0;
@@ -7045,14 +3104,14 @@ ath9k_hw_beaconinit(struct ath_hal *ah,
7045 ahp->ah_beaconInterval = beacon_period; 3104 ahp->ah_beaconInterval = beacon_period;
7046 3105
7047 switch (ah->ah_opmode) { 3106 switch (ah->ah_opmode) {
7048 case ATH9K_M_STA: 3107 case NL80211_IFTYPE_STATION:
7049 case ATH9K_M_MONITOR: 3108 case NL80211_IFTYPE_MONITOR:
7050 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); 3109 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
7051 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); 3110 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
7052 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); 3111 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
7053 flags |= AR_TBTT_TIMER_EN; 3112 flags |= AR_TBTT_TIMER_EN;
7054 break; 3113 break;
7055 case ATH9K_M_IBSS: 3114 case NL80211_IFTYPE_ADHOC:
7056 REG_SET_BIT(ah, AR_TXCFG, 3115 REG_SET_BIT(ah, AR_TXCFG,
7057 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); 3116 AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
7058 REG_WRITE(ah, AR_NEXT_NDP_TIMER, 3117 REG_WRITE(ah, AR_NEXT_NDP_TIMER,
@@ -7060,7 +3119,7 @@ ath9k_hw_beaconinit(struct ath_hal *ah,
7060 (ahp->ah_atimWindow ? ahp-> 3119 (ahp->ah_atimWindow ? ahp->
7061 ah_atimWindow : 1))); 3120 ah_atimWindow : 1)));
7062 flags |= AR_NDP_TIMER_EN; 3121 flags |= AR_NDP_TIMER_EN;
7063 case ATH9K_M_HOSTAP: 3122 case NL80211_IFTYPE_AP:
7064 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); 3123 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
7065 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 3124 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT,
7066 TU_TO_USEC(next_beacon - 3125 TU_TO_USEC(next_beacon -
@@ -7073,6 +3132,12 @@ ath9k_hw_beaconinit(struct ath_hal *ah,
7073 flags |= 3132 flags |=
7074 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; 3133 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
7075 break; 3134 break;
3135 default:
3136 DPRINTF(ah->ah_sc, ATH_DBG_BEACON,
3137 "%s: unsupported opmode: %d\n",
3138 __func__, ah->ah_opmode);
3139 return;
3140 break;
7076 } 3141 }
7077 3142
7078 REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period)); 3143 REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period));
@@ -7089,9 +3154,8 @@ ath9k_hw_beaconinit(struct ath_hal *ah,
7089 REG_SET_BIT(ah, AR_TIMER_MODE, flags); 3154 REG_SET_BIT(ah, AR_TIMER_MODE, flags);
7090} 3155}
7091 3156
7092void 3157void ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
7093ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah, 3158 const struct ath9k_beacon_state *bs)
7094 const struct ath9k_beacon_state *bs)
7095{ 3159{
7096 u32 nextTbtt, beaconintval, dtimperiod, beacontimeout; 3160 u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
7097 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 3161 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
@@ -7120,14 +3184,10 @@ ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
7120 else 3184 else
7121 nextTbtt = bs->bs_nexttbtt; 3185 nextTbtt = bs->bs_nexttbtt;
7122 3186
7123 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: next DTIM %d\n", __func__, 3187 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
7124 bs->bs_nextdtim); 3188 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
7125 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: next beacon %d\n", __func__, 3189 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
7126 nextTbtt); 3190 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
7127 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: beacon period %d\n", __func__,
7128 beaconintval);
7129 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: DTIM period %d\n", __func__,
7130 dtimperiod);
7131 3191
7132 REG_WRITE(ah, AR_NEXT_DTIM, 3192 REG_WRITE(ah, AR_NEXT_DTIM,
7133 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP)); 3193 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
@@ -7154,1424 +3214,682 @@ ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah,
7154 3214
7155} 3215}
7156 3216
7157bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry) 3217/*******************/
7158{ 3218/* HW Capabilities */
7159 if (entry < ah->ah_caps.keycache_size) { 3219/*******************/
7160 u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry));
7161 if (val & AR_KEYTABLE_VALID)
7162 return true;
7163 }
7164 return false;
7165}
7166 3220
7167bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry) 3221bool ath9k_hw_fill_cap_info(struct ath_hal *ah)
7168{ 3222{
7169 u32 keyType; 3223 struct ath_hal_5416 *ahp = AH5416(ah);
3224 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
3225 u16 capField = 0, eeval;
7170 3226
7171 if (entry >= ah->ah_caps.keycache_size) { 3227 eeval = ath9k_hw_get_eeprom(ah, EEP_REG_0);
7172 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7173 "%s: entry %u out of range\n", __func__, entry);
7174 return false;
7175 }
7176 keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry));
7177 3228
7178 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0); 3229 ah->ah_currentRD = eeval;
7179 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0);
7180 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0);
7181 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0);
7182 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0);
7183 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR);
7184 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0);
7185 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0);
7186 3230
7187 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) { 3231 eeval = ath9k_hw_get_eeprom(ah, EEP_REG_1);
7188 u16 micentry = entry + 64; 3232 ah->ah_currentRDExt = eeval;
7189 3233
7190 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0); 3234 capField = ath9k_hw_get_eeprom(ah, EEP_OP_CAP);
7191 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
7192 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
7193 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
7194 3235
3236 if (ah->ah_opmode != NL80211_IFTYPE_AP &&
3237 ah->ah_subvendorid == AR_SUBVENDOR_ID_NEW_A) {
3238 if (ah->ah_currentRD == 0x64 || ah->ah_currentRD == 0x65)
3239 ah->ah_currentRD += 5;
3240 else if (ah->ah_currentRD == 0x41)
3241 ah->ah_currentRD = 0x43;
3242 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
3243 "regdomain mapped to 0x%x\n", ah->ah_currentRD);
7195 } 3244 }
7196 3245
7197 if (ah->ah_curchan == NULL) 3246 eeval = ath9k_hw_get_eeprom(ah, EEP_OP_MODE);
7198 return true; 3247 bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX);
7199
7200 return true;
7201}
7202
7203bool
7204ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry,
7205 const u8 *mac)
7206{
7207 u32 macHi, macLo;
7208
7209 if (entry >= ah->ah_caps.keycache_size) {
7210 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7211 "%s: entry %u out of range\n", __func__, entry);
7212 return false;
7213 }
7214 3248
7215 if (mac != NULL) { 3249 if (eeval & AR5416_OPFLAGS_11A) {
7216 macHi = (mac[5] << 8) | mac[4]; 3250 set_bit(ATH9K_MODE_11A, pCap->wireless_modes);
7217 macLo = (mac[3] << 24) | (mac[2] << 16) 3251 if (ah->ah_config.ht_enable) {
7218 | (mac[1] << 8) | mac[0]; 3252 if (!(eeval & AR5416_OPFLAGS_N_5G_HT20))
7219 macLo >>= 1; 3253 set_bit(ATH9K_MODE_11NA_HT20,
7220 macLo |= (macHi & 1) << 31; 3254 pCap->wireless_modes);
7221 macHi >>= 1; 3255 if (!(eeval & AR5416_OPFLAGS_N_5G_HT40)) {
7222 } else { 3256 set_bit(ATH9K_MODE_11NA_HT40PLUS,
7223 macLo = macHi = 0; 3257 pCap->wireless_modes);
3258 set_bit(ATH9K_MODE_11NA_HT40MINUS,
3259 pCap->wireless_modes);
3260 }
3261 }
7224 } 3262 }
7225 REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo);
7226 REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID);
7227 3263
7228 return true; 3264 if (eeval & AR5416_OPFLAGS_11G) {
7229} 3265 set_bit(ATH9K_MODE_11B, pCap->wireless_modes);
7230 3266 set_bit(ATH9K_MODE_11G, pCap->wireless_modes);
7231bool 3267 if (ah->ah_config.ht_enable) {
7232ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry, 3268 if (!(eeval & AR5416_OPFLAGS_N_2G_HT20))
7233 const struct ath9k_keyval *k, 3269 set_bit(ATH9K_MODE_11NG_HT20,
7234 const u8 *mac, int xorKey) 3270 pCap->wireless_modes);
7235{ 3271 if (!(eeval & AR5416_OPFLAGS_N_2G_HT40)) {
7236 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 3272 set_bit(ATH9K_MODE_11NG_HT40PLUS,
7237 u32 key0, key1, key2, key3, key4; 3273 pCap->wireless_modes);
7238 u32 keyType; 3274 set_bit(ATH9K_MODE_11NG_HT40MINUS,
7239 u32 xorMask = xorKey ? 3275 pCap->wireless_modes);
7240 (ATH9K_KEY_XOR << 24 | ATH9K_KEY_XOR << 16 | ATH9K_KEY_XOR << 8 3276 }
7241 | ATH9K_KEY_XOR) : 0;
7242 struct ath_hal_5416 *ahp = AH5416(ah);
7243
7244 if (entry >= pCap->keycache_size) {
7245 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7246 "%s: entry %u out of range\n", __func__, entry);
7247 return false;
7248 }
7249 switch (k->kv_type) {
7250 case ATH9K_CIPHER_AES_OCB:
7251 keyType = AR_KEYTABLE_TYPE_AES;
7252 break;
7253 case ATH9K_CIPHER_AES_CCM:
7254 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
7255 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7256 "%s: AES-CCM not supported by "
7257 "mac rev 0x%x\n", __func__,
7258 ah->ah_macRev);
7259 return false;
7260 }
7261 keyType = AR_KEYTABLE_TYPE_CCM;
7262 break;
7263 case ATH9K_CIPHER_TKIP:
7264 keyType = AR_KEYTABLE_TYPE_TKIP;
7265 if (ATH9K_IS_MIC_ENABLED(ah)
7266 && entry + 64 >= pCap->keycache_size) {
7267 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7268 "%s: entry %u inappropriate for TKIP\n",
7269 __func__, entry);
7270 return false;
7271 }
7272 break;
7273 case ATH9K_CIPHER_WEP:
7274 if (k->kv_len < LEN_WEP40) {
7275 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7276 "%s: WEP key length %u too small\n",
7277 __func__, k->kv_len);
7278 return false;
7279 } 3277 }
7280 if (k->kv_len <= LEN_WEP40)
7281 keyType = AR_KEYTABLE_TYPE_40;
7282 else if (k->kv_len <= LEN_WEP104)
7283 keyType = AR_KEYTABLE_TYPE_104;
7284 else
7285 keyType = AR_KEYTABLE_TYPE_128;
7286 break;
7287 case ATH9K_CIPHER_CLR:
7288 keyType = AR_KEYTABLE_TYPE_CLR;
7289 break;
7290 default:
7291 DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE,
7292 "%s: cipher %u not supported\n", __func__,
7293 k->kv_type);
7294 return false;
7295 } 3278 }
7296 3279
7297 key0 = get_unaligned_le32(k->kv_val + 0) ^ xorMask; 3280 pCap->tx_chainmask = ath9k_hw_get_eeprom(ah, EEP_TX_MASK);
7298 key1 = (get_unaligned_le16(k->kv_val + 4) ^ xorMask) & 0xffff; 3281 if ((ah->ah_isPciExpress)
7299 key2 = get_unaligned_le32(k->kv_val + 6) ^ xorMask; 3282 || (eeval & AR5416_OPFLAGS_11A)) {
7300 key3 = (get_unaligned_le16(k->kv_val + 10) ^ xorMask) & 0xffff; 3283 pCap->rx_chainmask =
7301 key4 = get_unaligned_le32(k->kv_val + 12) ^ xorMask; 3284 ath9k_hw_get_eeprom(ah, EEP_RX_MASK);
7302 if (k->kv_len <= LEN_WEP104)
7303 key4 &= 0xff;
7304
7305 if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) {
7306 u16 micentry = entry + 64;
7307
7308 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0);
7309 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1);
7310 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
7311 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
7312 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
7313 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
7314 (void) ath9k_hw_keysetmac(ah, entry, mac);
7315
7316 if (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) {
7317 u32 mic0, mic1, mic2, mic3, mic4;
7318
7319 mic0 = get_unaligned_le32(k->kv_mic + 0);
7320 mic2 = get_unaligned_le32(k->kv_mic + 4);
7321 mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff;
7322 mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff;
7323 mic4 = get_unaligned_le32(k->kv_txmic + 4);
7324 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
7325 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1);
7326 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
7327 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3);
7328 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4);
7329 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
7330 AR_KEYTABLE_TYPE_CLR);
7331
7332 } else {
7333 u32 mic0, mic2;
7334
7335 mic0 = get_unaligned_le32(k->kv_mic + 0);
7336 mic2 = get_unaligned_le32(k->kv_mic + 4);
7337 REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0);
7338 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
7339 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2);
7340 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
7341 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
7342 REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry),
7343 AR_KEYTABLE_TYPE_CLR);
7344 }
7345 REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0);
7346 REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0);
7347 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0);
7348 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1);
7349 } else { 3285 } else {
7350 REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); 3286 pCap->rx_chainmask =
7351 REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); 3287 (ath9k_hw_gpio_get(ah, 0)) ? 0x5 : 0x7;
7352 REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2);
7353 REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3);
7354 REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4);
7355 REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType);
7356
7357 (void) ath9k_hw_keysetmac(ah, entry, mac);
7358 } 3288 }
7359 3289
7360 if (ah->ah_curchan == NULL) 3290 if (!(AR_SREV_9280(ah) && (ah->ah_macRev == 0)))
7361 return true; 3291 ahp->ah_miscMode |= AR_PCU_MIC_NEW_LOC_ENA;
7362
7363 return true;
7364}
7365
7366bool
7367ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel)
7368{
7369 struct ath_hal_5416 *ahp = AH5416(ah);
7370 u32 txcfg, curLevel, newLevel;
7371 enum ath9k_int omask;
7372
7373 if (ah->ah_txTrigLevel >= MAX_TX_FIFO_THRESHOLD)
7374 return false;
7375
7376 omask = ath9k_hw_set_interrupts(ah,
7377 ahp->ah_maskReg & ~ATH9K_INT_GLOBAL);
7378
7379 txcfg = REG_READ(ah, AR_TXCFG);
7380 curLevel = MS(txcfg, AR_FTRIG);
7381 newLevel = curLevel;
7382 if (bIncTrigLevel) {
7383 if (curLevel < MAX_TX_FIFO_THRESHOLD)
7384 newLevel++;
7385 } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
7386 newLevel--;
7387 if (newLevel != curLevel)
7388 REG_WRITE(ah, AR_TXCFG,
7389 (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
7390 3292
7391 ath9k_hw_set_interrupts(ah, omask); 3293 pCap->low_2ghz_chan = 2312;
3294 pCap->high_2ghz_chan = 2732;
7392 3295
7393 ah->ah_txTrigLevel = newLevel; 3296 pCap->low_5ghz_chan = 4920;
3297 pCap->high_5ghz_chan = 6100;
7394 3298
7395 return newLevel != curLevel; 3299 pCap->hw_caps &= ~ATH9K_HW_CAP_CIPHER_CKIP;
7396} 3300 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_TKIP;
3301 pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_AESCCM;
7397 3302
7398bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q, 3303 pCap->hw_caps &= ~ATH9K_HW_CAP_MIC_CKIP;
7399 const struct ath9k_tx_queue_info *qinfo) 3304 pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP;
7400{ 3305 pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM;
7401 u32 cw;
7402 struct ath_hal_5416 *ahp = AH5416(ah);
7403 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7404 struct ath9k_tx_queue_info *qi;
7405 3306
7406 if (q >= pCap->total_queues) { 3307 pCap->hw_caps |= ATH9K_HW_CAP_CHAN_SPREAD;
7407 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
7408 __func__, q);
7409 return false;
7410 }
7411 3308
7412 qi = &ahp->ah_txq[q]; 3309 if (ah->ah_config.ht_enable)
7413 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 3310 pCap->hw_caps |= ATH9K_HW_CAP_HT;
7414 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n", 3311 else
7415 __func__); 3312 pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
7416 return false;
7417 }
7418 3313
7419 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %p\n", __func__, qi); 3314 pCap->hw_caps |= ATH9K_HW_CAP_GTT;
3315 pCap->hw_caps |= ATH9K_HW_CAP_VEOL;
3316 pCap->hw_caps |= ATH9K_HW_CAP_BSSIDMASK;
3317 pCap->hw_caps &= ~ATH9K_HW_CAP_MCAST_KEYSEARCH;
7420 3318
7421 qi->tqi_ver = qinfo->tqi_ver; 3319 if (capField & AR_EEPROM_EEPCAP_MAXQCU)
7422 qi->tqi_subtype = qinfo->tqi_subtype; 3320 pCap->total_queues =
7423 qi->tqi_qflags = qinfo->tqi_qflags; 3321 MS(capField, AR_EEPROM_EEPCAP_MAXQCU);
7424 qi->tqi_priority = qinfo->tqi_priority;
7425 if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
7426 qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
7427 else 3322 else
7428 qi->tqi_aifs = INIT_AIFS; 3323 pCap->total_queues = ATH9K_NUM_TX_QUEUES;
7429 if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
7430 cw = min(qinfo->tqi_cwmin, 1024U);
7431 qi->tqi_cwmin = 1;
7432 while (qi->tqi_cwmin < cw)
7433 qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
7434 } else
7435 qi->tqi_cwmin = qinfo->tqi_cwmin;
7436 if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
7437 cw = min(qinfo->tqi_cwmax, 1024U);
7438 qi->tqi_cwmax = 1;
7439 while (qi->tqi_cwmax < cw)
7440 qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
7441 } else
7442 qi->tqi_cwmax = INIT_CWMAX;
7443 3324
7444 if (qinfo->tqi_shretry != 0) 3325 if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES)
7445 qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U); 3326 pCap->keycache_size =
7446 else 3327 1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES);
7447 qi->tqi_shretry = INIT_SH_RETRY;
7448 if (qinfo->tqi_lgretry != 0)
7449 qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
7450 else 3328 else
7451 qi->tqi_lgretry = INIT_LG_RETRY; 3329 pCap->keycache_size = AR_KEYTABLE_SIZE;
7452 qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
7453 qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
7454 qi->tqi_burstTime = qinfo->tqi_burstTime;
7455 qi->tqi_readyTime = qinfo->tqi_readyTime;
7456
7457 switch (qinfo->tqi_subtype) {
7458 case ATH9K_WME_UPSD:
7459 if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
7460 qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
7461 break;
7462 default:
7463 break;
7464 }
7465 return true;
7466}
7467 3330
7468bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q, 3331 pCap->hw_caps |= ATH9K_HW_CAP_FASTCC;
7469 struct ath9k_tx_queue_info *qinfo) 3332 pCap->num_mr_retries = 4;
7470{ 3333 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
7471 struct ath_hal_5416 *ahp = AH5416(ah);
7472 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7473 struct ath9k_tx_queue_info *qi;
7474 3334
7475 if (q >= pCap->total_queues) { 3335 if (AR_SREV_9280_10_OR_LATER(ah))
7476 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n", 3336 pCap->num_gpio_pins = AR928X_NUM_GPIO;
7477 __func__, q); 3337 else
7478 return false; 3338 pCap->num_gpio_pins = AR_NUM_GPIO;
7479 }
7480 3339
7481 qi = &ahp->ah_txq[q]; 3340 if (AR_SREV_9280_10_OR_LATER(ah)) {
7482 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 3341 pCap->hw_caps |= ATH9K_HW_CAP_WOW;
7483 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n", 3342 pCap->hw_caps |= ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
7484 __func__); 3343 } else {
7485 return false; 3344 pCap->hw_caps &= ~ATH9K_HW_CAP_WOW;
3345 pCap->hw_caps &= ~ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT;
7486 } 3346 }
7487 3347
7488 qinfo->tqi_qflags = qi->tqi_qflags; 3348 if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
7489 qinfo->tqi_ver = qi->tqi_ver; 3349 pCap->hw_caps |= ATH9K_HW_CAP_CST;
7490 qinfo->tqi_subtype = qi->tqi_subtype; 3350 pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
7491 qinfo->tqi_qflags = qi->tqi_qflags; 3351 } else {
7492 qinfo->tqi_priority = qi->tqi_priority; 3352 pCap->rts_aggr_limit = (8 * 1024);
7493 qinfo->tqi_aifs = qi->tqi_aifs; 3353 }
7494 qinfo->tqi_cwmin = qi->tqi_cwmin;
7495 qinfo->tqi_cwmax = qi->tqi_cwmax;
7496 qinfo->tqi_shretry = qi->tqi_shretry;
7497 qinfo->tqi_lgretry = qi->tqi_lgretry;
7498 qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
7499 qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
7500 qinfo->tqi_burstTime = qi->tqi_burstTime;
7501 qinfo->tqi_readyTime = qi->tqi_readyTime;
7502 3354
7503 return true; 3355 pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM;
7504}
7505 3356
7506int 3357#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
7507ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type, 3358 ah->ah_rfsilent = ath9k_hw_get_eeprom(ah, EEP_RF_SILENT);
7508 const struct ath9k_tx_queue_info *qinfo) 3359 if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) {
7509{ 3360 ah->ah_rfkill_gpio =
7510 struct ath_hal_5416 *ahp = AH5416(ah); 3361 MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL);
7511 struct ath9k_tx_queue_info *qi; 3362 ah->ah_rfkill_polarity =
7512 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 3363 MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY);
7513 int q;
7514 3364
7515 switch (type) { 3365 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
7516 case ATH9K_TX_QUEUE_BEACON:
7517 q = pCap->total_queues - 1;
7518 break;
7519 case ATH9K_TX_QUEUE_CAB:
7520 q = pCap->total_queues - 2;
7521 break;
7522 case ATH9K_TX_QUEUE_PSPOLL:
7523 q = 1;
7524 break;
7525 case ATH9K_TX_QUEUE_UAPSD:
7526 q = pCap->total_queues - 3;
7527 break;
7528 case ATH9K_TX_QUEUE_DATA:
7529 for (q = 0; q < pCap->total_queues; q++)
7530 if (ahp->ah_txq[q].tqi_type ==
7531 ATH9K_TX_QUEUE_INACTIVE)
7532 break;
7533 if (q == pCap->total_queues) {
7534 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
7535 "%s: no available tx queue\n", __func__);
7536 return -1;
7537 }
7538 break;
7539 default:
7540 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: bad tx queue type %u\n",
7541 __func__, type);
7542 return -1;
7543 } 3366 }
3367#endif
3368
3369 if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) ||
3370 (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) ||
3371 (ah->ah_macVersion == AR_SREV_VERSION_9160) ||
3372 (ah->ah_macVersion == AR_SREV_VERSION_9100) ||
3373 (ah->ah_macVersion == AR_SREV_VERSION_9280))
3374 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
3375 else
3376 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
7544 3377
7545 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q); 3378 if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
3379 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
3380 else
3381 pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
7546 3382
7547 qi = &ahp->ah_txq[q]; 3383 if (ah->ah_currentRDExt & (1 << REG_EXT_JAPAN_MIDBAND)) {
7548 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) { 3384 pCap->reg_cap =
7549 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, 3385 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
7550 "%s: tx queue %u already active\n", __func__, q); 3386 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN |
7551 return -1; 3387 AR_EEPROM_EEREGCAP_EN_KK_U2 |
7552 } 3388 AR_EEPROM_EEREGCAP_EN_KK_MIDBAND;
7553 memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
7554 qi->tqi_type = type;
7555 if (qinfo == NULL) {
7556 qi->tqi_qflags =
7557 TXQ_FLAG_TXOKINT_ENABLE
7558 | TXQ_FLAG_TXERRINT_ENABLE
7559 | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
7560 qi->tqi_aifs = INIT_AIFS;
7561 qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
7562 qi->tqi_cwmax = INIT_CWMAX;
7563 qi->tqi_shretry = INIT_SH_RETRY;
7564 qi->tqi_lgretry = INIT_LG_RETRY;
7565 qi->tqi_physCompBuf = 0;
7566 } else { 3389 } else {
7567 qi->tqi_physCompBuf = qinfo->tqi_physCompBuf; 3390 pCap->reg_cap =
7568 (void) ath9k_hw_set_txq_props(ah, q, qinfo); 3391 AR_EEPROM_EEREGCAP_EN_KK_NEW_11A |
3392 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
7569 } 3393 }
7570 3394
7571 return q; 3395 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
7572}
7573 3396
7574static void 3397 pCap->num_antcfg_5ghz =
7575ath9k_hw_set_txq_interrupts(struct ath_hal *ah, 3398 ath9k_hw_get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ);
7576 struct ath9k_tx_queue_info *qi) 3399 pCap->num_antcfg_2ghz =
7577{ 3400 ath9k_hw_get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ);
7578 struct ath_hal_5416 *ahp = AH5416(ah);
7579 3401
7580 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 3402 return true;
7581 "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
7582 __func__, ahp->ah_txOkInterruptMask,
7583 ahp->ah_txErrInterruptMask, ahp->ah_txDescInterruptMask,
7584 ahp->ah_txEolInterruptMask, ahp->ah_txUrnInterruptMask);
7585
7586 REG_WRITE(ah, AR_IMR_S0,
7587 SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
7588 | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC));
7589 REG_WRITE(ah, AR_IMR_S1,
7590 SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
7591 | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL));
7592 REG_RMW_FIELD(ah, AR_IMR_S2,
7593 AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
7594} 3403}
7595 3404
7596bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q) 3405bool ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type,
3406 u32 capability, u32 *result)
7597{ 3407{
7598 struct ath_hal_5416 *ahp = AH5416(ah); 3408 struct ath_hal_5416 *ahp = AH5416(ah);
7599 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 3409 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
7600 struct ath9k_tx_queue_info *qi;
7601 3410
7602 if (q >= pCap->total_queues) { 3411 switch (type) {
7603 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n", 3412 case ATH9K_CAP_CIPHER:
7604 __func__, q); 3413 switch (capability) {
3414 case ATH9K_CIPHER_AES_CCM:
3415 case ATH9K_CIPHER_AES_OCB:
3416 case ATH9K_CIPHER_TKIP:
3417 case ATH9K_CIPHER_WEP:
3418 case ATH9K_CIPHER_MIC:
3419 case ATH9K_CIPHER_CLR:
3420 return true;
3421 default:
3422 return false;
3423 }
3424 case ATH9K_CAP_TKIP_MIC:
3425 switch (capability) {
3426 case 0:
3427 return true;
3428 case 1:
3429 return (ahp->ah_staId1Defaults &
3430 AR_STA_ID1_CRPT_MIC_ENABLE) ? true :
3431 false;
3432 }
3433 case ATH9K_CAP_TKIP_SPLIT:
3434 return (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) ?
3435 false : true;
3436 case ATH9K_CAP_WME_TKIPMIC:
3437 return 0;
3438 case ATH9K_CAP_PHYCOUNTERS:
3439 return ahp->ah_hasHwPhyCounters ? 0 : -ENXIO;
3440 case ATH9K_CAP_DIVERSITY:
3441 return (REG_READ(ah, AR_PHY_CCK_DETECT) &
3442 AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
3443 true : false;
3444 case ATH9K_CAP_PHYDIAG:
3445 return true;
3446 case ATH9K_CAP_MCAST_KEYSRCH:
3447 switch (capability) {
3448 case 0:
3449 return true;
3450 case 1:
3451 if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) {
3452 return false;
3453 } else {
3454 return (ahp->ah_staId1Defaults &
3455 AR_STA_ID1_MCAST_KSRCH) ? true :
3456 false;
3457 }
3458 }
7605 return false; 3459 return false;
7606 } 3460 case ATH9K_CAP_TSF_ADJUST:
7607 qi = &ahp->ah_txq[q]; 3461 return (ahp->ah_miscMode & AR_PCU_TX_ADD_TSF) ?
7608 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 3462 true : false;
7609 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n", 3463 case ATH9K_CAP_RFSILENT:
7610 __func__, q); 3464 if (capability == 3)
3465 return false;
3466 case ATH9K_CAP_ANT_CFG_2GHZ:
3467 *result = pCap->num_antcfg_2ghz;
3468 return true;
3469 case ATH9K_CAP_ANT_CFG_5GHZ:
3470 *result = pCap->num_antcfg_5ghz;
3471 return true;
3472 case ATH9K_CAP_TXPOW:
3473 switch (capability) {
3474 case 0:
3475 return 0;
3476 case 1:
3477 *result = ah->ah_powerLimit;
3478 return 0;
3479 case 2:
3480 *result = ah->ah_maxPowerLevel;
3481 return 0;
3482 case 3:
3483 *result = ah->ah_tpScale;
3484 return 0;
3485 }
3486 return false;
3487 default:
7611 return false; 3488 return false;
7612 } 3489 }
7613
7614 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: release queue %u\n",
7615 __func__, q);
7616
7617 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
7618 ahp->ah_txOkInterruptMask &= ~(1 << q);
7619 ahp->ah_txErrInterruptMask &= ~(1 << q);
7620 ahp->ah_txDescInterruptMask &= ~(1 << q);
7621 ahp->ah_txEolInterruptMask &= ~(1 << q);
7622 ahp->ah_txUrnInterruptMask &= ~(1 << q);
7623 ath9k_hw_set_txq_interrupts(ah, qi);
7624
7625 return true;
7626} 3490}
7627 3491
7628bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q) 3492bool ath9k_hw_setcapability(struct ath_hal *ah, enum ath9k_capability_type type,
3493 u32 capability, u32 setting, int *status)
7629{ 3494{
7630 struct ath_hal_5416 *ahp = AH5416(ah); 3495 struct ath_hal_5416 *ahp = AH5416(ah);
7631 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 3496 u32 v;
7632 struct ath9k_channel *chan = ah->ah_curchan;
7633 struct ath9k_tx_queue_info *qi;
7634 u32 cwMin, chanCwMin, value;
7635 3497
7636 if (q >= pCap->total_queues) { 3498 switch (type) {
7637 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n", 3499 case ATH9K_CAP_TKIP_MIC:
7638 __func__, q); 3500 if (setting)
7639 return false; 3501 ahp->ah_staId1Defaults |=
7640 } 3502 AR_STA_ID1_CRPT_MIC_ENABLE;
7641 qi = &ahp->ah_txq[q]; 3503 else
7642 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 3504 ahp->ah_staId1Defaults &=
7643 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n", 3505 ~AR_STA_ID1_CRPT_MIC_ENABLE;
7644 __func__, q);
7645 return true; 3506 return true;
7646 } 3507 case ATH9K_CAP_DIVERSITY:
7647 3508 v = REG_READ(ah, AR_PHY_CCK_DETECT);
7648 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: reset queue %u\n", __func__, q); 3509 if (setting)
7649 3510 v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
7650 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
7651 if (chan && IS_CHAN_B(chan))
7652 chanCwMin = INIT_CWMIN_11B;
7653 else 3511 else
7654 chanCwMin = INIT_CWMIN; 3512 v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
7655 3513 REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
7656 for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1); 3514 return true;
7657 } else 3515 case ATH9K_CAP_MCAST_KEYSRCH:
7658 cwMin = qi->tqi_cwmin; 3516 if (setting)
7659 3517 ahp->ah_staId1Defaults |= AR_STA_ID1_MCAST_KSRCH;
7660 REG_WRITE(ah, AR_DLCL_IFS(q), SM(cwMin, AR_D_LCL_IFS_CWMIN) 3518 else
7661 | SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) 3519 ahp->ah_staId1Defaults &= ~AR_STA_ID1_MCAST_KSRCH;
7662 | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS)); 3520 return true;
7663 3521 case ATH9K_CAP_TSF_ADJUST:
7664 REG_WRITE(ah, AR_DRETRY_LIMIT(q), 3522 if (setting)
7665 SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) 3523 ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
7666 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) 3524 else
7667 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)); 3525 ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
7668 3526 return true;
7669 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
7670 REG_WRITE(ah, AR_DMISC(q),
7671 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
7672
7673 if (qi->tqi_cbrPeriod) {
7674 REG_WRITE(ah, AR_QCBRCFG(q),
7675 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL)
7676 | SM(qi->tqi_cbrOverflowLimit,
7677 AR_Q_CBRCFG_OVF_THRESH));
7678 REG_WRITE(ah, AR_QMISC(q),
7679 REG_READ(ah,
7680 AR_QMISC(q)) | AR_Q_MISC_FSP_CBR | (qi->
7681 tqi_cbrOverflowLimit
7682 ?
7683 AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN
7684 :
7685 0));
7686 }
7687 if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
7688 REG_WRITE(ah, AR_QRDYTIMECFG(q),
7689 SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
7690 AR_Q_RDYTIMECFG_EN);
7691 }
7692
7693 REG_WRITE(ah, AR_DCHNTIME(q),
7694 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
7695 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
7696
7697 if (qi->tqi_burstTime
7698 && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
7699 REG_WRITE(ah, AR_QMISC(q),
7700 REG_READ(ah,
7701 AR_QMISC(q)) |
7702 AR_Q_MISC_RDYTIME_EXP_POLICY);
7703
7704 }
7705
7706 if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
7707 REG_WRITE(ah, AR_DMISC(q),
7708 REG_READ(ah, AR_DMISC(q)) |
7709 AR_D_MISC_POST_FR_BKOFF_DIS);
7710 }
7711 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
7712 REG_WRITE(ah, AR_DMISC(q),
7713 REG_READ(ah, AR_DMISC(q)) |
7714 AR_D_MISC_FRAG_BKOFF_EN);
7715 }
7716 switch (qi->tqi_type) {
7717 case ATH9K_TX_QUEUE_BEACON:
7718 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
7719 | AR_Q_MISC_FSP_DBA_GATED
7720 | AR_Q_MISC_BEACON_USE
7721 | AR_Q_MISC_CBR_INCR_DIS1);
7722
7723 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
7724 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
7725 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
7726 | AR_D_MISC_BEACON_USE
7727 | AR_D_MISC_POST_FR_BKOFF_DIS);
7728 break;
7729 case ATH9K_TX_QUEUE_CAB:
7730 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
7731 | AR_Q_MISC_FSP_DBA_GATED
7732 | AR_Q_MISC_CBR_INCR_DIS1
7733 | AR_Q_MISC_CBR_INCR_DIS0);
7734 value = (qi->tqi_readyTime
7735 - (ah->ah_config.sw_beacon_response_time -
7736 ah->ah_config.dma_beacon_response_time)
7737 -
7738 ah->ah_config.additional_swba_backoff) *
7739 1024;
7740 REG_WRITE(ah, AR_QRDYTIMECFG(q),
7741 value | AR_Q_RDYTIMECFG_EN);
7742 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
7743 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
7744 AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
7745 break;
7746 case ATH9K_TX_QUEUE_PSPOLL:
7747 REG_WRITE(ah, AR_QMISC(q),
7748 REG_READ(ah,
7749 AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
7750 break;
7751 case ATH9K_TX_QUEUE_UAPSD:
7752 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
7753 | AR_D_MISC_POST_FR_BKOFF_DIS);
7754 break;
7755 default: 3527 default:
7756 break; 3528 return false;
7757 }
7758
7759 if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
7760 REG_WRITE(ah, AR_DMISC(q),
7761 REG_READ(ah, AR_DMISC(q)) |
7762 SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
7763 AR_D_MISC_ARB_LOCKOUT_CNTRL) |
7764 AR_D_MISC_POST_FR_BKOFF_DIS);
7765 } 3529 }
7766
7767 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
7768 ahp->ah_txOkInterruptMask |= 1 << q;
7769 else
7770 ahp->ah_txOkInterruptMask &= ~(1 << q);
7771 if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
7772 ahp->ah_txErrInterruptMask |= 1 << q;
7773 else
7774 ahp->ah_txErrInterruptMask &= ~(1 << q);
7775 if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
7776 ahp->ah_txDescInterruptMask |= 1 << q;
7777 else
7778 ahp->ah_txDescInterruptMask &= ~(1 << q);
7779 if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
7780 ahp->ah_txEolInterruptMask |= 1 << q;
7781 else
7782 ahp->ah_txEolInterruptMask &= ~(1 << q);
7783 if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
7784 ahp->ah_txUrnInterruptMask |= 1 << q;
7785 else
7786 ahp->ah_txUrnInterruptMask &= ~(1 << q);
7787 ath9k_hw_set_txq_interrupts(ah, qi);
7788
7789 return true;
7790} 3530}
7791 3531
7792void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs) 3532/****************************/
7793{ 3533/* GPIO / RFKILL / Antennae */
7794 struct ath_hal_5416 *ahp = AH5416(ah); 3534/****************************/
7795 *txqs &= ahp->ah_intrTxqs;
7796 ahp->ah_intrTxqs &= ~(*txqs);
7797}
7798 3535
7799bool 3536static void ath9k_hw_gpio_cfg_output_mux(struct ath_hal *ah,
7800ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds, 3537 u32 gpio, u32 type)
7801 u32 segLen, bool firstSeg,
7802 bool lastSeg, const struct ath_desc *ds0)
7803{
7804 struct ar5416_desc *ads = AR5416DESC(ds);
7805
7806 if (firstSeg) {
7807 ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
7808 } else if (lastSeg) {
7809 ads->ds_ctl0 = 0;
7810 ads->ds_ctl1 = segLen;
7811 ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
7812 ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
7813 } else {
7814 ads->ds_ctl0 = 0;
7815 ads->ds_ctl1 = segLen | AR_TxMore;
7816 ads->ds_ctl2 = 0;
7817 ads->ds_ctl3 = 0;
7818 }
7819 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
7820 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
7821 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
7822 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
7823 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
7824 return true;
7825}
7826
7827void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds)
7828{ 3538{
7829 struct ar5416_desc *ads = AR5416DESC(ds); 3539 int addr;
3540 u32 gpio_shift, tmp;
7830 3541
7831 ads->ds_txstatus0 = ads->ds_txstatus1 = 0; 3542 if (gpio > 11)
7832 ads->ds_txstatus2 = ads->ds_txstatus3 = 0; 3543 addr = AR_GPIO_OUTPUT_MUX3;
7833 ads->ds_txstatus4 = ads->ds_txstatus5 = 0; 3544 else if (gpio > 5)
7834 ads->ds_txstatus6 = ads->ds_txstatus7 = 0; 3545 addr = AR_GPIO_OUTPUT_MUX2;
7835 ads->ds_txstatus8 = ads->ds_txstatus9 = 0; 3546 else
7836} 3547 addr = AR_GPIO_OUTPUT_MUX1;
7837 3548
7838int 3549 gpio_shift = (gpio % 6) * 5;
7839ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds)
7840{
7841 struct ar5416_desc *ads = AR5416DESC(ds);
7842
7843 if ((ads->ds_txstatus9 & AR_TxDone) == 0)
7844 return -EINPROGRESS;
7845
7846 ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
7847 ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
7848 ds->ds_txstat.ts_status = 0;
7849 ds->ds_txstat.ts_flags = 0;
7850
7851 if (ads->ds_txstatus1 & AR_ExcessiveRetries)
7852 ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
7853 if (ads->ds_txstatus1 & AR_Filtered)
7854 ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
7855 if (ads->ds_txstatus1 & AR_FIFOUnderrun)
7856 ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
7857 if (ads->ds_txstatus9 & AR_TxOpExceeded)
7858 ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
7859 if (ads->ds_txstatus1 & AR_TxTimerExpired)
7860 ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
7861
7862 if (ads->ds_txstatus1 & AR_DescCfgErr)
7863 ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
7864 if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
7865 ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
7866 ath9k_hw_updatetxtriglevel(ah, true);
7867 }
7868 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
7869 ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
7870 ath9k_hw_updatetxtriglevel(ah, true);
7871 }
7872 if (ads->ds_txstatus0 & AR_TxBaStatus) {
7873 ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
7874 ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
7875 ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
7876 }
7877 3550
7878 ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx); 3551 if (AR_SREV_9280_20_OR_LATER(ah)
7879 switch (ds->ds_txstat.ts_rateindex) { 3552 || (addr != AR_GPIO_OUTPUT_MUX1)) {
7880 case 0: 3553 REG_RMW(ah, addr, (type << gpio_shift),
7881 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0); 3554 (0x1f << gpio_shift));
7882 break; 3555 } else {
7883 case 1: 3556 tmp = REG_READ(ah, addr);
7884 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1); 3557 tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0);
7885 break; 3558 tmp &= ~(0x1f << gpio_shift);
7886 case 2: 3559 tmp |= (type << gpio_shift);
7887 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2); 3560 REG_WRITE(ah, addr, tmp);
7888 break;
7889 case 3:
7890 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
7891 break;
7892 } 3561 }
7893
7894 ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
7895 ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
7896 ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
7897 ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
7898 ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
7899 ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
7900 ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
7901 ds->ds_txstat.evm0 = ads->AR_TxEVM0;
7902 ds->ds_txstat.evm1 = ads->AR_TxEVM1;
7903 ds->ds_txstat.evm2 = ads->AR_TxEVM2;
7904 ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
7905 ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
7906 ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
7907 ds->ds_txstat.ts_antenna = 1;
7908
7909 return 0;
7910} 3562}
7911 3563
7912void 3564void ath9k_hw_cfg_gpio_input(struct ath_hal *ah, u32 gpio)
7913ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
7914 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
7915 u32 keyIx, enum ath9k_key_type keyType, u32 flags)
7916{ 3565{
7917 struct ar5416_desc *ads = AR5416DESC(ds); 3566 u32 gpio_shift;
7918 struct ath_hal_5416 *ahp = AH5416(ah);
7919
7920 txPower += ahp->ah_txPowerIndexOffset;
7921 if (txPower > 63)
7922 txPower = 63;
7923
7924 ads->ds_ctl0 = (pktLen & AR_FrameLen)
7925 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
7926 | SM(txPower, AR_XmitPower)
7927 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
7928 | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
7929 | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
7930 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
7931
7932 ads->ds_ctl1 =
7933 (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
7934 | SM(type, AR_FrameType)
7935 | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
7936 | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
7937 | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
7938 3567
7939 ads->ds_ctl6 = SM(keyType, AR_EncrType); 3568 ASSERT(gpio < ah->ah_caps.num_gpio_pins);
7940 3569
7941 if (AR_SREV_9285(ah)) { 3570 gpio_shift = gpio << 1;
7942 3571
7943 ads->ds_ctl8 = 0; 3572 REG_RMW(ah,
7944 ads->ds_ctl9 = 0; 3573 AR_GPIO_OE_OUT,
7945 ads->ds_ctl10 = 0; 3574 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
7946 ads->ds_ctl11 = 0; 3575 (AR_GPIO_OE_OUT_DRV << gpio_shift));
7947 }
7948} 3576}
7949 3577
7950void 3578u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio)
7951ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
7952 struct ath_desc *lastds,
7953 u32 durUpdateEn, u32 rtsctsRate,
7954 u32 rtsctsDuration,
7955 struct ath9k_11n_rate_series series[],
7956 u32 nseries, u32 flags)
7957{ 3579{
7958 struct ar5416_desc *ads = AR5416DESC(ds); 3580 if (gpio >= ah->ah_caps.num_gpio_pins)
7959 struct ar5416_desc *last_ads = AR5416DESC(lastds); 3581 return 0xffffffff;
7960 u32 ds_ctl0;
7961
7962 (void) nseries;
7963 (void) rtsctsDuration;
7964
7965 if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
7966 ds_ctl0 = ads->ds_ctl0;
7967
7968 if (flags & ATH9K_TXDESC_RTSENA) {
7969 ds_ctl0 &= ~AR_CTSEnable;
7970 ds_ctl0 |= AR_RTSEnable;
7971 } else {
7972 ds_ctl0 &= ~AR_RTSEnable;
7973 ds_ctl0 |= AR_CTSEnable;
7974 }
7975 3582
7976 ads->ds_ctl0 = ds_ctl0; 3583 if (AR_SREV_9280_10_OR_LATER(ah)) {
3584 return (MS
3585 (REG_READ(ah, AR_GPIO_IN_OUT),
3586 AR928X_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) != 0;
7977 } else { 3587 } else {
7978 ads->ds_ctl0 = 3588 return (MS(REG_READ(ah, AR_GPIO_IN_OUT), AR_GPIO_IN_VAL) &
7979 (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable)); 3589 AR_GPIO_BIT(gpio)) != 0;
7980 } 3590 }
7981
7982 ads->ds_ctl2 = set11nTries(series, 0)
7983 | set11nTries(series, 1)
7984 | set11nTries(series, 2)
7985 | set11nTries(series, 3)
7986 | (durUpdateEn ? AR_DurUpdateEna : 0)
7987 | SM(0, AR_BurstDur);
7988
7989 ads->ds_ctl3 = set11nRate(series, 0)
7990 | set11nRate(series, 1)
7991 | set11nRate(series, 2)
7992 | set11nRate(series, 3);
7993
7994 ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
7995 | set11nPktDurRTSCTS(series, 1);
7996
7997 ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
7998 | set11nPktDurRTSCTS(series, 3);
7999
8000 ads->ds_ctl7 = set11nRateFlags(series, 0)
8001 | set11nRateFlags(series, 1)
8002 | set11nRateFlags(series, 2)
8003 | set11nRateFlags(series, 3)
8004 | SM(rtsctsRate, AR_RTSCTSRate);
8005 last_ads->ds_ctl2 = ads->ds_ctl2;
8006 last_ads->ds_ctl3 = ads->ds_ctl3;
8007} 3591}
8008 3592
8009void 3593void ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio,
8010ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds, 3594 u32 ah_signal_type)
8011 u32 aggrLen)
8012{ 3595{
8013 struct ar5416_desc *ads = AR5416DESC(ds); 3596 u32 gpio_shift;
8014
8015 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
8016
8017 ads->ds_ctl6 &= ~AR_AggrLen;
8018 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
8019}
8020 3597
8021void 3598 ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
8022ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
8023 u32 numDelims)
8024{
8025 struct ar5416_desc *ads = AR5416DESC(ds);
8026 unsigned int ctl6;
8027 3599
8028 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr); 3600 gpio_shift = 2 * gpio;
8029 3601
8030 ctl6 = ads->ds_ctl6; 3602 REG_RMW(ah,
8031 ctl6 &= ~AR_PadDelim; 3603 AR_GPIO_OE_OUT,
8032 ctl6 |= SM(numDelims, AR_PadDelim); 3604 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
8033 ads->ds_ctl6 = ctl6; 3605 (AR_GPIO_OE_OUT_DRV << gpio_shift));
8034} 3606}
8035 3607
8036void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds) 3608void ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, u32 val)
8037{ 3609{
8038 struct ar5416_desc *ads = AR5416DESC(ds); 3610 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
8039 3611 AR_GPIO_BIT(gpio));
8040 ads->ds_ctl1 |= AR_IsAggr;
8041 ads->ds_ctl1 &= ~AR_MoreAggr;
8042 ads->ds_ctl6 &= ~AR_PadDelim;
8043} 3612}
8044 3613
8045void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds) 3614#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
3615void ath9k_enable_rfkill(struct ath_hal *ah)
8046{ 3616{
8047 struct ar5416_desc *ads = AR5416DESC(ds); 3617 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
3618 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
8048 3619
8049 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr); 3620 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
3621 AR_GPIO_INPUT_MUX2_RFSILENT);
3622
3623 ath9k_hw_cfg_gpio_input(ah, ah->ah_rfkill_gpio);
3624 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
8050} 3625}
3626#endif
8051 3627
8052void 3628int ath9k_hw_select_antconfig(struct ath_hal *ah, u32 cfg)
8053ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
8054 u32 burstDuration)
8055{ 3629{
8056 struct ar5416_desc *ads = AR5416DESC(ds); 3630 struct ath9k_channel *chan = ah->ah_curchan;
3631 const struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
3632 u16 ant_config;
3633 u32 halNumAntConfig;
8057 3634
8058 ads->ds_ctl2 &= ~AR_BurstDur; 3635 halNumAntConfig = IS_CHAN_2GHZ(chan) ?
8059 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur); 3636 pCap->num_antcfg_2ghz : pCap->num_antcfg_5ghz;
8060}
8061 3637
8062void 3638 if (cfg < halNumAntConfig) {
8063ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds, 3639 if (!ath9k_hw_get_eeprom_antenna_cfg(ah, chan,
8064 u32 vmf) 3640 cfg, &ant_config)) {
8065{ 3641 REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config);
8066 struct ar5416_desc *ads = AR5416DESC(ds); 3642 return 0;
3643 }
3644 }
8067 3645
8068 if (vmf) 3646 return -EINVAL;
8069 ads->ds_ctl0 |= AR_VirtMoreFrag;
8070 else
8071 ads->ds_ctl0 &= ~AR_VirtMoreFrag;
8072} 3647}
8073 3648
8074void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp) 3649u32 ath9k_hw_getdefantenna(struct ath_hal *ah)
8075{ 3650{
8076 REG_WRITE(ah, AR_RXDP, rxdp); 3651 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
8077} 3652}
8078 3653
8079void ath9k_hw_rxena(struct ath_hal *ah) 3654void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna)
8080{ 3655{
8081 REG_WRITE(ah, AR_CR, AR_CR_RXE); 3656 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
8082} 3657}
8083 3658
8084bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set) 3659bool ath9k_hw_setantennaswitch(struct ath_hal *ah,
3660 enum ath9k_ant_setting settings,
3661 struct ath9k_channel *chan,
3662 u8 *tx_chainmask,
3663 u8 *rx_chainmask,
3664 u8 *antenna_cfgd)
8085{ 3665{
8086 if (set) { 3666 struct ath_hal_5416 *ahp = AH5416(ah);
8087 3667 static u8 tx_chainmask_cfg, rx_chainmask_cfg;
8088 REG_SET_BIT(ah, AR_DIAG_SW,
8089 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
8090
8091 if (!ath9k_hw_wait
8092 (ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE, 0)) {
8093 u32 reg;
8094 3668
8095 REG_CLR_BIT(ah, AR_DIAG_SW, 3669 if (AR_SREV_9280(ah)) {
8096 (AR_DIAG_RX_DIS | 3670 if (!tx_chainmask_cfg) {
8097 AR_DIAG_RX_ABORT));
8098 3671
8099 reg = REG_READ(ah, AR_OBS_BUS_1); 3672 tx_chainmask_cfg = *tx_chainmask;
8100 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 3673 rx_chainmask_cfg = *rx_chainmask;
8101 "%s: rx failed to go idle in 10 ms RXSM=0x%x\n", 3674 }
8102 __func__, reg);
8103 3675
8104 return false; 3676 switch (settings) {
3677 case ATH9K_ANT_FIXED_A:
3678 *tx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
3679 *rx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
3680 *antenna_cfgd = true;
3681 break;
3682 case ATH9K_ANT_FIXED_B:
3683 if (ah->ah_caps.tx_chainmask >
3684 ATH9K_ANTENNA1_CHAINMASK) {
3685 *tx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
3686 }
3687 *rx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
3688 *antenna_cfgd = true;
3689 break;
3690 case ATH9K_ANT_VARIABLE:
3691 *tx_chainmask = tx_chainmask_cfg;
3692 *rx_chainmask = rx_chainmask_cfg;
3693 *antenna_cfgd = true;
3694 break;
3695 default:
3696 break;
8105 } 3697 }
8106 } else { 3698 } else {
8107 REG_CLR_BIT(ah, AR_DIAG_SW, 3699 ahp->ah_diversityControl = settings;
8108 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
8109 } 3700 }
8110 3701
8111 return true; 3702 return true;
8112} 3703}
8113 3704
8114void 3705/*********************/
8115ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0, 3706/* General Operation */
8116 u32 filter1) 3707/*********************/
8117{
8118 REG_WRITE(ah, AR_MCAST_FIL0, filter0);
8119 REG_WRITE(ah, AR_MCAST_FIL1, filter1);
8120}
8121 3708
8122bool 3709u32 ath9k_hw_getrxfilter(struct ath_hal *ah)
8123ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
8124 u32 size, u32 flags)
8125{ 3710{
8126 struct ar5416_desc *ads = AR5416DESC(ds); 3711 u32 bits = REG_READ(ah, AR_RX_FILTER);
8127 struct ath9k_hw_capabilities *pCap = &ah->ah_caps; 3712 u32 phybits = REG_READ(ah, AR_PHY_ERR);
8128 3713
8129 ads->ds_ctl1 = size & AR_BufLen; 3714 if (phybits & AR_PHY_ERR_RADAR)
8130 if (flags & ATH9K_RXDESC_INTREQ) 3715 bits |= ATH9K_RX_FILTER_PHYRADAR;
8131 ads->ds_ctl1 |= AR_RxIntrReq; 3716 if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING))
3717 bits |= ATH9K_RX_FILTER_PHYERR;
8132 3718
8133 ads->ds_rxstatus8 &= ~AR_RxDone; 3719 return bits;
8134 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
8135 memset(&(ads->u), 0, sizeof(ads->u));
8136 return true;
8137} 3720}
8138 3721
8139int 3722void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits)
8140ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
8141 u32 pa, struct ath_desc *nds, u64 tsf)
8142{ 3723{
8143 struct ar5416_desc ads; 3724 u32 phybits;
8144 struct ar5416_desc *adsp = AR5416DESC(ds);
8145
8146 if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
8147 return -EINPROGRESS;
8148
8149 ads.u.rx = adsp->u.rx;
8150
8151 ds->ds_rxstat.rs_status = 0;
8152 ds->ds_rxstat.rs_flags = 0;
8153 3725
8154 ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen; 3726 REG_WRITE(ah, AR_RX_FILTER, (bits & 0xffff) | AR_RX_COMPR_BAR);
8155 ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp; 3727 phybits = 0;
3728 if (bits & ATH9K_RX_FILTER_PHYRADAR)
3729 phybits |= AR_PHY_ERR_RADAR;
3730 if (bits & ATH9K_RX_FILTER_PHYERR)
3731 phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
3732 REG_WRITE(ah, AR_PHY_ERR, phybits);
8156 3733
8157 ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined); 3734 if (phybits)
8158 ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00); 3735 REG_WRITE(ah, AR_RXCFG,
8159 ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01); 3736 REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA);
8160 ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
8161 ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
8162 ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
8163 ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
8164 if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
8165 ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
8166 else 3737 else
8167 ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID; 3738 REG_WRITE(ah, AR_RXCFG,
8168 3739 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
8169 ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
8170 ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
8171
8172 ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
8173 ds->ds_rxstat.rs_moreaggr =
8174 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
8175 ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
8176 ds->ds_rxstat.rs_flags =
8177 (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
8178 ds->ds_rxstat.rs_flags |=
8179 (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
8180
8181 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
8182 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
8183 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
8184 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
8185 if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
8186 ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;
8187
8188 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
8189
8190 if (ads.ds_rxstatus8 & AR_CRCErr)
8191 ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
8192 else if (ads.ds_rxstatus8 & AR_PHYErr) {
8193 u32 phyerr;
8194
8195 ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
8196 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
8197 ds->ds_rxstat.rs_phyerr = phyerr;
8198 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
8199 ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
8200 else if (ads.ds_rxstatus8 & AR_MichaelErr)
8201 ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
8202 }
8203
8204 return 0;
8205} 3740}
8206 3741
8207static void ath9k_hw_setup_rate_table(struct ath_hal *ah, 3742bool ath9k_hw_phy_disable(struct ath_hal *ah)
8208 struct ath9k_rate_table *rt)
8209{ 3743{
8210 int i; 3744 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM);
8211
8212 if (rt->rateCodeToIndex[0] != 0)
8213 return;
8214 for (i = 0; i < 256; i++)
8215 rt->rateCodeToIndex[i] = (u8) -1;
8216 for (i = 0; i < rt->rateCount; i++) {
8217 u8 code = rt->info[i].rateCode;
8218 u8 cix = rt->info[i].controlRate;
8219
8220 rt->rateCodeToIndex[code] = i;
8221 rt->rateCodeToIndex[code | rt->info[i].shortPreamble] = i;
8222
8223 rt->info[i].lpAckDuration =
8224 ath9k_hw_computetxtime(ah, rt,
8225 WLAN_CTRL_FRAME_SIZE,
8226 cix,
8227 false);
8228 rt->info[i].spAckDuration =
8229 ath9k_hw_computetxtime(ah, rt,
8230 WLAN_CTRL_FRAME_SIZE,
8231 cix,
8232 true);
8233 }
8234} 3745}
8235 3746
8236const struct ath9k_rate_table *ath9k_hw_getratetable(struct ath_hal *ah, 3747bool ath9k_hw_disable(struct ath_hal *ah)
8237 u32 mode)
8238{ 3748{
8239 struct ath9k_rate_table *rt; 3749 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
8240 switch (mode) { 3750 return false;
8241 case ATH9K_MODE_11A:
8242 rt = &ar5416_11a_table;
8243 break;
8244 case ATH9K_MODE_11B:
8245 rt = &ar5416_11b_table;
8246 break;
8247 case ATH9K_MODE_11G:
8248 rt = &ar5416_11g_table;
8249 break;
8250 case ATH9K_MODE_11NG_HT20:
8251 case ATH9K_MODE_11NG_HT40PLUS:
8252 case ATH9K_MODE_11NG_HT40MINUS:
8253 rt = &ar5416_11ng_table;
8254 break;
8255 case ATH9K_MODE_11NA_HT20:
8256 case ATH9K_MODE_11NA_HT40PLUS:
8257 case ATH9K_MODE_11NA_HT40MINUS:
8258 rt = &ar5416_11na_table;
8259 break;
8260 default:
8261 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, "%s: invalid mode 0x%x\n",
8262 __func__, mode);
8263 return NULL;
8264 }
8265 ath9k_hw_setup_rate_table(ah, rt);
8266 return rt;
8267}
8268 3751
8269static const char *ath9k_hw_devname(u16 devid) 3752 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD);
8270{
8271 switch (devid) {
8272 case AR5416_DEVID_PCI:
8273 case AR5416_DEVID_PCIE:
8274 return "Atheros 5416";
8275 case AR9160_DEVID_PCI:
8276 return "Atheros 9160";
8277 case AR9280_DEVID_PCI:
8278 case AR9280_DEVID_PCIE:
8279 return "Atheros 9280";
8280 }
8281 return NULL;
8282} 3753}
8283 3754
8284const char *ath9k_hw_probe(u16 vendorid, u16 devid) 3755bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit)
8285{ 3756{
8286 return vendorid == ATHEROS_VENDOR_ID ? 3757 struct ath9k_channel *chan = ah->ah_curchan;
8287 ath9k_hw_devname(devid) : NULL;
8288}
8289 3758
8290struct ath_hal *ath9k_hw_attach(u16 devid, 3759 ah->ah_powerLimit = min(limit, (u32) MAX_RATE_POWER);
8291 struct ath_softc *sc,
8292 void __iomem *mem,
8293 int *error)
8294{
8295 struct ath_hal *ah = NULL;
8296 3760
8297 switch (devid) { 3761 if (ath9k_hw_set_txpower(ah, chan,
8298 case AR5416_DEVID_PCI: 3762 ath9k_regd_get_ctl(ah, chan),
8299 case AR5416_DEVID_PCIE: 3763 ath9k_regd_get_antenna_allowed(ah, chan),
8300 case AR9160_DEVID_PCI: 3764 chan->maxRegTxPower * 2,
8301 case AR9280_DEVID_PCI: 3765 min((u32) MAX_RATE_POWER,
8302 case AR9280_DEVID_PCIE: 3766 (u32) ah->ah_powerLimit)) != 0)
8303 ah = ath9k_hw_do_attach(devid, sc, mem, error); 3767 return false;
8304 break;
8305 default:
8306 DPRINTF(ah->ah_sc, ATH_DBG_ANY,
8307 "devid=0x%x not supported.\n", devid);
8308 ah = NULL;
8309 *error = -ENXIO;
8310 break;
8311 }
8312 3768
8313 return ah; 3769 return true;
8314} 3770}
8315 3771
8316u16 3772void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac)
8317ath9k_hw_computetxtime(struct ath_hal *ah,
8318 const struct ath9k_rate_table *rates,
8319 u32 frameLen, u16 rateix,
8320 bool shortPreamble)
8321{ 3773{
8322 u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime; 3774 struct ath_hal_5416 *ahp = AH5416(ah);
8323 u32 kbps;
8324
8325 kbps = rates->info[rateix].rateKbps;
8326
8327 if (kbps == 0)
8328 return 0;
8329 switch (rates->info[rateix].phy) {
8330
8331 case PHY_CCK:
8332 phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS;
8333 if (shortPreamble && rates->info[rateix].shortPreamble)
8334 phyTime >>= 1;
8335 numBits = frameLen << 3;
8336 txTime = CCK_SIFS_TIME + phyTime
8337 + ((numBits * 1000) / kbps);
8338 break;
8339 case PHY_OFDM:
8340 if (ah->ah_curchan && IS_CHAN_QUARTER_RATE(ah->ah_curchan)) {
8341 bitsPerSymbol =
8342 (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000;
8343
8344 numBits = OFDM_PLCP_BITS + (frameLen << 3);
8345 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
8346 txTime = OFDM_SIFS_TIME_QUARTER
8347 + OFDM_PREAMBLE_TIME_QUARTER
8348 + (numSymbols * OFDM_SYMBOL_TIME_QUARTER);
8349 } else if (ah->ah_curchan &&
8350 IS_CHAN_HALF_RATE(ah->ah_curchan)) {
8351 bitsPerSymbol =
8352 (kbps * OFDM_SYMBOL_TIME_HALF) / 1000;
8353
8354 numBits = OFDM_PLCP_BITS + (frameLen << 3);
8355 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
8356 txTime = OFDM_SIFS_TIME_HALF +
8357 OFDM_PREAMBLE_TIME_HALF
8358 + (numSymbols * OFDM_SYMBOL_TIME_HALF);
8359 } else {
8360 bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000;
8361
8362 numBits = OFDM_PLCP_BITS + (frameLen << 3);
8363 numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
8364 txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME
8365 + (numSymbols * OFDM_SYMBOL_TIME);
8366 }
8367 break;
8368 3775
8369 default: 3776 memcpy(mac, ahp->ah_macaddr, ETH_ALEN);
8370 DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO,
8371 "%s: unknown phy %u (rate ix %u)\n", __func__,
8372 rates->info[rateix].phy, rateix);
8373 txTime = 0;
8374 break;
8375 }
8376 return txTime;
8377} 3777}
8378 3778
8379u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags) 3779bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac)
8380{ 3780{
8381 if (flags & CHANNEL_2GHZ) { 3781 struct ath_hal_5416 *ahp = AH5416(ah);
8382 if (freq == 2484)
8383 return 14;
8384 if (freq < 2484)
8385 return (freq - 2407) / 5;
8386 else
8387 return 15 + ((freq - 2512) / 20);
8388 } else if (flags & CHANNEL_5GHZ) {
8389 if (ath9k_regd_is_public_safety_sku(ah) &&
8390 IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
8391 return ((freq * 10) +
8392 (((freq % 5) == 2) ? 5 : 0) - 49400) / 5;
8393 } else if ((flags & CHANNEL_A) && (freq <= 5000)) {
8394 return (freq - 4000) / 5;
8395 } else {
8396 return (freq - 5000) / 5;
8397 }
8398 } else {
8399 if (freq == 2484)
8400 return 14;
8401 if (freq < 2484)
8402 return (freq - 2407) / 5;
8403 if (freq < 5000) {
8404 if (ath9k_regd_is_public_safety_sku(ah)
8405 && IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) {
8406 return ((freq * 10) +
8407 (((freq % 5) ==
8408 2) ? 5 : 0) - 49400) / 5;
8409 } else if (freq > 4900) {
8410 return (freq - 4000) / 5;
8411 } else {
8412 return 15 + ((freq - 2512) / 20);
8413 }
8414 }
8415 return (freq - 5000) / 5;
8416 }
8417}
8418 3782
8419/* We can tune this as we go by monitoring really low values */ 3783 memcpy(ahp->ah_macaddr, mac, ETH_ALEN);
8420#define ATH9K_NF_TOO_LOW -60
8421 3784
8422/* AR5416 may return very high value (like -31 dBm), in those cases the nf
8423 * is incorrect and we should use the static NF value. Later we can try to
8424 * find out why they are reporting these values */
8425static bool ath9k_hw_nf_in_range(struct ath_hal *ah, s16 nf)
8426{
8427 if (nf > ATH9K_NF_TOO_LOW) {
8428 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
8429 "%s: noise floor value detected (%d) is "
8430 "lower than what we think is a "
8431 "reasonable value (%d)\n",
8432 __func__, nf, ATH9K_NF_TOO_LOW);
8433 return false;
8434 }
8435 return true; 3785 return true;
8436} 3786}
8437 3787
8438s16 3788void ath9k_hw_setopmode(struct ath_hal *ah)
8439ath9k_hw_getchan_noise(struct ath_hal *ah, struct ath9k_channel *chan)
8440{ 3789{
8441 struct ath9k_channel *ichan; 3790 ath9k_hw_set_operating_mode(ah, ah->ah_opmode);
8442 s16 nf;
8443
8444 ichan = ath9k_regd_check_channel(ah, chan);
8445 if (ichan == NULL) {
8446 DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL,
8447 "%s: invalid channel %u/0x%x; no mapping\n",
8448 __func__, chan->channel, chan->channelFlags);
8449 return ATH_DEFAULT_NOISE_FLOOR;
8450 }
8451 if (ichan->rawNoiseFloor == 0) {
8452 enum wireless_mode mode = ath9k_hw_chan2wmode(ah, chan);
8453 nf = NOISE_FLOOR[mode];
8454 } else
8455 nf = ichan->rawNoiseFloor;
8456
8457 if (!ath9k_hw_nf_in_range(ah, nf))
8458 nf = ATH_DEFAULT_NOISE_FLOOR;
8459
8460 return nf;
8461} 3791}
8462 3792
8463bool ath9k_hw_set_tsfadjust(struct ath_hal *ah, u32 setting) 3793void ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0, u32 filter1)
8464{ 3794{
8465 struct ath_hal_5416 *ahp = AH5416(ah); 3795 REG_WRITE(ah, AR_MCAST_FIL0, filter0);
8466 3796 REG_WRITE(ah, AR_MCAST_FIL1, filter1);
8467 if (setting)
8468 ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
8469 else
8470 ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
8471 return true;
8472} 3797}
8473 3798
8474bool ath9k_hw_phycounters(struct ath_hal *ah) 3799void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask)
8475{ 3800{
8476 struct ath_hal_5416 *ahp = AH5416(ah); 3801 struct ath_hal_5416 *ahp = AH5416(ah);
8477 3802
8478 return ahp->ah_hasHwPhyCounters ? true : false; 3803 memcpy(mask, ahp->ah_bssidmask, ETH_ALEN);
8479} 3804}
8480 3805
8481u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q) 3806bool ath9k_hw_setbssidmask(struct ath_hal *ah, const u8 *mask)
8482{ 3807{
8483 return REG_READ(ah, AR_QTXDP(q)); 3808 struct ath_hal_5416 *ahp = AH5416(ah);
8484}
8485 3809
8486bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q, 3810 memcpy(ahp->ah_bssidmask, mask, ETH_ALEN);
8487 u32 txdp) 3811
8488{ 3812 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask));
8489 REG_WRITE(ah, AR_QTXDP(q), txdp); 3813 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4));
8490 3814
8491 return true; 3815 return true;
8492} 3816}
8493 3817
8494bool ath9k_hw_txstart(struct ath_hal *ah, u32 q) 3818void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid, u16 assocId)
8495{ 3819{
8496 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q); 3820 struct ath_hal_5416 *ahp = AH5416(ah);
8497 3821
8498 REG_WRITE(ah, AR_Q_TXE, 1 << q); 3822 memcpy(ahp->ah_bssid, bssid, ETH_ALEN);
3823 ahp->ah_assocId = assocId;
8499 3824
8500 return true; 3825 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid));
3826 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) |
3827 ((assocId & 0x3fff) << AR_BSS_ID1_AID_S));
8501} 3828}
8502 3829
8503u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q) 3830u64 ath9k_hw_gettsf64(struct ath_hal *ah)
8504{ 3831{
8505 u32 npend; 3832 u64 tsf;
8506 3833
8507 npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT; 3834 tsf = REG_READ(ah, AR_TSF_U32);
8508 if (npend == 0) { 3835 tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
8509 3836
8510 if (REG_READ(ah, AR_Q_TXE) & (1 << q)) 3837 return tsf;
8511 npend = 1;
8512 }
8513 return npend;
8514} 3838}
8515 3839
8516bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q) 3840void ath9k_hw_reset_tsf(struct ath_hal *ah)
8517{ 3841{
8518 u32 wait; 3842 int count;
8519
8520 REG_WRITE(ah, AR_Q_TXD, 1 << q);
8521 3843
8522 for (wait = 1000; wait != 0; wait--) { 3844 count = 0;
8523 if (ath9k_hw_numtxpending(ah, q) == 0) 3845 while (REG_READ(ah, AR_SLP32_MODE) & AR_SLP32_TSF_WRITE_STATUS) {
3846 count++;
3847 if (count > 10) {
3848 DPRINTF(ah->ah_sc, ATH_DBG_RESET,
3849 "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
8524 break; 3850 break;
8525 udelay(100); 3851 }
3852 udelay(10);
8526 } 3853 }
3854 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
3855}
8527 3856
8528 if (ath9k_hw_numtxpending(ah, q)) { 3857bool ath9k_hw_set_tsfadjust(struct ath_hal *ah, u32 setting)
8529 u32 tsfLow, j; 3858{
8530 3859 struct ath_hal_5416 *ahp = AH5416(ah);
8531 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
8532 "%s: Num of pending TX Frames %d on Q %d\n",
8533 __func__, ath9k_hw_numtxpending(ah, q), q);
8534
8535 for (j = 0; j < 2; j++) {
8536 tsfLow = REG_READ(ah, AR_TSF_L32);
8537 REG_WRITE(ah, AR_QUIET2,
8538 SM(10, AR_QUIET2_QUIET_DUR));
8539 REG_WRITE(ah, AR_QUIET_PERIOD, 100);
8540 REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
8541 REG_SET_BIT(ah, AR_TIMER_MODE,
8542 AR_QUIET_TIMER_EN);
8543 3860
8544 if ((REG_READ(ah, AR_TSF_L32) >> 10) == 3861 if (setting)
8545 (tsfLow >> 10)) { 3862 ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF;
8546 break; 3863 else
8547 } 3864 ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF;
8548 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
8549 "%s: TSF have moved while trying to set "
8550 "quiet time TSF: 0x%08x\n",
8551 __func__, tsfLow);
8552 }
8553 3865
8554 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); 3866 return true;
3867}
8555 3868
8556 udelay(200); 3869bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us)
8557 REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN); 3870{
3871 struct ath_hal_5416 *ahp = AH5416(ah);
8558 3872
8559 wait = 1000; 3873 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
3874 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad slot time %u\n", us);
3875 ahp->ah_slottime = (u32) -1;
3876 return false;
3877 } else {
3878 REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
3879 ahp->ah_slottime = us;
3880 return true;
3881 }
3882}
8560 3883
8561 while (ath9k_hw_numtxpending(ah, q)) { 3884void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode)
8562 if ((--wait) == 0) { 3885{
8563 DPRINTF(ah->ah_sc, ATH_DBG_XMIT, 3886 u32 macmode;
8564 "%s: Failed to stop Tx DMA in 100 "
8565 "msec after killing last frame\n",
8566 __func__);
8567 break;
8568 }
8569 udelay(100);
8570 }
8571 3887
8572 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); 3888 if (mode == ATH9K_HT_MACMODE_2040 &&
8573 } 3889 !ah->ah_config.cwm_ignore_extcca)
3890 macmode = AR_2040_JOINED_RX_CLEAR;
3891 else
3892 macmode = 0;
8574 3893
8575 REG_WRITE(ah, AR_Q_TXD, 0); 3894 REG_WRITE(ah, AR_2040_MODE, macmode);
8576 return wait != 0;
8577} 3895}
diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h
index 2113818ee934..91d8f594af81 100644
--- a/drivers/net/wireless/ath9k/hw.h
+++ b/drivers/net/wireless/ath9k/hw.h
@@ -415,6 +415,9 @@ struct ar5416Stats {
415#define AR5416_EEP_MINOR_VER_3 0x3 415#define AR5416_EEP_MINOR_VER_3 0x3
416#define AR5416_EEP_MINOR_VER_7 0x7 416#define AR5416_EEP_MINOR_VER_7 0x7
417#define AR5416_EEP_MINOR_VER_9 0x9 417#define AR5416_EEP_MINOR_VER_9 0x9
418#define AR5416_EEP_MINOR_VER_16 0x10
419#define AR5416_EEP_MINOR_VER_17 0x11
420#define AR5416_EEP_MINOR_VER_19 0x13
418 421
419#define AR5416_NUM_5G_CAL_PIERS 8 422#define AR5416_NUM_5G_CAL_PIERS 8
420#define AR5416_NUM_2G_CAL_PIERS 4 423#define AR5416_NUM_2G_CAL_PIERS 4
@@ -436,6 +439,27 @@ struct ar5416Stats {
436#define AR5416_MAX_CHAINS 3 439#define AR5416_MAX_CHAINS 3
437#define AR5416_PWR_TABLE_OFFSET -5 440#define AR5416_PWR_TABLE_OFFSET -5
438 441
442/* Rx gain type values */
443#define AR5416_EEP_RXGAIN_23DB_BACKOFF 0
444#define AR5416_EEP_RXGAIN_13DB_BACKOFF 1
445#define AR5416_EEP_RXGAIN_ORIG 2
446
447/* Tx gain type values */
448#define AR5416_EEP_TXGAIN_ORIGINAL 0
449#define AR5416_EEP_TXGAIN_HIGH_POWER 1
450
451#define AR5416_EEP4K_START_LOC 64
452#define AR5416_EEP4K_NUM_2G_CAL_PIERS 3
453#define AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS 3
454#define AR5416_EEP4K_NUM_2G_20_TARGET_POWERS 3
455#define AR5416_EEP4K_NUM_2G_40_TARGET_POWERS 3
456#define AR5416_EEP4K_NUM_CTLS 12
457#define AR5416_EEP4K_NUM_BAND_EDGES 4
458#define AR5416_EEP4K_NUM_PD_GAINS 2
459#define AR5416_EEP4K_PD_GAINS_IN_MASK 4
460#define AR5416_EEP4K_PD_GAIN_ICEPTS 5
461#define AR5416_EEP4K_MAX_CHAINS 1
462
439enum eeprom_param { 463enum eeprom_param {
440 EEP_NFTHRESH_5, 464 EEP_NFTHRESH_5,
441 EEP_NFTHRESH_2, 465 EEP_NFTHRESH_2,
@@ -454,6 +478,8 @@ enum eeprom_param {
454 EEP_MINOR_REV, 478 EEP_MINOR_REV,
455 EEP_TX_MASK, 479 EEP_TX_MASK,
456 EEP_RX_MASK, 480 EEP_RX_MASK,
481 EEP_RXGAIN_TYPE,
482 EEP_TXGAIN_TYPE,
457}; 483};
458 484
459enum ar5416_rates { 485enum ar5416_rates {
@@ -469,6 +495,11 @@ enum ar5416_rates {
469 Ar5416RateSize 495 Ar5416RateSize
470}; 496};
471 497
498enum ath9k_hal_freq_band {
499 ATH9K_HAL_FREQ_BAND_5GHZ = 0,
500 ATH9K_HAL_FREQ_BAND_2GHZ = 1
501};
502
472struct base_eep_header { 503struct base_eep_header {
473 u16 length; 504 u16 length;
474 u16 checksum; 505 u16 checksum;
@@ -485,9 +516,32 @@ struct base_eep_header {
485 u32 binBuildNumber; 516 u32 binBuildNumber;
486 u8 deviceType; 517 u8 deviceType;
487 u8 pwdclkind; 518 u8 pwdclkind;
488 u8 futureBase[32]; 519 u8 futureBase_1[2];
520 u8 rxGainType;
521 u8 futureBase_2[3];
522 u8 txGainType;
523 u8 futureBase_3[25];
524} __packed;
525
526struct base_eep_header_4k {
527 u16 length;
528 u16 checksum;
529 u16 version;
530 u8 opCapFlags;
531 u8 eepMisc;
532 u16 regDmn[2];
533 u8 macAddr[6];
534 u8 rxMask;
535 u8 txMask;
536 u16 rfSilent;
537 u16 blueToothOptions;
538 u16 deviceCap;
539 u32 binBuildNumber;
540 u8 deviceType;
541 u8 futureBase[1];
489} __packed; 542} __packed;
490 543
544
491struct spur_chan { 545struct spur_chan {
492 u16 spurChan; 546 u16 spurChan;
493 u8 spurRangeLow; 547 u8 spurRangeLow;
@@ -540,11 +594,58 @@ struct modal_eep_header {
540 struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS]; 594 struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS];
541} __packed; 595} __packed;
542 596
597struct modal_eep_4k_header {
598 u32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS];
599 u32 antCtrlCommon;
600 u8 antennaGainCh[AR5416_EEP4K_MAX_CHAINS];
601 u8 switchSettling;
602 u8 txRxAttenCh[AR5416_EEP4K_MAX_CHAINS];
603 u8 rxTxMarginCh[AR5416_EEP4K_MAX_CHAINS];
604 u8 adcDesiredSize;
605 u8 pgaDesiredSize;
606 u8 xlnaGainCh[AR5416_EEP4K_MAX_CHAINS];
607 u8 txEndToXpaOff;
608 u8 txEndToRxOn;
609 u8 txFrameToXpaOn;
610 u8 thresh62;
611 u8 noiseFloorThreshCh[AR5416_EEP4K_MAX_CHAINS];
612 u8 xpdGain;
613 u8 xpd;
614 u8 iqCalICh[AR5416_EEP4K_MAX_CHAINS];
615 u8 iqCalQCh[AR5416_EEP4K_MAX_CHAINS];
616 u8 pdGainOverlap;
617 u8 ob_01;
618 u8 db1_01;
619 u8 xpaBiasLvl;
620 u8 txFrameToDataStart;
621 u8 txFrameToPaOn;
622 u8 ht40PowerIncForPdadc;
623 u8 bswAtten[AR5416_EEP4K_MAX_CHAINS];
624 u8 bswMargin[AR5416_EEP4K_MAX_CHAINS];
625 u8 swSettleHt40;
626 u8 xatten2Db[AR5416_EEP4K_MAX_CHAINS];
627 u8 xatten2Margin[AR5416_EEP4K_MAX_CHAINS];
628 u8 db2_01;
629 u8 version;
630 u16 ob_234;
631 u16 db1_234;
632 u16 db2_234;
633 u8 futureModal[4];
634
635 struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS];
636} __packed;
637
638
543struct cal_data_per_freq { 639struct cal_data_per_freq {
544 u8 pwrPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS]; 640 u8 pwrPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
545 u8 vpdPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS]; 641 u8 vpdPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
546} __packed; 642} __packed;
547 643
644struct cal_data_per_freq_4k {
645 u8 pwrPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_EEP4K_PD_GAIN_ICEPTS];
646 u8 vpdPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_EEP4K_PD_GAIN_ICEPTS];
647} __packed;
648
548struct cal_target_power_leg { 649struct cal_target_power_leg {
549 u8 bChannel; 650 u8 bChannel;
550 u8 tPow2x[4]; 651 u8 tPow2x[4];
@@ -555,6 +656,7 @@ struct cal_target_power_ht {
555 u8 tPow2x[8]; 656 u8 tPow2x[8];
556} __packed; 657} __packed;
557 658
659
558#ifdef __BIG_ENDIAN_BITFIELD 660#ifdef __BIG_ENDIAN_BITFIELD
559struct cal_ctl_edges { 661struct cal_ctl_edges {
560 u8 bChannel; 662 u8 bChannel;
@@ -569,10 +671,15 @@ struct cal_ctl_edges {
569 671
570struct cal_ctl_data { 672struct cal_ctl_data {
571 struct cal_ctl_edges 673 struct cal_ctl_edges
572 ctlEdges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES]; 674 ctlEdges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES];
573} __packed; 675} __packed;
574 676
575struct ar5416_eeprom { 677struct cal_ctl_data_4k {
678 struct cal_ctl_edges
679 ctlEdges[AR5416_EEP4K_MAX_CHAINS][AR5416_EEP4K_NUM_BAND_EDGES];
680} __packed;
681
682struct ar5416_eeprom_def {
576 struct base_eep_header baseEepHeader; 683 struct base_eep_header baseEepHeader;
577 u8 custData[64]; 684 u8 custData[64];
578 struct modal_eep_header modalHeader[2]; 685 struct modal_eep_header modalHeader[2];
@@ -601,6 +708,26 @@ struct ar5416_eeprom {
601 u8 padding; 708 u8 padding;
602} __packed; 709} __packed;
603 710
711struct ar5416_eeprom_4k {
712 struct base_eep_header_4k baseEepHeader;
713 u8 custData[20];
714 struct modal_eep_4k_header modalHeader;
715 u8 calFreqPier2G[AR5416_EEP4K_NUM_2G_CAL_PIERS];
716 struct cal_data_per_freq_4k
717 calPierData2G[AR5416_EEP4K_MAX_CHAINS][AR5416_EEP4K_NUM_2G_CAL_PIERS];
718 struct cal_target_power_leg
719 calTargetPowerCck[AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS];
720 struct cal_target_power_leg
721 calTargetPower2G[AR5416_EEP4K_NUM_2G_20_TARGET_POWERS];
722 struct cal_target_power_ht
723 calTargetPower2GHT20[AR5416_EEP4K_NUM_2G_20_TARGET_POWERS];
724 struct cal_target_power_ht
725 calTargetPower2GHT40[AR5416_EEP4K_NUM_2G_40_TARGET_POWERS];
726 u8 ctlIndex[AR5416_EEP4K_NUM_CTLS];
727 struct cal_ctl_data_4k ctlData[AR5416_EEP4K_NUM_CTLS];
728 u8 padding;
729} __packed;
730
604struct ar5416IniArray { 731struct ar5416IniArray {
605 u32 *ia_array; 732 u32 *ia_array;
606 u32 ia_rows; 733 u32 ia_rows;
@@ -668,9 +795,22 @@ struct hal_cal_list {
668 struct hal_cal_list *calNext; 795 struct hal_cal_list *calNext;
669}; 796};
670 797
798/*
799 * Enum to indentify the eeprom mappings
800 */
801enum hal_eep_map {
802 EEP_MAP_DEFAULT = 0x0,
803 EEP_MAP_4KBITS,
804 EEP_MAP_MAX
805};
806
807
671struct ath_hal_5416 { 808struct ath_hal_5416 {
672 struct ath_hal ah; 809 struct ath_hal ah;
673 struct ar5416_eeprom ah_eeprom; 810 union {
811 struct ar5416_eeprom_def def;
812 struct ar5416_eeprom_4k map4k;
813 } ah_eeprom;
674 struct ar5416Stats ah_stats; 814 struct ar5416Stats ah_stats;
675 struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES]; 815 struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES];
676 void __iomem *ah_cal_mem; 816 void __iomem *ah_cal_mem;
@@ -792,6 +932,10 @@ struct ath_hal_5416 {
792 struct ar5416IniArray ah_iniAddac; 932 struct ar5416IniArray ah_iniAddac;
793 struct ar5416IniArray ah_iniPcieSerdes; 933 struct ar5416IniArray ah_iniPcieSerdes;
794 struct ar5416IniArray ah_iniModesAdditional; 934 struct ar5416IniArray ah_iniModesAdditional;
935 struct ar5416IniArray ah_iniModesRxGain;
936 struct ar5416IniArray ah_iniModesTxGain;
937 /* To indicate EEPROM mapping used */
938 enum hal_eep_map ah_eep_map;
795}; 939};
796#define AH5416(_ah) ((struct ath_hal_5416 *)(_ah)) 940#define AH5416(_ah) ((struct ath_hal_5416 *)(_ah))
797 941
@@ -833,13 +977,20 @@ struct ath_hal_5416 {
833 (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200 977 (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200
834#define AR5416_EEPROM_MAX 0xae0 978#define AR5416_EEPROM_MAX 0xae0
835#define ar5416_get_eep_ver(_ahp) \ 979#define ar5416_get_eep_ver(_ahp) \
836 (((_ahp)->ah_eeprom.baseEepHeader.version >> 12) & 0xF) 980 (((_ahp)->ah_eeprom.def.baseEepHeader.version >> 12) & 0xF)
837#define ar5416_get_eep_rev(_ahp) \ 981#define ar5416_get_eep_rev(_ahp) \
838 (((_ahp)->ah_eeprom.baseEepHeader.version) & 0xFFF) 982 (((_ahp)->ah_eeprom.def.baseEepHeader.version) & 0xFFF)
839#define ar5416_get_ntxchains(_txchainmask) \ 983#define ar5416_get_ntxchains(_txchainmask) \
840 (((_txchainmask >> 2) & 1) + \ 984 (((_txchainmask >> 2) & 1) + \
841 ((_txchainmask >> 1) & 1) + (_txchainmask & 1)) 985 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
842 986
987/* EEPROM 4K bit map definations */
988#define ar5416_get_eep4k_ver(_ahp) \
989 (((_ahp)->ah_eeprom.map4k.baseEepHeader.version >> 12) & 0xF)
990#define ar5416_get_eep4k_rev(_ahp) \
991 (((_ahp)->ah_eeprom.map4k.baseEepHeader.version) & 0xFFF)
992
993
843#ifdef __BIG_ENDIAN 994#ifdef __BIG_ENDIAN
844#define AR5416_EEPROM_MAGIC 0x5aa5 995#define AR5416_EEPROM_MAGIC 0x5aa5
845#else 996#else
@@ -923,7 +1074,7 @@ struct ath_hal_5416 {
923#define OFDM_PLCP_BITS_QUARTER 22 1074#define OFDM_PLCP_BITS_QUARTER 22
924#define OFDM_SYMBOL_TIME_QUARTER 16 1075#define OFDM_SYMBOL_TIME_QUARTER 16
925 1076
926u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp, 1077u32 ath9k_hw_get_eeprom(struct ath_hal *ah,
927 enum eeprom_param param); 1078 enum eeprom_param param);
928 1079
929#endif 1080#endif
diff --git a/drivers/net/wireless/ath9k/initvals.h b/drivers/net/wireless/ath9k/initvals.h
index 3dd3815940a4..f3cfa16525e4 100644
--- a/drivers/net/wireless/ath9k/initvals.h
+++ b/drivers/net/wireless/ath9k/initvals.h
@@ -14,6 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17/* AR5416 to Fowl ar5146.ini */
17static const u32 ar5416Modes_9100[][6] = { 18static const u32 ar5416Modes_9100[][6] = {
18 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 19 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
19 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 20 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -31,17 +32,17 @@ static const u32 ar5416Modes_9100[][6] = {
31 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 32 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
32 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 33 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
33 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 34 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
34 { 0x00009850, 0x6de8b4e0, 0x6de8b4e0, 0x6de8b0de, 0x6de8b0de, 0x6de8b0de }, 35 { 0x00009850, 0x6c48b4e0, 0x6c48b4e0, 0x6c48b0de, 0x6c48b0de, 0x6c48b0de },
35 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, 36 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
36 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e }, 37 { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
37 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 }, 38 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
38 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 39 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
39 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 }, 40 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
40 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, 41 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
41 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 }, 42 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
42 { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 }, 43 { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
43 { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b }, 44 { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
44 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 }, 45 { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
45 { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, 46 { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
46 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, 47 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
47 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, 48 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
@@ -207,7 +208,7 @@ static const u32 ar5416Common_9100[][2] = {
207 { 0x00008134, 0x00000000 }, 208 { 0x00008134, 0x00000000 },
208 { 0x00008138, 0x00000000 }, 209 { 0x00008138, 0x00000000 },
209 { 0x0000813c, 0x00000000 }, 210 { 0x0000813c, 0x00000000 },
210 { 0x00008144, 0x00000000 }, 211 { 0x00008144, 0xffffffff },
211 { 0x00008168, 0x00000000 }, 212 { 0x00008168, 0x00000000 },
212 { 0x0000816c, 0x00000000 }, 213 { 0x0000816c, 0x00000000 },
213 { 0x00008170, 0x32143320 }, 214 { 0x00008170, 0x32143320 },
@@ -266,7 +267,7 @@ static const u32 ar5416Common_9100[][2] = {
266 { 0x0000832c, 0x00000007 }, 267 { 0x0000832c, 0x00000007 },
267 { 0x00008330, 0x00000302 }, 268 { 0x00008330, 0x00000302 },
268 { 0x00008334, 0x00000e00 }, 269 { 0x00008334, 0x00000e00 },
269 { 0x00008338, 0x00000000 }, 270 { 0x00008338, 0x00070000 },
270 { 0x0000833c, 0x00000000 }, 271 { 0x0000833c, 0x00000000 },
271 { 0x00008340, 0x000107ff }, 272 { 0x00008340, 0x000107ff },
272 { 0x00009808, 0x00000000 }, 273 { 0x00009808, 0x00000000 },
@@ -661,6 +662,7 @@ static const u32 ar5416Addac_9100[][2] = {
661 {0x000098c4, 0x00000000 }, 662 {0x000098c4, 0x00000000 },
662}; 663};
663 664
665/* ar5416 - howl ar5416_howl.ini */
664static const u32 ar5416Modes[][6] = { 666static const u32 ar5416Modes[][6] = {
665 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 667 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
666 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 668 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -952,7 +954,7 @@ static const u32 ar5416Common[][2] = {
952 { 0x0000994c, 0x00020028 }, 954 { 0x0000994c, 0x00020028 },
953 { 0x0000c95c, 0x004b6a8e }, 955 { 0x0000c95c, 0x004b6a8e },
954 { 0x0000c968, 0x000003ce }, 956 { 0x0000c968, 0x000003ce },
955 { 0x00009970, 0x190fb514 }, 957 { 0x00009970, 0x190fb515 },
956 { 0x00009974, 0x00000000 }, 958 { 0x00009974, 0x00000000 },
957 { 0x00009978, 0x00000001 }, 959 { 0x00009978, 0x00000001 },
958 { 0x0000997c, 0x00000000 }, 960 { 0x0000997c, 0x00000000 },
@@ -1311,7 +1313,7 @@ static const u32 ar5416Addac[][2] = {
1311 {0x000098cc, 0x00000000 }, 1313 {0x000098cc, 0x00000000 },
1312}; 1314};
1313 1315
1314 1316/* AR5416 9160 Sowl ar5416_sowl.ini */
1315static const u32 ar5416Modes_9160[][6] = { 1317static const u32 ar5416Modes_9160[][6] = {
1316 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 1318 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
1317 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 1319 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -1329,21 +1331,22 @@ static const u32 ar5416Modes_9160[][6] = {
1329 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 1331 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
1330 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 1332 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
1331 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 1333 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
1332 { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 }, 1334 { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
1333 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, 1335 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
1334 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e }, 1336 { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
1335 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, 1337 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
1336 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 1338 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
1337 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 }, 1339 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
1338 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, 1340 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
1339 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 }, 1341 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
1340 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, 1342 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
1341 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, 1343 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
1342 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 }, 1344 { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
1343 { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, 1345 { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
1344 { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, 1346 { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
1345 { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, 1347 { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
1346 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 }, 1348 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
1349 { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
1347 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 }, 1350 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
1348 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, 1351 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
1349 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, 1352 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
@@ -1505,7 +1508,7 @@ static const u32 ar5416Common_9160[][2] = {
1505 { 0x00008134, 0x00000000 }, 1508 { 0x00008134, 0x00000000 },
1506 { 0x00008138, 0x00000000 }, 1509 { 0x00008138, 0x00000000 },
1507 { 0x0000813c, 0x00000000 }, 1510 { 0x0000813c, 0x00000000 },
1508 { 0x00008144, 0x00000000 }, 1511 { 0x00008144, 0xffffffff },
1509 { 0x00008168, 0x00000000 }, 1512 { 0x00008168, 0x00000000 },
1510 { 0x0000816c, 0x00000000 }, 1513 { 0x0000816c, 0x00000000 },
1511 { 0x00008170, 0x32143320 }, 1514 { 0x00008170, 0x32143320 },
@@ -1564,7 +1567,7 @@ static const u32 ar5416Common_9160[][2] = {
1564 { 0x0000832c, 0x00000007 }, 1567 { 0x0000832c, 0x00000007 },
1565 { 0x00008330, 0x00000302 }, 1568 { 0x00008330, 0x00000302 },
1566 { 0x00008334, 0x00000e00 }, 1569 { 0x00008334, 0x00000e00 },
1567 { 0x00008338, 0x00000000 }, 1570 { 0x00008338, 0x00ff0000 },
1568 { 0x0000833c, 0x00000000 }, 1571 { 0x0000833c, 0x00000000 },
1569 { 0x00008340, 0x000107ff }, 1572 { 0x00008340, 0x000107ff },
1570 { 0x00009808, 0x00000000 }, 1573 { 0x00009808, 0x00000000 },
@@ -1597,7 +1600,6 @@ static const u32 ar5416Common_9160[][2] = {
1597 { 0x00009958, 0x2108ecff }, 1600 { 0x00009958, 0x2108ecff },
1598 { 0x00009940, 0x00750604 }, 1601 { 0x00009940, 0x00750604 },
1599 { 0x0000c95c, 0x004b6a8e }, 1602 { 0x0000c95c, 0x004b6a8e },
1600 { 0x0000c968, 0x000003ce },
1601 { 0x00009970, 0x190fb515 }, 1603 { 0x00009970, 0x190fb515 },
1602 { 0x00009974, 0x00000000 }, 1604 { 0x00009974, 0x00000000 },
1603 { 0x00009978, 0x00000001 }, 1605 { 0x00009978, 0x00000001 },
@@ -1699,7 +1701,7 @@ static const u32 ar5416Common_9160[][2] = {
1699 { 0x0000a244, 0x00007bb6 }, 1701 { 0x0000a244, 0x00007bb6 },
1700 { 0x0000a248, 0x0fff3ffc }, 1702 { 0x0000a248, 0x0fff3ffc },
1701 { 0x0000a24c, 0x00000001 }, 1703 { 0x0000a24c, 0x00000001 },
1702 { 0x0000a250, 0x0000a000 }, 1704 { 0x0000a250, 0x0000e000 },
1703 { 0x0000a254, 0x00000000 }, 1705 { 0x0000a254, 0x00000000 },
1704 { 0x0000a258, 0x0cc75380 }, 1706 { 0x0000a258, 0x0cc75380 },
1705 { 0x0000a25c, 0x0f0f0f01 }, 1707 { 0x0000a25c, 0x0f0f0f01 },
@@ -1719,7 +1721,7 @@ static const u32 ar5416Common_9160[][2] = {
1719 { 0x0000a34c, 0x3fffffff }, 1721 { 0x0000a34c, 0x3fffffff },
1720 { 0x0000a350, 0x3fffffff }, 1722 { 0x0000a350, 0x3fffffff },
1721 { 0x0000a354, 0x0003ffff }, 1723 { 0x0000a354, 0x0003ffff },
1722 { 0x0000a358, 0x79a8aa33 }, 1724 { 0x0000a358, 0x79bfaa03 },
1723 { 0x0000d35c, 0x07ffffef }, 1725 { 0x0000d35c, 0x07ffffef },
1724 { 0x0000d360, 0x0fffffe7 }, 1726 { 0x0000d360, 0x0fffffe7 },
1725 { 0x0000d364, 0x17ffffe5 }, 1727 { 0x0000d364, 0x17ffffe5 },
@@ -1842,7 +1844,6 @@ static const u32 ar5416Bank3_9160[][3] = {
1842}; 1844};
1843 1845
1844static const u32 ar5416Bank6_9160[][3] = { 1846static const u32 ar5416Bank6_9160[][3] = {
1845
1846 { 0x0000989c, 0x00000000, 0x00000000 }, 1847 { 0x0000989c, 0x00000000, 0x00000000 },
1847 { 0x0000989c, 0x00000000, 0x00000000 }, 1848 { 0x0000989c, 0x00000000, 0x00000000 },
1848 { 0x0000989c, 0x00000000, 0x00000000 }, 1849 { 0x0000989c, 0x00000000, 0x00000000 },
@@ -1920,7 +1921,6 @@ static const u32 ar5416Bank7_9160[][2] = {
1920 { 0x000098cc, 0x0000000e }, 1921 { 0x000098cc, 0x0000000e },
1921}; 1922};
1922 1923
1923
1924static u32 ar5416Addac_9160[][2] = { 1924static u32 ar5416Addac_9160[][2] = {
1925 {0x0000989c, 0x00000000 }, 1925 {0x0000989c, 0x00000000 },
1926 {0x0000989c, 0x00000000 }, 1926 {0x0000989c, 0x00000000 },
@@ -1956,7 +1956,6 @@ static u32 ar5416Addac_9160[][2] = {
1956 {0x000098cc, 0x00000000 }, 1956 {0x000098cc, 0x00000000 },
1957}; 1957};
1958 1958
1959
1960static u32 ar5416Addac_91601_1[][2] = { 1959static u32 ar5416Addac_91601_1[][2] = {
1961 {0x0000989c, 0x00000000 }, 1960 {0x0000989c, 0x00000000 },
1962 {0x0000989c, 0x00000000 }, 1961 {0x0000989c, 0x00000000 },
@@ -1992,8 +1991,7 @@ static u32 ar5416Addac_91601_1[][2] = {
1992 {0x000098cc, 0x00000000 }, 1991 {0x000098cc, 0x00000000 },
1993}; 1992};
1994 1993
1995 1994/* XXX 9280 1 */
1996
1997static const u32 ar9280Modes_9280[][6] = { 1995static const u32 ar9280Modes_9280[][6] = {
1998 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 1996 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
1999 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 1997 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -2543,9 +2541,7 @@ static const u32 ar9280Common_9280[][2] = {
2543 { 0x00007898, 0x2a850160 }, 2541 { 0x00007898, 0x2a850160 },
2544}; 2542};
2545 2543
2546 2544/* XXX 9280 2 */
2547
2548
2549static const u32 ar9280Modes_9280_2[][6] = { 2545static const u32 ar9280Modes_9280_2[][6] = {
2550 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 2546 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
2551 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 2547 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -2560,26 +2556,24 @@ static const u32 ar9280Modes_9280_2[][6] = {
2560 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 2556 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
2561 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 2557 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2562 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, 2558 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
2563 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a022e, 0x206a022e, 0x206a022e }, 2559 { 0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e, 0x206a012e },
2564 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, 2560 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
2565 { 0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 }, 2561 { 0x00009850, 0x6c4000e2, 0x6c4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 },
2566 { 0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 }, 2562 { 0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
2567 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, 2563 { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x3139605e, 0x31395d5e, 0x31395d5e },
2568 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e },
2569 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
2570 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, 2564 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
2571 { 0x0000c864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 2565 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
2572 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, 2566 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
2573 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, 2567 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
2574 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 }, 2568 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
2575 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, 2569 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
2576 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, 2570 { 0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
2577 { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 }, 2571 { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010 },
2578 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, 2572 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2579 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, 2573 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2580 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 }, 2574 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
2581 { 0x0000c9b8, 0x0000000f, 0x0000000f, 0x0000001c, 0x0000001c, 0x0000001c }, 2575 { 0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c },
2582 { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, 2576 { 0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00, 0x00000c00 },
2583 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, 2577 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
2584 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, 2578 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
2585 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, 2579 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
@@ -2587,6 +2581,516 @@ static const u32 ar9280Modes_9280_2[][6] = {
2587 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, 2581 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
2588 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2582 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2589 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2583 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2584 { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 },
2585 { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
2586 { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
2587 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
2588 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
2589 { 0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000 },
2590 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
2591 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
2592 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2593 { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 },
2594};
2595
2596static const u32 ar9280Common_9280_2[][2] = {
2597 { 0x0000000c, 0x00000000 },
2598 { 0x00000030, 0x00020015 },
2599 { 0x00000034, 0x00000005 },
2600 { 0x00000040, 0x00000000 },
2601 { 0x00000044, 0x00000008 },
2602 { 0x00000048, 0x00000008 },
2603 { 0x0000004c, 0x00000010 },
2604 { 0x00000050, 0x00000000 },
2605 { 0x00000054, 0x0000001f },
2606 { 0x00000800, 0x00000000 },
2607 { 0x00000804, 0x00000000 },
2608 { 0x00000808, 0x00000000 },
2609 { 0x0000080c, 0x00000000 },
2610 { 0x00000810, 0x00000000 },
2611 { 0x00000814, 0x00000000 },
2612 { 0x00000818, 0x00000000 },
2613 { 0x0000081c, 0x00000000 },
2614 { 0x00000820, 0x00000000 },
2615 { 0x00000824, 0x00000000 },
2616 { 0x00001040, 0x002ffc0f },
2617 { 0x00001044, 0x002ffc0f },
2618 { 0x00001048, 0x002ffc0f },
2619 { 0x0000104c, 0x002ffc0f },
2620 { 0x00001050, 0x002ffc0f },
2621 { 0x00001054, 0x002ffc0f },
2622 { 0x00001058, 0x002ffc0f },
2623 { 0x0000105c, 0x002ffc0f },
2624 { 0x00001060, 0x002ffc0f },
2625 { 0x00001064, 0x002ffc0f },
2626 { 0x00001230, 0x00000000 },
2627 { 0x00001270, 0x00000000 },
2628 { 0x00001038, 0x00000000 },
2629 { 0x00001078, 0x00000000 },
2630 { 0x000010b8, 0x00000000 },
2631 { 0x000010f8, 0x00000000 },
2632 { 0x00001138, 0x00000000 },
2633 { 0x00001178, 0x00000000 },
2634 { 0x000011b8, 0x00000000 },
2635 { 0x000011f8, 0x00000000 },
2636 { 0x00001238, 0x00000000 },
2637 { 0x00001278, 0x00000000 },
2638 { 0x000012b8, 0x00000000 },
2639 { 0x000012f8, 0x00000000 },
2640 { 0x00001338, 0x00000000 },
2641 { 0x00001378, 0x00000000 },
2642 { 0x000013b8, 0x00000000 },
2643 { 0x000013f8, 0x00000000 },
2644 { 0x00001438, 0x00000000 },
2645 { 0x00001478, 0x00000000 },
2646 { 0x000014b8, 0x00000000 },
2647 { 0x000014f8, 0x00000000 },
2648 { 0x00001538, 0x00000000 },
2649 { 0x00001578, 0x00000000 },
2650 { 0x000015b8, 0x00000000 },
2651 { 0x000015f8, 0x00000000 },
2652 { 0x00001638, 0x00000000 },
2653 { 0x00001678, 0x00000000 },
2654 { 0x000016b8, 0x00000000 },
2655 { 0x000016f8, 0x00000000 },
2656 { 0x00001738, 0x00000000 },
2657 { 0x00001778, 0x00000000 },
2658 { 0x000017b8, 0x00000000 },
2659 { 0x000017f8, 0x00000000 },
2660 { 0x0000103c, 0x00000000 },
2661 { 0x0000107c, 0x00000000 },
2662 { 0x000010bc, 0x00000000 },
2663 { 0x000010fc, 0x00000000 },
2664 { 0x0000113c, 0x00000000 },
2665 { 0x0000117c, 0x00000000 },
2666 { 0x000011bc, 0x00000000 },
2667 { 0x000011fc, 0x00000000 },
2668 { 0x0000123c, 0x00000000 },
2669 { 0x0000127c, 0x00000000 },
2670 { 0x000012bc, 0x00000000 },
2671 { 0x000012fc, 0x00000000 },
2672 { 0x0000133c, 0x00000000 },
2673 { 0x0000137c, 0x00000000 },
2674 { 0x000013bc, 0x00000000 },
2675 { 0x000013fc, 0x00000000 },
2676 { 0x0000143c, 0x00000000 },
2677 { 0x0000147c, 0x00000000 },
2678 { 0x00004030, 0x00000002 },
2679 { 0x0000403c, 0x00000002 },
2680 { 0x00004024, 0x0000001f },
2681 { 0x00004060, 0x00000000 },
2682 { 0x00004064, 0x00000000 },
2683 { 0x00007010, 0x00000033 },
2684 { 0x00007034, 0x00000002 },
2685 { 0x00007038, 0x000004c2 },
2686 { 0x00008004, 0x00000000 },
2687 { 0x00008008, 0x00000000 },
2688 { 0x0000800c, 0x00000000 },
2689 { 0x00008018, 0x00000700 },
2690 { 0x00008020, 0x00000000 },
2691 { 0x00008038, 0x00000000 },
2692 { 0x0000803c, 0x00000000 },
2693 { 0x00008048, 0x40000000 },
2694 { 0x00008054, 0x00000000 },
2695 { 0x00008058, 0x00000000 },
2696 { 0x0000805c, 0x000fc78f },
2697 { 0x00008060, 0x0000000f },
2698 { 0x00008064, 0x00000000 },
2699 { 0x00008070, 0x00000000 },
2700 { 0x000080c0, 0x2a80001a },
2701 { 0x000080c4, 0x05dc01e0 },
2702 { 0x000080c8, 0x1f402710 },
2703 { 0x000080cc, 0x01f40000 },
2704 { 0x000080d0, 0x00001e00 },
2705 { 0x000080d4, 0x00000000 },
2706 { 0x000080d8, 0x00400000 },
2707 { 0x000080e0, 0xffffffff },
2708 { 0x000080e4, 0x0000ffff },
2709 { 0x000080e8, 0x003f3f3f },
2710 { 0x000080ec, 0x00000000 },
2711 { 0x000080f0, 0x00000000 },
2712 { 0x000080f4, 0x00000000 },
2713 { 0x000080f8, 0x00000000 },
2714 { 0x000080fc, 0x00020000 },
2715 { 0x00008100, 0x00020000 },
2716 { 0x00008104, 0x00000001 },
2717 { 0x00008108, 0x00000052 },
2718 { 0x0000810c, 0x00000000 },
2719 { 0x00008110, 0x00000168 },
2720 { 0x00008118, 0x000100aa },
2721 { 0x0000811c, 0x00003210 },
2722 { 0x00008120, 0x08f04800 },
2723 { 0x00008124, 0x00000000 },
2724 { 0x00008128, 0x00000000 },
2725 { 0x0000812c, 0x00000000 },
2726 { 0x00008130, 0x00000000 },
2727 { 0x00008134, 0x00000000 },
2728 { 0x00008138, 0x00000000 },
2729 { 0x0000813c, 0x00000000 },
2730 { 0x00008144, 0xffffffff },
2731 { 0x00008168, 0x00000000 },
2732 { 0x0000816c, 0x00000000 },
2733 { 0x00008170, 0x32143320 },
2734 { 0x00008174, 0xfaa4fa50 },
2735 { 0x00008178, 0x00000100 },
2736 { 0x0000817c, 0x00000000 },
2737 { 0x000081c0, 0x00000000 },
2738 { 0x000081d0, 0x00003210 },
2739 { 0x000081ec, 0x00000000 },
2740 { 0x000081f0, 0x00000000 },
2741 { 0x000081f4, 0x00000000 },
2742 { 0x000081f8, 0x00000000 },
2743 { 0x000081fc, 0x00000000 },
2744 { 0x00008200, 0x00000000 },
2745 { 0x00008204, 0x00000000 },
2746 { 0x00008208, 0x00000000 },
2747 { 0x0000820c, 0x00000000 },
2748 { 0x00008210, 0x00000000 },
2749 { 0x00008214, 0x00000000 },
2750 { 0x00008218, 0x00000000 },
2751 { 0x0000821c, 0x00000000 },
2752 { 0x00008220, 0x00000000 },
2753 { 0x00008224, 0x00000000 },
2754 { 0x00008228, 0x00000000 },
2755 { 0x0000822c, 0x00000000 },
2756 { 0x00008230, 0x00000000 },
2757 { 0x00008234, 0x00000000 },
2758 { 0x00008238, 0x00000000 },
2759 { 0x0000823c, 0x00000000 },
2760 { 0x00008240, 0x00100000 },
2761 { 0x00008244, 0x0010f400 },
2762 { 0x00008248, 0x00000100 },
2763 { 0x0000824c, 0x0001e800 },
2764 { 0x00008250, 0x00000000 },
2765 { 0x00008254, 0x00000000 },
2766 { 0x00008258, 0x00000000 },
2767 { 0x0000825c, 0x400000ff },
2768 { 0x00008260, 0x00080922 },
2769 { 0x00008264, 0xa8a00010 },
2770 { 0x00008270, 0x00000000 },
2771 { 0x00008274, 0x40000000 },
2772 { 0x00008278, 0x003e4180 },
2773 { 0x0000827c, 0x00000000 },
2774 { 0x00008284, 0x0000002c },
2775 { 0x00008288, 0x0000002c },
2776 { 0x0000828c, 0x00000000 },
2777 { 0x00008294, 0x00000000 },
2778 { 0x00008298, 0x00000000 },
2779 { 0x0000829c, 0x00000000 },
2780 { 0x00008300, 0x00000040 },
2781 { 0x00008314, 0x00000000 },
2782 { 0x00008328, 0x00000000 },
2783 { 0x0000832c, 0x00000007 },
2784 { 0x00008330, 0x00000302 },
2785 { 0x00008334, 0x00000e00 },
2786 { 0x00008338, 0x00ff0000 },
2787 { 0x0000833c, 0x00000000 },
2788 { 0x00008340, 0x000107ff },
2789 { 0x00008344, 0x00581043 },
2790 { 0x00009808, 0x00000000 },
2791 { 0x0000980c, 0xafa68e30 },
2792 { 0x00009810, 0xfd14e000 },
2793 { 0x00009814, 0x9c0a9f6b },
2794 { 0x0000981c, 0x00000000 },
2795 { 0x0000982c, 0x0000a000 },
2796 { 0x00009830, 0x00000000 },
2797 { 0x0000983c, 0x00200400 },
2798 { 0x0000984c, 0x0040233c },
2799 { 0x0000a84c, 0x0040233c },
2800 { 0x00009854, 0x00000044 },
2801 { 0x00009900, 0x00000000 },
2802 { 0x00009904, 0x00000000 },
2803 { 0x00009908, 0x00000000 },
2804 { 0x0000990c, 0x00000000 },
2805 { 0x00009910, 0x01002310 },
2806 { 0x0000991c, 0x10000fff },
2807 { 0x00009920, 0x04900000 },
2808 { 0x0000a920, 0x04900000 },
2809 { 0x00009928, 0x00000001 },
2810 { 0x0000992c, 0x00000004 },
2811 { 0x00009934, 0x1e1f2022 },
2812 { 0x00009938, 0x0a0b0c0d },
2813 { 0x0000993c, 0x00000000 },
2814 { 0x00009948, 0x9280c00a },
2815 { 0x0000994c, 0x00020028 },
2816 { 0x00009954, 0x5f3ca3de },
2817 { 0x00009958, 0x2108ecff },
2818 { 0x00009940, 0x14750604 },
2819 { 0x0000c95c, 0x004b6a8e },
2820 { 0x00009968, 0x000003ce },
2821 { 0x00009970, 0x190fb515 },
2822 { 0x00009974, 0x00000000 },
2823 { 0x00009978, 0x00000001 },
2824 { 0x0000997c, 0x00000000 },
2825 { 0x00009980, 0x00000000 },
2826 { 0x00009984, 0x00000000 },
2827 { 0x00009988, 0x00000000 },
2828 { 0x0000998c, 0x00000000 },
2829 { 0x00009990, 0x00000000 },
2830 { 0x00009994, 0x00000000 },
2831 { 0x00009998, 0x00000000 },
2832 { 0x0000999c, 0x00000000 },
2833 { 0x000099a0, 0x00000000 },
2834 { 0x000099a4, 0x00000001 },
2835 { 0x000099a8, 0x201fff00 },
2836 { 0x000099ac, 0x006f0000 },
2837 { 0x000099b0, 0x03051000 },
2838 { 0x000099b4, 0x00000820 },
2839 { 0x000099dc, 0x00000000 },
2840 { 0x000099e0, 0x00000000 },
2841 { 0x000099e4, 0xaaaaaaaa },
2842 { 0x000099e8, 0x3c466478 },
2843 { 0x000099ec, 0x0cc80caa },
2844 { 0x000099f0, 0x00000000 },
2845 { 0x000099fc, 0x00001042 },
2846 { 0x0000a208, 0x803e4788 },
2847 { 0x0000a210, 0x4080a333 },
2848 { 0x0000a214, 0x40206c10 },
2849 { 0x0000a218, 0x009c4060 },
2850 { 0x0000a220, 0x01834061 },
2851 { 0x0000a224, 0x00000400 },
2852 { 0x0000a228, 0x000003b5 },
2853 { 0x0000a22c, 0x233f7180 },
2854 { 0x0000a234, 0x20202020 },
2855 { 0x0000a238, 0x20202020 },
2856 { 0x0000a23c, 0x13c88000 },
2857 { 0x0000a240, 0x38490a20 },
2858 { 0x0000a244, 0x00007bb6 },
2859 { 0x0000a248, 0x0fff3ffc },
2860 { 0x0000a24c, 0x00000000 },
2861 { 0x0000a254, 0x00000000 },
2862 { 0x0000a258, 0x0cdbd380 },
2863 { 0x0000a25c, 0x0f0f0f01 },
2864 { 0x0000a260, 0xdfa91f01 },
2865 { 0x0000a268, 0x00000000 },
2866 { 0x0000a26c, 0x0ebae9c6 },
2867 { 0x0000b26c, 0x0ebae9c6 },
2868 { 0x0000d270, 0x00820820 },
2869 { 0x0000a278, 0x1ce739ce },
2870 { 0x0000d35c, 0x07ffffef },
2871 { 0x0000d360, 0x0fffffe7 },
2872 { 0x0000d364, 0x17ffffe5 },
2873 { 0x0000d368, 0x1fffffe4 },
2874 { 0x0000d36c, 0x37ffffe3 },
2875 { 0x0000d370, 0x3fffffe3 },
2876 { 0x0000d374, 0x57ffffe3 },
2877 { 0x0000d378, 0x5fffffe2 },
2878 { 0x0000d37c, 0x7fffffe2 },
2879 { 0x0000d380, 0x7f3c7bba },
2880 { 0x0000d384, 0xf3307ff0 },
2881 { 0x0000a388, 0x0c000000 },
2882 { 0x0000a38c, 0x20202020 },
2883 { 0x0000a390, 0x20202020 },
2884 { 0x0000a394, 0x1ce739ce },
2885 { 0x0000a398, 0x000001ce },
2886 { 0x0000a39c, 0x00000001 },
2887 { 0x0000a3a0, 0x00000000 },
2888 { 0x0000a3a4, 0x00000000 },
2889 { 0x0000a3a8, 0x00000000 },
2890 { 0x0000a3ac, 0x00000000 },
2891 { 0x0000a3b0, 0x00000000 },
2892 { 0x0000a3b4, 0x00000000 },
2893 { 0x0000a3b8, 0x00000000 },
2894 { 0x0000a3bc, 0x00000000 },
2895 { 0x0000a3c0, 0x00000000 },
2896 { 0x0000a3c4, 0x00000000 },
2897 { 0x0000a3c8, 0x00000246 },
2898 { 0x0000a3cc, 0x20202020 },
2899 { 0x0000a3d0, 0x20202020 },
2900 { 0x0000a3d4, 0x20202020 },
2901 { 0x0000a3dc, 0x1ce739ce },
2902 { 0x0000a3e0, 0x000001ce },
2903 { 0x0000a3e4, 0x00000000 },
2904 { 0x0000a3e8, 0x18c43433 },
2905 { 0x0000a3ec, 0x00f70081 },
2906 { 0x00007800, 0x00040000 },
2907 { 0x00007804, 0xdb005012 },
2908 { 0x00007808, 0x04924914 },
2909 { 0x0000780c, 0x21084210 },
2910 { 0x00007810, 0x6d801300 },
2911 { 0x00007818, 0x07e41000 },
2912 { 0x0000781c, 0x00392000 },
2913 { 0x00007820, 0x92592480 },
2914 { 0x00007824, 0x00040000 },
2915 { 0x00007828, 0xdb005012 },
2916 { 0x0000782c, 0x04924914 },
2917 { 0x00007830, 0x21084210 },
2918 { 0x00007834, 0x6d801300 },
2919 { 0x0000783c, 0x07e40000 },
2920 { 0x00007840, 0x00392000 },
2921 { 0x00007844, 0x92592480 },
2922 { 0x00007848, 0x00100000 },
2923 { 0x0000784c, 0x773f0567 },
2924 { 0x00007850, 0x54214514 },
2925 { 0x00007854, 0x12035828 },
2926 { 0x00007858, 0x9259269a },
2927 { 0x00007860, 0x52802000 },
2928 { 0x00007864, 0x0a8e370e },
2929 { 0x00007868, 0xc0102850 },
2930 { 0x0000786c, 0x812d4000 },
2931 { 0x00007870, 0x807ec400 },
2932 { 0x00007874, 0x001b6db0 },
2933 { 0x00007878, 0x00376b63 },
2934 { 0x0000787c, 0x06db6db6 },
2935 { 0x00007880, 0x006d8000 },
2936 { 0x00007884, 0xffeffffe },
2937 { 0x00007888, 0xffeffffe },
2938 { 0x0000788c, 0x00010000 },
2939 { 0x00007890, 0x02060aeb },
2940 { 0x00007898, 0x2a850160 },
2941};
2942
2943static const u32 ar9280Modes_fast_clock_9280_2[][3] = {
2944 { 0x00001030, 0x00000268, 0x000004d0 },
2945 { 0x00001070, 0x0000018c, 0x00000318 },
2946 { 0x000010b0, 0x00000fd0, 0x00001fa0 },
2947 { 0x00008014, 0x044c044c, 0x08980898 },
2948 { 0x0000801c, 0x148ec02b, 0x148ec057 },
2949 { 0x00008318, 0x000044c0, 0x00008980 },
2950 { 0x00009820, 0x02020200, 0x02020200 },
2951 { 0x00009824, 0x00000f0f, 0x00000f0f },
2952 { 0x00009828, 0x0b020001, 0x0b020001 },
2953 { 0x00009834, 0x00000f0f, 0x00000f0f },
2954 { 0x00009844, 0x03721821, 0x03721821 },
2955 { 0x00009914, 0x00000898, 0x00001130 },
2956 { 0x00009918, 0x0000000b, 0x00000016 },
2957 { 0x00009944, 0xdfbc1210, 0xdfbc1210 },
2958};
2959
2960static const u32 ar9280Modes_backoff_23db_rxgain_9280_2[][6] = {
2961 { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 },
2962 { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 },
2963 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 },
2964 { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 },
2965 { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c },
2966 { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 },
2967 { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 },
2968 { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 },
2969 { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c },
2970 { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 },
2971 { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 },
2972 { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 },
2973 { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c },
2974 { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 },
2975 { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 },
2976 { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 },
2977 { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c },
2978 { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 },
2979 { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 },
2980 { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 },
2981 { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 },
2982 { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 },
2983 { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c },
2984 { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 },
2985 { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 },
2986 { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 },
2987 { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c },
2988 { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 },
2989 { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 },
2990 { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 },
2991 { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 },
2992 { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 },
2993 { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 },
2994 { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 },
2995 { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 },
2996 { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c },
2997 { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 },
2998 { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 },
2999 { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 },
3000 { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 },
3001 { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 },
3002 { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c },
3003 { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 },
3004 { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 },
3005 { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 },
3006 { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 },
3007 { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 },
3008 { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c },
3009 { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b10, 0x00008b10, 0x00008b10 },
3010 { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b14, 0x00008b14, 0x00008b14 },
3011 { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b01, 0x00008b01, 0x00008b01 },
3012 { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b05, 0x00008b05, 0x00008b05 },
3013 { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b09, 0x00008b09, 0x00008b09 },
3014 { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008b0d, 0x00008b0d, 0x00008b0d },
3015 { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008b11, 0x00008b11, 0x00008b11 },
3016 { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008b15, 0x00008b15, 0x00008b15 },
3017 { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008b02, 0x00008b02, 0x00008b02 },
3018 { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008b06, 0x00008b06, 0x00008b06 },
3019 { 0x00009ae8, 0x0000b780, 0x0000b780, 0x00008b0a, 0x00008b0a, 0x00008b0a },
3020 { 0x00009aec, 0x0000b784, 0x0000b784, 0x00008b0e, 0x00008b0e, 0x00008b0e },
3021 { 0x00009af0, 0x0000b788, 0x0000b788, 0x00008b12, 0x00008b12, 0x00008b12 },
3022 { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00008b16, 0x00008b16, 0x00008b16 },
3023 { 0x00009af8, 0x0000b790, 0x0000b790, 0x00008b03, 0x00008b03, 0x00008b03 },
3024 { 0x00009afc, 0x0000b794, 0x0000b794, 0x00008b07, 0x00008b07, 0x00008b07 },
3025 { 0x00009b00, 0x0000b798, 0x0000b798, 0x00008b0b, 0x00008b0b, 0x00008b0b },
3026 { 0x00009b04, 0x0000d784, 0x0000d784, 0x00008b0f, 0x00008b0f, 0x00008b0f },
3027 { 0x00009b08, 0x0000d788, 0x0000d788, 0x00008b13, 0x00008b13, 0x00008b13 },
3028 { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00008b17, 0x00008b17, 0x00008b17 },
3029 { 0x00009b10, 0x0000d790, 0x0000d790, 0x00008b23, 0x00008b23, 0x00008b23 },
3030 { 0x00009b14, 0x0000f780, 0x0000f780, 0x00008b27, 0x00008b27, 0x00008b27 },
3031 { 0x00009b18, 0x0000f784, 0x0000f784, 0x00008b2b, 0x00008b2b, 0x00008b2b },
3032 { 0x00009b1c, 0x0000f788, 0x0000f788, 0x00008b2f, 0x00008b2f, 0x00008b2f },
3033 { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x00008b33, 0x00008b33, 0x00008b33 },
3034 { 0x00009b24, 0x0000f790, 0x0000f790, 0x00008b37, 0x00008b37, 0x00008b37 },
3035 { 0x00009b28, 0x0000f794, 0x0000f794, 0x00008b43, 0x00008b43, 0x00008b43 },
3036 { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x00008b47, 0x00008b47, 0x00008b47 },
3037 { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00008b4b, 0x00008b4b, 0x00008b4b },
3038 { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00008b4f, 0x00008b4f, 0x00008b4f },
3039 { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00008b53, 0x00008b53, 0x00008b53 },
3040 { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00008b57, 0x00008b57, 0x00008b57 },
3041 { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3042 { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3043 { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3044 { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3045 { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3046 { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3047 { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3048 { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3049 { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3050 { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3051 { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3052 { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3053 { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3054 { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3055 { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3056 { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3057 { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3058 { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3059 { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3060 { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3061 { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3062 { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3063 { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3064 { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3065 { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3066 { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3067 { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3068 { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3069 { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3070 { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3071 { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3072 { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3073 { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3074 { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3075 { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3076 { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3077 { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3078 { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3079 { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3080 { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3081 { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3082 { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3083 { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3084 { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3085 { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3086 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3087 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3088 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x00008b5b, 0x00008b5b, 0x00008b5b },
3089 { 0x00009848, 0x00001066, 0x00001066, 0x00001050, 0x00001050, 0x00001050 },
3090 { 0x0000a848, 0x00001066, 0x00001066, 0x00001050, 0x00001050, 0x00001050 },
3091};
3092
3093static const u32 ar9280Modes_original_rxgain_9280_2[][6] = {
2590 { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 }, 3094 { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 },
2591 { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 }, 3095 { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 },
2592 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 }, 3096 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 },
@@ -2715,14 +3219,172 @@ static const u32 ar9280Modes_9280_2[][6] = {
2715 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 3219 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2716 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 3220 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2717 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, 3221 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db },
2718 { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 }, 3222 { 0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 },
2719 { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 }, 3223 { 0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 },
2720 { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 }, 3224};
2721 { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 }, 3225
2722 { 0x0000a21c, 0x1463800a, 0x1463800a, 0x1463800a, 0x1463800a, 0x1463800a }, 3226static const u32 ar9280Modes_backoff_13db_rxgain_9280_2[][6] = {
2723 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 3227 { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 },
2724 { 0x0000a250, 0x001ff000, 0x001ff000, 0x001da000, 0x001da000, 0x001da000 }, 3228 { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 },
2725 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 }, 3229 { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 },
3230 { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 },
3231 { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c },
3232 { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 },
3233 { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 },
3234 { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 },
3235 { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c },
3236 { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 },
3237 { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 },
3238 { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 },
3239 { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c },
3240 { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 },
3241 { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 },
3242 { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 },
3243 { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c },
3244 { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 },
3245 { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 },
3246 { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 },
3247 { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 },
3248 { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 },
3249 { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c },
3250 { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 },
3251 { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 },
3252 { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 },
3253 { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c },
3254 { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 },
3255 { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 },
3256 { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 },
3257 { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 },
3258 { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 },
3259 { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 },
3260 { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 },
3261 { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 },
3262 { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c },
3263 { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 },
3264 { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 },
3265 { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 },
3266 { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 },
3267 { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 },
3268 { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c },
3269 { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 },
3270 { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 },
3271 { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 },
3272 { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 },
3273 { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 },
3274 { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c },
3275 { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 },
3276 { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 },
3277 { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 },
3278 { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c },
3279 { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 },
3280 { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 },
3281 { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 },
3282 { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 },
3283 { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c },
3284 { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 },
3285 { 0x00009ae8, 0x0000b780, 0x0000b780, 0x00009310, 0x00009310, 0x00009310 },
3286 { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009314, 0x00009314, 0x00009314 },
3287 { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009320, 0x00009320, 0x00009320 },
3288 { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009324, 0x00009324, 0x00009324 },
3289 { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009328, 0x00009328, 0x00009328 },
3290 { 0x00009afc, 0x0000b794, 0x0000b794, 0x0000932c, 0x0000932c, 0x0000932c },
3291 { 0x00009b00, 0x0000b798, 0x0000b798, 0x00009330, 0x00009330, 0x00009330 },
3292 { 0x00009b04, 0x0000d784, 0x0000d784, 0x00009334, 0x00009334, 0x00009334 },
3293 { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009321, 0x00009321, 0x00009321 },
3294 { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009325, 0x00009325, 0x00009325 },
3295 { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009329, 0x00009329, 0x00009329 },
3296 { 0x00009b14, 0x0000f780, 0x0000f780, 0x0000932d, 0x0000932d, 0x0000932d },
3297 { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009331, 0x00009331, 0x00009331 },
3298 { 0x00009b1c, 0x0000f788, 0x0000f788, 0x00009335, 0x00009335, 0x00009335 },
3299 { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x00009322, 0x00009322, 0x00009322 },
3300 { 0x00009b24, 0x0000f790, 0x0000f790, 0x00009326, 0x00009326, 0x00009326 },
3301 { 0x00009b28, 0x0000f794, 0x0000f794, 0x0000932a, 0x0000932a, 0x0000932a },
3302 { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x0000932e, 0x0000932e, 0x0000932e },
3303 { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x00009332, 0x00009332, 0x00009332 },
3304 { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x00009336, 0x00009336, 0x00009336 },
3305 { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x00009323, 0x00009323, 0x00009323 },
3306 { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x00009327, 0x00009327, 0x00009327 },
3307 { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x0000932b, 0x0000932b, 0x0000932b },
3308 { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x0000932f, 0x0000932f, 0x0000932f },
3309 { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x00009333, 0x00009333, 0x00009333 },
3310 { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x00009337, 0x00009337, 0x00009337 },
3311 { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x00009343, 0x00009343, 0x00009343 },
3312 { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x00009347, 0x00009347, 0x00009347 },
3313 { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x0000934b, 0x0000934b, 0x0000934b },
3314 { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x0000934f, 0x0000934f, 0x0000934f },
3315 { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x00009353, 0x00009353, 0x00009353 },
3316 { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x00009357, 0x00009357, 0x00009357 },
3317 { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x0000935b, 0x0000935b, 0x0000935b },
3318 { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x0000935b, 0x0000935b, 0x0000935b },
3319 { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x0000935b, 0x0000935b, 0x0000935b },
3320 { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x0000935b, 0x0000935b, 0x0000935b },
3321 { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x0000935b, 0x0000935b, 0x0000935b },
3322 { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x0000935b, 0x0000935b, 0x0000935b },
3323 { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x0000935b, 0x0000935b, 0x0000935b },
3324 { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x0000935b, 0x0000935b, 0x0000935b },
3325 { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x0000935b, 0x0000935b, 0x0000935b },
3326 { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x0000935b, 0x0000935b, 0x0000935b },
3327 { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x0000935b, 0x0000935b, 0x0000935b },
3328 { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x0000935b, 0x0000935b, 0x0000935b },
3329 { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3330 { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3331 { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3332 { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3333 { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3334 { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3335 { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3336 { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3337 { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3338 { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3339 { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3340 { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3341 { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3342 { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3343 { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3344 { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3345 { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3346 { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3347 { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3348 { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3349 { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3350 { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3351 { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3352 { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3353 { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3354 { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x0000935b, 0x0000935b, 0x0000935b },
3355 { 0x00009848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a },
3356 { 0x0000a848, 0x00001066, 0x00001066, 0x0000105a, 0x0000105a, 0x0000105a },
3357};
3358
/*
 * AR9280 (rev 2) "high power" TX gain initialization table.
 * Row layout: { PHY register address, five per-mode values } — the five
 * value columns presumably correspond to the driver's operating-mode
 * indices (the sibling [][6] tables in this file share the layout);
 * TODO confirm against the initval-writing code that consumes them.
 * All values are opaque hardware calibration constants — do not edit
 * by hand; they come from the vendor HAL.
 * NOTE(review): the leading decimal on each line appears to be an
 * artifact of the diff this text was extracted from, not C source.
 */
3359 static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = {
3360 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3361 { 0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002, 0x00004002 },
3362 { 0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008, 0x00007008 },
3363 { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000c010, 0x0000c010, 0x0000c010 },
3364 { 0x0000a310, 0x0000e012, 0x0000e012, 0x00010012, 0x00010012, 0x00010012 },
3365 { 0x0000a314, 0x00011014, 0x00011014, 0x00013014, 0x00013014, 0x00013014 },
3366 { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001820a, 0x0001820a, 0x0001820a },
3367 { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001b211, 0x0001b211, 0x0001b211 },
3368 { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 },
3369 { 0x0000a324, 0x00020092, 0x00020092, 0x00022411, 0x00022411, 0x00022411 },
3370 { 0x0000a328, 0x0002410a, 0x0002410a, 0x00025413, 0x00025413, 0x00025413 },
3371 { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00029811, 0x00029811, 0x00029811 },
3372 { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002c813, 0x0002c813, 0x0002c813 },
3373 { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030a14, 0x00030a14, 0x00030a14 },
3374 { 0x0000a338, 0x000321ec, 0x000321ec, 0x00035a50, 0x00035a50, 0x00035a50 },
3375 { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00039c4c, 0x00039c4c, 0x00039c4c },
3376 { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003de8a, 0x0003de8a, 0x0003de8a },
3377 { 0x0000a344, 0x000321ec, 0x000321ec, 0x00042e92, 0x00042e92, 0x00042e92 },
3378 { 0x0000a348, 0x000321ec, 0x000321ec, 0x00046ed2, 0x00046ed2, 0x00046ed2 },
3379 { 0x0000a34c, 0x000321ec, 0x000321ec, 0x0004bed5, 0x0004bed5, 0x0004bed5 },
3380 { 0x0000a350, 0x000321ec, 0x000321ec, 0x0004ff54, 0x0004ff54, 0x0004ff54 },
3381 { 0x0000a354, 0x000321ec, 0x000321ec, 0x00053fd5, 0x00053fd5, 0x00053fd5 },
3382 { 0x00007814, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff },
3383 { 0x00007838, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff, 0x00198eff },
3384 { 0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce },
3385 };
3386
3387static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = {
2726 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 3388 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2727 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 }, 3389 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
2728 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 }, 3390 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
@@ -2745,14 +3407,363 @@ static const u32 ar9280Modes_9280_2[][6] = {
2745 { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a }, 3407 { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a },
2746 { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 }, 3408 { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 },
2747 { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 }, 3409 { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 },
3410 { 0x00007814, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff },
3411 { 0x00007838, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff, 0x0019beff },
3412 { 0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce },
3413};
3414
/*
 * AR9280 PCIe PHY (serdes) programming sequence: nine consecutive
 * { address, value } writes to register 0x00004040, then a final write
 * to 0x00004044.
 * Per the name, this variant presumably leaves CLKREQ de-asserted while
 * in the PCIe L1 power state — confirm in the init path that selects
 * between this table and ar9280PciePhy_clkreq_always_on_L1_9280 (the
 * two differ only in the sixth 0x4040 word: 0xc01dcffc here vs
 * 0xc01dcffd there).
 * Opaque serdes configuration words; do not edit by hand.
 */
3415 static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
3416 {0x00004040, 0x9248fd00 },
3417 {0x00004040, 0x24924924 },
3418 {0x00004040, 0xa8000019 },
3419 {0x00004040, 0x13160820 },
3420 {0x00004040, 0xe5980560 },
3421 {0x00004040, 0xc01dcffc },
3422 {0x00004040, 0x1aaabe41 },
3423 {0x00004040, 0xbe105554 },
3424 {0x00004040, 0x00043007 },
3425 {0x00004044, 0x00000000 },
3426 };
3427
/*
 * AR9280 PCIe PHY (serdes) programming sequence: nine consecutive
 * { address, value } writes to register 0x00004040, then a final write
 * to 0x00004044.
 * Per the name, this variant presumably keeps CLKREQ asserted in the
 * PCIe L1 power state — confirm in the init path that selects between
 * this table and ar9280PciePhy_clkreq_off_L1_9280 (the two differ only
 * in the sixth 0x4040 word: 0xc01dcffd here vs 0xc01dcffc there).
 * Opaque serdes configuration words; do not edit by hand.
 */
3428 static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
3429 {0x00004040, 0x9248fd00 },
3430 {0x00004040, 0x24924924 },
3431 {0x00004040, 0xa8000019 },
3432 {0x00004040, 0x13160820 },
3433 {0x00004040, 0xe5980560 },
3434 {0x00004040, 0xc01dcffd },
3435 {0x00004040, 0x1aaabe41 },
3436 {0x00004040, 0xbe105554 },
3437 {0x00004040, 0x00043007 },
3438 {0x00004044, 0x00000000 },
3439 };
3440
3441/* AR9285 */
3442static const u_int32_t ar9285Modes_9285[][6] = {
3443 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
3444 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
3445 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
3446 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 },
3447 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
3448 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
3449 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
3450 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
3451 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
3452 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
3453 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
3454 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
3455 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
3456 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e },
3457 { 0x00009844, 0x0372161e, 0x0372161e, 0x03720020, 0x03720020, 0x037216a0 },
3458 { 0x00009848, 0x00001066, 0x00001066, 0x0000004e, 0x0000004e, 0x00001059 },
3459 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
3460 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
3461 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3136605e, 0x3136605e, 0x3139605e },
3462 { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18 },
3463 { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
3464 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
3465 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
3466 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
3467 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
3468 { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d },
3469 { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1020, 0xdfbc1020, 0xdfbc1010 },
3470 { 0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3471 { 0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3472 { 0x000099b8, 0x00cf4d1c, 0x00cf4d1c, 0x00cf4d1c, 0x00cf4d1c, 0x00cf4d1c },
3473 { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
3474 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
3475 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
3476 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
3477 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
3478 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
3479 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3480 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3481 { 0x00009a00, 0x00000000, 0x00000000, 0x00068084, 0x00068084, 0x00000000 },
3482 { 0x00009a04, 0x00000000, 0x00000000, 0x00068088, 0x00068088, 0x00000000 },
3483 { 0x00009a08, 0x00000000, 0x00000000, 0x0006808c, 0x0006808c, 0x00000000 },
3484 { 0x00009a0c, 0x00000000, 0x00000000, 0x00068100, 0x00068100, 0x00000000 },
3485 { 0x00009a10, 0x00000000, 0x00000000, 0x00068104, 0x00068104, 0x00000000 },
3486 { 0x00009a14, 0x00000000, 0x00000000, 0x00068108, 0x00068108, 0x00000000 },
3487 { 0x00009a18, 0x00000000, 0x00000000, 0x0006810c, 0x0006810c, 0x00000000 },
3488 { 0x00009a1c, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 },
3489 { 0x00009a20, 0x00000000, 0x00000000, 0x00068114, 0x00068114, 0x00000000 },
3490 { 0x00009a24, 0x00000000, 0x00000000, 0x00068180, 0x00068180, 0x00000000 },
3491 { 0x00009a28, 0x00000000, 0x00000000, 0x00068184, 0x00068184, 0x00000000 },
3492 { 0x00009a2c, 0x00000000, 0x00000000, 0x00068188, 0x00068188, 0x00000000 },
3493 { 0x00009a30, 0x00000000, 0x00000000, 0x0006818c, 0x0006818c, 0x00000000 },
3494 { 0x00009a34, 0x00000000, 0x00000000, 0x00068190, 0x00068190, 0x00000000 },
3495 { 0x00009a38, 0x00000000, 0x00000000, 0x00068194, 0x00068194, 0x00000000 },
3496 { 0x00009a3c, 0x00000000, 0x00000000, 0x000681a0, 0x000681a0, 0x00000000 },
3497 { 0x00009a40, 0x00000000, 0x00000000, 0x0006820c, 0x0006820c, 0x00000000 },
3498 { 0x00009a44, 0x00000000, 0x00000000, 0x000681a8, 0x000681a8, 0x00000000 },
3499 { 0x00009a48, 0x00000000, 0x00000000, 0x00068284, 0x00068284, 0x00000000 },
3500 { 0x00009a4c, 0x00000000, 0x00000000, 0x00068288, 0x00068288, 0x00000000 },
3501 { 0x00009a50, 0x00000000, 0x00000000, 0x00068220, 0x00068220, 0x00000000 },
3502 { 0x00009a54, 0x00000000, 0x00000000, 0x00068290, 0x00068290, 0x00000000 },
3503 { 0x00009a58, 0x00000000, 0x00000000, 0x00068300, 0x00068300, 0x00000000 },
3504 { 0x00009a5c, 0x00000000, 0x00000000, 0x00068304, 0x00068304, 0x00000000 },
3505 { 0x00009a60, 0x00000000, 0x00000000, 0x00068308, 0x00068308, 0x00000000 },
3506 { 0x00009a64, 0x00000000, 0x00000000, 0x0006830c, 0x0006830c, 0x00000000 },
3507 { 0x00009a68, 0x00000000, 0x00000000, 0x00068380, 0x00068380, 0x00000000 },
3508 { 0x00009a6c, 0x00000000, 0x00000000, 0x00068384, 0x00068384, 0x00000000 },
3509 { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 },
3510 { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 },
3511 { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 },
3512 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
3513 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
3514 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
3515 { 0x00009a88, 0x00000000, 0x00000000, 0x00068b04, 0x00068b04, 0x00000000 },
3516 { 0x00009a8c, 0x00000000, 0x00000000, 0x00068b08, 0x00068b08, 0x00000000 },
3517 { 0x00009a90, 0x00000000, 0x00000000, 0x00068b08, 0x00068b08, 0x00000000 },
3518 { 0x00009a94, 0x00000000, 0x00000000, 0x00068b0c, 0x00068b0c, 0x00000000 },
3519 { 0x00009a98, 0x00000000, 0x00000000, 0x00068b80, 0x00068b80, 0x00000000 },
3520 { 0x00009a9c, 0x00000000, 0x00000000, 0x00068b84, 0x00068b84, 0x00000000 },
3521 { 0x00009aa0, 0x00000000, 0x00000000, 0x00068b88, 0x00068b88, 0x00000000 },
3522 { 0x00009aa4, 0x00000000, 0x00000000, 0x00068b8c, 0x00068b8c, 0x00000000 },
3523 { 0x00009aa8, 0x00000000, 0x00000000, 0x000b8b90, 0x000b8b90, 0x00000000 },
3524 { 0x00009aac, 0x00000000, 0x00000000, 0x000b8f80, 0x000b8f80, 0x00000000 },
3525 { 0x00009ab0, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 },
3526 { 0x00009ab4, 0x00000000, 0x00000000, 0x000b8f88, 0x000b8f88, 0x00000000 },
3527 { 0x00009ab8, 0x00000000, 0x00000000, 0x000b8f8c, 0x000b8f8c, 0x00000000 },
3528 { 0x00009abc, 0x00000000, 0x00000000, 0x000b8f90, 0x000b8f90, 0x00000000 },
3529 { 0x00009ac0, 0x00000000, 0x00000000, 0x000bb30c, 0x000bb30c, 0x00000000 },
3530 { 0x00009ac4, 0x00000000, 0x00000000, 0x000bb310, 0x000bb310, 0x00000000 },
3531 { 0x00009ac8, 0x00000000, 0x00000000, 0x000bb384, 0x000bb384, 0x00000000 },
3532 { 0x00009acc, 0x00000000, 0x00000000, 0x000bb388, 0x000bb388, 0x00000000 },
3533 { 0x00009ad0, 0x00000000, 0x00000000, 0x000bb324, 0x000bb324, 0x00000000 },
3534 { 0x00009ad4, 0x00000000, 0x00000000, 0x000bb704, 0x000bb704, 0x00000000 },
3535 { 0x00009ad8, 0x00000000, 0x00000000, 0x000f96a4, 0x000f96a4, 0x00000000 },
3536 { 0x00009adc, 0x00000000, 0x00000000, 0x000f96a8, 0x000f96a8, 0x00000000 },
3537 { 0x00009ae0, 0x00000000, 0x00000000, 0x000f9710, 0x000f9710, 0x00000000 },
3538 { 0x00009ae4, 0x00000000, 0x00000000, 0x000f9714, 0x000f9714, 0x00000000 },
3539 { 0x00009ae8, 0x00000000, 0x00000000, 0x000f9720, 0x000f9720, 0x00000000 },
3540 { 0x00009aec, 0x00000000, 0x00000000, 0x000f9724, 0x000f9724, 0x00000000 },
3541 { 0x00009af0, 0x00000000, 0x00000000, 0x000f9728, 0x000f9728, 0x00000000 },
3542 { 0x00009af4, 0x00000000, 0x00000000, 0x000f972c, 0x000f972c, 0x00000000 },
3543 { 0x00009af8, 0x00000000, 0x00000000, 0x000f97a0, 0x000f97a0, 0x00000000 },
3544 { 0x00009afc, 0x00000000, 0x00000000, 0x000f97a4, 0x000f97a4, 0x00000000 },
3545 { 0x00009b00, 0x00000000, 0x00000000, 0x000fb7a8, 0x000fb7a8, 0x00000000 },
3546 { 0x00009b04, 0x00000000, 0x00000000, 0x000fb7b0, 0x000fb7b0, 0x00000000 },
3547 { 0x00009b08, 0x00000000, 0x00000000, 0x000fb7b4, 0x000fb7b4, 0x00000000 },
3548 { 0x00009b0c, 0x00000000, 0x00000000, 0x000fb7b8, 0x000fb7b8, 0x00000000 },
3549 { 0x00009b10, 0x00000000, 0x00000000, 0x000fb7a5, 0x000fb7a5, 0x00000000 },
3550 { 0x00009b14, 0x00000000, 0x00000000, 0x000fb7a9, 0x000fb7a9, 0x00000000 },
3551 { 0x00009b18, 0x00000000, 0x00000000, 0x000fb7ad, 0x000fb7ad, 0x00000000 },
3552 { 0x00009b1c, 0x00000000, 0x00000000, 0x000fb7b1, 0x000fb7b1, 0x00000000 },
3553 { 0x00009b20, 0x00000000, 0x00000000, 0x000fb7b5, 0x000fb7b5, 0x00000000 },
3554 { 0x00009b24, 0x00000000, 0x00000000, 0x000fb7b9, 0x000fb7b9, 0x00000000 },
3555 { 0x00009b28, 0x00000000, 0x00000000, 0x000fb7c5, 0x000fb7c5, 0x00000000 },
3556 { 0x00009b2c, 0x00000000, 0x00000000, 0x000fb7c9, 0x000fb7c9, 0x00000000 },
3557 { 0x00009b30, 0x00000000, 0x00000000, 0x000fb7d1, 0x000fb7d1, 0x00000000 },
3558 { 0x00009b34, 0x00000000, 0x00000000, 0x000fb7d5, 0x000fb7d5, 0x00000000 },
3559 { 0x00009b38, 0x00000000, 0x00000000, 0x000fb7d9, 0x000fb7d9, 0x00000000 },
3560 { 0x00009b3c, 0x00000000, 0x00000000, 0x000fb7c6, 0x000fb7c6, 0x00000000 },
3561 { 0x00009b40, 0x00000000, 0x00000000, 0x000fb7ca, 0x000fb7ca, 0x00000000 },
3562 { 0x00009b44, 0x00000000, 0x00000000, 0x000fb7ce, 0x000fb7ce, 0x00000000 },
3563 { 0x00009b48, 0x00000000, 0x00000000, 0x000fb7d2, 0x000fb7d2, 0x00000000 },
3564 { 0x00009b4c, 0x00000000, 0x00000000, 0x000fb7d6, 0x000fb7d6, 0x00000000 },
3565 { 0x00009b50, 0x00000000, 0x00000000, 0x000fb7c3, 0x000fb7c3, 0x00000000 },
3566 { 0x00009b54, 0x00000000, 0x00000000, 0x000fb7c7, 0x000fb7c7, 0x00000000 },
3567 { 0x00009b58, 0x00000000, 0x00000000, 0x000fb7cb, 0x000fb7cb, 0x00000000 },
3568 { 0x00009b5c, 0x00000000, 0x00000000, 0x000fb7cf, 0x000fb7cf, 0x00000000 },
3569 { 0x00009b60, 0x00000000, 0x00000000, 0x000fb7d7, 0x000fb7d7, 0x00000000 },
3570 { 0x00009b64, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3571 { 0x00009b68, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3572 { 0x00009b6c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3573 { 0x00009b70, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3574 { 0x00009b74, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3575 { 0x00009b78, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3576 { 0x00009b7c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3577 { 0x00009b80, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3578 { 0x00009b84, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3579 { 0x00009b88, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3580 { 0x00009b8c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3581 { 0x00009b90, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3582 { 0x00009b94, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3583 { 0x00009b98, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3584 { 0x00009b9c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3585 { 0x00009ba0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3586 { 0x00009ba4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3587 { 0x00009ba8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3588 { 0x00009bac, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3589 { 0x00009bb0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3590 { 0x00009bb4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3591 { 0x00009bb8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3592 { 0x00009bbc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3593 { 0x00009bc0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3594 { 0x00009bc4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3595 { 0x00009bc8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3596 { 0x00009bcc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3597 { 0x00009bd0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3598 { 0x00009bd4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3599 { 0x00009bd8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3600 { 0x00009bdc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3601 { 0x00009be0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3602 { 0x00009be4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3603 { 0x00009be8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3604 { 0x00009bec, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3605 { 0x00009bf0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3606 { 0x00009bf4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3607 { 0x00009bf8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3608 { 0x00009bfc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
3609 { 0x0000aa00, 0x00000000, 0x00000000, 0x0006801c, 0x0006801c, 0x00000000 },
3610 { 0x0000aa04, 0x00000000, 0x00000000, 0x00068080, 0x00068080, 0x00000000 },
3611 { 0x0000aa08, 0x00000000, 0x00000000, 0x00068084, 0x00068084, 0x00000000 },
3612 { 0x0000aa0c, 0x00000000, 0x00000000, 0x00068088, 0x00068088, 0x00000000 },
3613 { 0x0000aa10, 0x00000000, 0x00000000, 0x0006808c, 0x0006808c, 0x00000000 },
3614 { 0x0000aa14, 0x00000000, 0x00000000, 0x00068100, 0x00068100, 0x00000000 },
3615 { 0x0000aa18, 0x00000000, 0x00000000, 0x00068104, 0x00068104, 0x00000000 },
3616 { 0x0000aa1c, 0x00000000, 0x00000000, 0x00068108, 0x00068108, 0x00000000 },
3617 { 0x0000aa20, 0x00000000, 0x00000000, 0x0006810c, 0x0006810c, 0x00000000 },
3618 { 0x0000aa24, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 },
3619 { 0x0000aa28, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 },
3620 { 0x0000aa2c, 0x00000000, 0x00000000, 0x00068180, 0x00068180, 0x00000000 },
3621 { 0x0000aa30, 0x00000000, 0x00000000, 0x00068184, 0x00068184, 0x00000000 },
3622 { 0x0000aa34, 0x00000000, 0x00000000, 0x00068188, 0x00068188, 0x00000000 },
3623 { 0x0000aa38, 0x00000000, 0x00000000, 0x0006818c, 0x0006818c, 0x00000000 },
3624 { 0x0000aa3c, 0x00000000, 0x00000000, 0x00068190, 0x00068190, 0x00000000 },
3625 { 0x0000aa40, 0x00000000, 0x00000000, 0x00068194, 0x00068194, 0x00000000 },
3626 { 0x0000aa44, 0x00000000, 0x00000000, 0x000681a0, 0x000681a0, 0x00000000 },
3627 { 0x0000aa48, 0x00000000, 0x00000000, 0x0006820c, 0x0006820c, 0x00000000 },
3628 { 0x0000aa4c, 0x00000000, 0x00000000, 0x000681a8, 0x000681a8, 0x00000000 },
3629 { 0x0000aa50, 0x00000000, 0x00000000, 0x000681ac, 0x000681ac, 0x00000000 },
3630 { 0x0000aa54, 0x00000000, 0x00000000, 0x0006821c, 0x0006821c, 0x00000000 },
3631 { 0x0000aa58, 0x00000000, 0x00000000, 0x00068224, 0x00068224, 0x00000000 },
3632 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00068290, 0x00068290, 0x00000000 },
3633 { 0x0000aa60, 0x00000000, 0x00000000, 0x00068300, 0x00068300, 0x00000000 },
3634 { 0x0000aa64, 0x00000000, 0x00000000, 0x00068308, 0x00068308, 0x00000000 },
3635 { 0x0000aa68, 0x00000000, 0x00000000, 0x0006830c, 0x0006830c, 0x00000000 },
3636 { 0x0000aa6c, 0x00000000, 0x00000000, 0x00068310, 0x00068310, 0x00000000 },
3637 { 0x0000aa70, 0x00000000, 0x00000000, 0x00068788, 0x00068788, 0x00000000 },
3638 { 0x0000aa74, 0x00000000, 0x00000000, 0x0006878c, 0x0006878c, 0x00000000 },
3639 { 0x0000aa78, 0x00000000, 0x00000000, 0x00068790, 0x00068790, 0x00000000 },
3640 { 0x0000aa7c, 0x00000000, 0x00000000, 0x00068794, 0x00068794, 0x00000000 },
3641 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068798, 0x00068798, 0x00000000 },
3642 { 0x0000aa84, 0x00000000, 0x00000000, 0x0006879c, 0x0006879c, 0x00000000 },
3643 { 0x0000aa88, 0x00000000, 0x00000000, 0x00068b89, 0x00068b89, 0x00000000 },
3644 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00068b8d, 0x00068b8d, 0x00000000 },
3645 { 0x0000aa90, 0x00000000, 0x00000000, 0x00068b91, 0x00068b91, 0x00000000 },
3646 { 0x0000aa94, 0x00000000, 0x00000000, 0x00068b95, 0x00068b95, 0x00000000 },
3647 { 0x0000aa98, 0x00000000, 0x00000000, 0x00068b99, 0x00068b99, 0x00000000 },
3648 { 0x0000aa9c, 0x00000000, 0x00000000, 0x00068ba5, 0x00068ba5, 0x00000000 },
3649 { 0x0000aaa0, 0x00000000, 0x00000000, 0x00068ba9, 0x00068ba9, 0x00000000 },
3650 { 0x0000aaa4, 0x00000000, 0x00000000, 0x00068bad, 0x00068bad, 0x00000000 },
3651 { 0x0000aaa8, 0x00000000, 0x00000000, 0x000b8b0c, 0x000b8b0c, 0x00000000 },
3652 { 0x0000aaac, 0x00000000, 0x00000000, 0x000b8f10, 0x000b8f10, 0x00000000 },
3653 { 0x0000aab0, 0x00000000, 0x00000000, 0x000b8f14, 0x000b8f14, 0x00000000 },
3654 { 0x0000aab4, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 },
3655 { 0x0000aab8, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 },
3656 { 0x0000aabc, 0x00000000, 0x00000000, 0x000b8f88, 0x000b8f88, 0x00000000 },
3657 { 0x0000aac0, 0x00000000, 0x00000000, 0x000bb380, 0x000bb380, 0x00000000 },
3658 { 0x0000aac4, 0x00000000, 0x00000000, 0x000bb384, 0x000bb384, 0x00000000 },
3659 { 0x0000aac8, 0x00000000, 0x00000000, 0x000bb388, 0x000bb388, 0x00000000 },
3660 { 0x0000aacc, 0x00000000, 0x00000000, 0x000bb38c, 0x000bb38c, 0x00000000 },
3661 { 0x0000aad0, 0x00000000, 0x00000000, 0x000bb394, 0x000bb394, 0x00000000 },
3662 { 0x0000aad4, 0x00000000, 0x00000000, 0x000bb798, 0x000bb798, 0x00000000 },
3663 { 0x0000aad8, 0x00000000, 0x00000000, 0x000f970c, 0x000f970c, 0x00000000 },
3664 { 0x0000aadc, 0x00000000, 0x00000000, 0x000f9710, 0x000f9710, 0x00000000 },
3665 { 0x0000aae0, 0x00000000, 0x00000000, 0x000f9714, 0x000f9714, 0x00000000 },
3666 { 0x0000aae4, 0x00000000, 0x00000000, 0x000f9718, 0x000f9718, 0x00000000 },
3667 { 0x0000aae8, 0x00000000, 0x00000000, 0x000f9705, 0x000f9705, 0x00000000 },
3668 { 0x0000aaec, 0x00000000, 0x00000000, 0x000f9709, 0x000f9709, 0x00000000 },
3669 { 0x0000aaf0, 0x00000000, 0x00000000, 0x000f970d, 0x000f970d, 0x00000000 },
3670 { 0x0000aaf4, 0x00000000, 0x00000000, 0x000f9711, 0x000f9711, 0x00000000 },
3671 { 0x0000aaf8, 0x00000000, 0x00000000, 0x000f9715, 0x000f9715, 0x00000000 },
3672 { 0x0000aafc, 0x00000000, 0x00000000, 0x000f9719, 0x000f9719, 0x00000000 },
3673 { 0x0000ab00, 0x00000000, 0x00000000, 0x000fb7a4, 0x000fb7a4, 0x00000000 },
3674 { 0x0000ab04, 0x00000000, 0x00000000, 0x000fb7a8, 0x000fb7a8, 0x00000000 },
3675 { 0x0000ab08, 0x00000000, 0x00000000, 0x000fb7ac, 0x000fb7ac, 0x00000000 },
3676 { 0x0000ab0c, 0x00000000, 0x00000000, 0x000fb7ac, 0x000fb7ac, 0x00000000 },
3677 { 0x0000ab10, 0x00000000, 0x00000000, 0x000fb7b0, 0x000fb7b0, 0x00000000 },
3678 { 0x0000ab14, 0x00000000, 0x00000000, 0x000fb7b8, 0x000fb7b8, 0x00000000 },
3679 { 0x0000ab18, 0x00000000, 0x00000000, 0x000fb7bc, 0x000fb7bc, 0x00000000 },
3680 { 0x0000ab1c, 0x00000000, 0x00000000, 0x000fb7a1, 0x000fb7a1, 0x00000000 },
3681 { 0x0000ab20, 0x00000000, 0x00000000, 0x000fb7a5, 0x000fb7a5, 0x00000000 },
3682 { 0x0000ab24, 0x00000000, 0x00000000, 0x000fb7a9, 0x000fb7a9, 0x00000000 },
3683 { 0x0000ab28, 0x00000000, 0x00000000, 0x000fb7b1, 0x000fb7b1, 0x00000000 },
3684 { 0x0000ab2c, 0x00000000, 0x00000000, 0x000fb7b5, 0x000fb7b5, 0x00000000 },
3685 { 0x0000ab30, 0x00000000, 0x00000000, 0x000fb7bd, 0x000fb7bd, 0x00000000 },
3686 { 0x0000ab34, 0x00000000, 0x00000000, 0x000fb7c9, 0x000fb7c9, 0x00000000 },
3687 { 0x0000ab38, 0x00000000, 0x00000000, 0x000fb7cd, 0x000fb7cd, 0x00000000 },
3688 { 0x0000ab3c, 0x00000000, 0x00000000, 0x000fb7d1, 0x000fb7d1, 0x00000000 },
3689 { 0x0000ab40, 0x00000000, 0x00000000, 0x000fb7d9, 0x000fb7d9, 0x00000000 },
3690 { 0x0000ab44, 0x00000000, 0x00000000, 0x000fb7c2, 0x000fb7c2, 0x00000000 },
3691 { 0x0000ab48, 0x00000000, 0x00000000, 0x000fb7c6, 0x000fb7c6, 0x00000000 },
3692 { 0x0000ab4c, 0x00000000, 0x00000000, 0x000fb7ca, 0x000fb7ca, 0x00000000 },
3693 { 0x0000ab50, 0x00000000, 0x00000000, 0x000fb7ce, 0x000fb7ce, 0x00000000 },
3694 { 0x0000ab54, 0x00000000, 0x00000000, 0x000fb7d2, 0x000fb7d2, 0x00000000 },
3695 { 0x0000ab58, 0x00000000, 0x00000000, 0x000fb7d6, 0x000fb7d6, 0x00000000 },
3696 { 0x0000ab5c, 0x00000000, 0x00000000, 0x000fb7c3, 0x000fb7c3, 0x00000000 },
3697 { 0x0000ab60, 0x00000000, 0x00000000, 0x000fb7cb, 0x000fb7cb, 0x00000000 },
3698 { 0x0000ab64, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3699 { 0x0000ab68, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3700 { 0x0000ab6c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3701 { 0x0000ab70, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3702 { 0x0000ab74, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3703 { 0x0000ab78, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3704 { 0x0000ab7c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3705 { 0x0000ab80, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3706 { 0x0000ab84, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3707 { 0x0000ab88, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3708 { 0x0000ab8c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3709 { 0x0000ab90, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3710 { 0x0000ab94, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3711 { 0x0000ab98, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3712 { 0x0000ab9c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3713 { 0x0000aba0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3714 { 0x0000aba4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3715 { 0x0000aba8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3716 { 0x0000abac, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3717 { 0x0000abb0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3718 { 0x0000abb4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3719 { 0x0000abb8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3720 { 0x0000abbc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3721 { 0x0000abc0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3722 { 0x0000abc4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3723 { 0x0000abc8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3724 { 0x0000abcc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3725 { 0x0000abd0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3726 { 0x0000abd4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3727 { 0x0000abd8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3728 { 0x0000abdc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3729 { 0x0000abe0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3730 { 0x0000abe4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3731 { 0x0000abe8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3732 { 0x0000abec, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3733 { 0x0000abf0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3734 { 0x0000abf4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3735 { 0x0000abf8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3736 { 0x0000abfc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
3737 { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 },
3738 { 0x0000a20c, 0x00000014, 0x00000014, 0x00000000, 0x00000000, 0x0001f000 },
3739 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
3740 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
3741 { 0x0000a250, 0x001ff000, 0x001ff000, 0x001ca000, 0x001ca000, 0x001da000 },
3742 { 0x0000a274, 0x0a81c652, 0x0a81c652, 0x0a820652, 0x0a820652, 0x0a82a652 },
3743 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3744 { 0x0000a304, 0x00000000, 0x00000000, 0x00007201, 0x00007201, 0x00000000 },
3745 { 0x0000a308, 0x00000000, 0x00000000, 0x00010408, 0x00010408, 0x00000000 },
3746 { 0x0000a30c, 0x00000000, 0x00000000, 0x0001860a, 0x0001860a, 0x00000000 },
3747 { 0x0000a310, 0x00000000, 0x00000000, 0x00020818, 0x00020818, 0x00000000 },
3748 { 0x0000a314, 0x00000000, 0x00000000, 0x00024858, 0x00024858, 0x00000000 },
3749 { 0x0000a318, 0x00000000, 0x00000000, 0x00026859, 0x00026859, 0x00000000 },
3750 { 0x0000a31c, 0x00000000, 0x00000000, 0x0002985b, 0x0002985b, 0x00000000 },
3751 { 0x0000a320, 0x00000000, 0x00000000, 0x0002c89a, 0x0002c89a, 0x00000000 },
3752 { 0x0000a324, 0x00000000, 0x00000000, 0x0002e89b, 0x0002e89b, 0x00000000 },
3753 { 0x0000a328, 0x00000000, 0x00000000, 0x0003089c, 0x0003089c, 0x00000000 },
3754 { 0x0000a32c, 0x00000000, 0x00000000, 0x0003289d, 0x0003289d, 0x00000000 },
3755 { 0x0000a330, 0x00000000, 0x00000000, 0x0003489e, 0x0003489e, 0x00000000 },
3756 { 0x0000a334, 0x00000000, 0x00000000, 0x000388de, 0x000388de, 0x00000000 },
3757 { 0x0000a338, 0x00000000, 0x00000000, 0x0003b91e, 0x0003b91e, 0x00000000 },
3758 { 0x0000a33c, 0x00000000, 0x00000000, 0x0003d95e, 0x0003d95e, 0x00000000 },
3759 { 0x0000a340, 0x00000000, 0x00000000, 0x000419df, 0x000419df, 0x00000000 },
3760 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2748 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 3761 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
2749 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2750 { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 },
2751}; 3762};
2752 3763
2753static const u32 ar9280Common_9280_2[][2] = { 3764static const u_int32_t ar9285Common_9285[][2] = {
2754 { 0x0000000c, 0x00000000 }, 3765 { 0x0000000c, 0x00000000 },
2755 { 0x00000030, 0x00020015 }, 3766 { 0x00000030, 0x00020045 },
2756 { 0x00000034, 0x00000005 }, 3767 { 0x00000034, 0x00000005 },
2757 { 0x00000040, 0x00000000 }, 3768 { 0x00000040, 0x00000000 },
2758 { 0x00000044, 0x00000008 }, 3769 { 0x00000044, 0x00000008 },
@@ -2837,7 +3848,7 @@ static const u32 ar9280Common_9280_2[][2] = {
2837 { 0x00004024, 0x0000001f }, 3848 { 0x00004024, 0x0000001f },
2838 { 0x00004060, 0x00000000 }, 3849 { 0x00004060, 0x00000000 },
2839 { 0x00004064, 0x00000000 }, 3850 { 0x00004064, 0x00000000 },
2840 { 0x00007010, 0x00000033 }, 3851 { 0x00007010, 0x00000031 },
2841 { 0x00007034, 0x00000002 }, 3852 { 0x00007034, 0x00000002 },
2842 { 0x00007038, 0x000004c2 }, 3853 { 0x00007038, 0x000004c2 },
2843 { 0x00008004, 0x00000000 }, 3854 { 0x00008004, 0x00000000 },
@@ -2847,7 +3858,7 @@ static const u32 ar9280Common_9280_2[][2] = {
2847 { 0x00008020, 0x00000000 }, 3858 { 0x00008020, 0x00000000 },
2848 { 0x00008038, 0x00000000 }, 3859 { 0x00008038, 0x00000000 },
2849 { 0x0000803c, 0x00000000 }, 3860 { 0x0000803c, 0x00000000 },
2850 { 0x00008048, 0x40000000 }, 3861 { 0x00008048, 0x00000000 },
2851 { 0x00008054, 0x00000000 }, 3862 { 0x00008054, 0x00000000 },
2852 { 0x00008058, 0x00000000 }, 3863 { 0x00008058, 0x00000000 },
2853 { 0x0000805c, 0x000fc78f }, 3864 { 0x0000805c, 0x000fc78f },
@@ -2923,6 +3934,7 @@ static const u32 ar9280Common_9280_2[][2] = {
2923 { 0x00008258, 0x00000000 }, 3934 { 0x00008258, 0x00000000 },
2924 { 0x0000825c, 0x400000ff }, 3935 { 0x0000825c, 0x400000ff },
2925 { 0x00008260, 0x00080922 }, 3936 { 0x00008260, 0x00080922 },
3937 { 0x00008264, 0xa8a00010 },
2926 { 0x00008270, 0x00000000 }, 3938 { 0x00008270, 0x00000000 },
2927 { 0x00008274, 0x40000000 }, 3939 { 0x00008274, 0x40000000 },
2928 { 0x00008278, 0x003e4180 }, 3940 { 0x00008278, 0x003e4180 },
@@ -2936,15 +3948,15 @@ static const u32 ar9280Common_9280_2[][2] = {
2936 { 0x00008300, 0x00000040 }, 3948 { 0x00008300, 0x00000040 },
2937 { 0x00008314, 0x00000000 }, 3949 { 0x00008314, 0x00000000 },
2938 { 0x00008328, 0x00000000 }, 3950 { 0x00008328, 0x00000000 },
2939 { 0x0000832c, 0x00000007 }, 3951 { 0x0000832c, 0x00000001 },
2940 { 0x00008330, 0x00000302 }, 3952 { 0x00008330, 0x00000302 },
2941 { 0x00008334, 0x00000e00 }, 3953 { 0x00008334, 0x00000e00 },
2942 { 0x00008338, 0x00000000 }, 3954 { 0x00008338, 0x00000000 },
2943 { 0x0000833c, 0x00000000 }, 3955 { 0x0000833c, 0x00000000 },
2944 { 0x00008340, 0x000107ff }, 3956 { 0x00008340, 0x00010380 },
2945 { 0x00008344, 0x00581043 }, 3957 { 0x00008344, 0x00581043 },
2946 { 0x00009808, 0x00000000 }, 3958 { 0x00009808, 0x00000000 },
2947 { 0x0000980c, 0xafa68e30 }, 3959 { 0x0000980c, 0xafe68e30 },
2948 { 0x00009810, 0xfd14e000 }, 3960 { 0x00009810, 0xfd14e000 },
2949 { 0x00009814, 0x9c0a9f6b }, 3961 { 0x00009814, 0x9c0a9f6b },
2950 { 0x0000981c, 0x00000000 }, 3962 { 0x0000981c, 0x00000000 },
@@ -2952,7 +3964,6 @@ static const u32 ar9280Common_9280_2[][2] = {
2952 { 0x00009830, 0x00000000 }, 3964 { 0x00009830, 0x00000000 },
2953 { 0x0000983c, 0x00200400 }, 3965 { 0x0000983c, 0x00200400 },
2954 { 0x0000984c, 0x0040233c }, 3966 { 0x0000984c, 0x0040233c },
2955 { 0x0000a84c, 0x0040233c },
2956 { 0x00009854, 0x00000044 }, 3967 { 0x00009854, 0x00000044 },
2957 { 0x00009900, 0x00000000 }, 3968 { 0x00009900, 0x00000000 },
2958 { 0x00009904, 0x00000000 }, 3969 { 0x00009904, 0x00000000 },
@@ -2961,20 +3972,18 @@ static const u32 ar9280Common_9280_2[][2] = {
2961 { 0x00009910, 0x01002310 }, 3972 { 0x00009910, 0x01002310 },
2962 { 0x0000991c, 0x10000fff }, 3973 { 0x0000991c, 0x10000fff },
2963 { 0x00009920, 0x04900000 }, 3974 { 0x00009920, 0x04900000 },
2964 { 0x0000a920, 0x04900000 },
2965 { 0x00009928, 0x00000001 }, 3975 { 0x00009928, 0x00000001 },
2966 { 0x0000992c, 0x00000004 }, 3976 { 0x0000992c, 0x00000004 },
2967 { 0x00009934, 0x1e1f2022 }, 3977 { 0x00009934, 0x1e1f2022 },
2968 { 0x00009938, 0x0a0b0c0d }, 3978 { 0x00009938, 0x0a0b0c0d },
2969 { 0x0000993c, 0x00000000 }, 3979 { 0x0000993c, 0x00000000 },
3980 { 0x00009940, 0x14750604 },
2970 { 0x00009948, 0x9280c00a }, 3981 { 0x00009948, 0x9280c00a },
2971 { 0x0000994c, 0x00020028 }, 3982 { 0x0000994c, 0x00020028 },
2972 { 0x00009954, 0x5f3ca3de }, 3983 { 0x00009954, 0x5f3ca3de },
2973 { 0x00009958, 0x2108ecff }, 3984 { 0x00009958, 0x2108ecff },
2974 { 0x00009940, 0x14750604 }, 3985 { 0x00009968, 0x000003ce },
2975 { 0x0000c95c, 0x004b6a8e }, 3986 { 0x00009970, 0x1927b515 },
2976 { 0x0000c968, 0x000003ce },
2977 { 0x00009970, 0x190fb515 },
2978 { 0x00009974, 0x00000000 }, 3987 { 0x00009974, 0x00000000 },
2979 { 0x00009978, 0x00000001 }, 3988 { 0x00009978, 0x00000001 },
2980 { 0x0000997c, 0x00000000 }, 3989 { 0x0000997c, 0x00000000 },
@@ -2989,7 +3998,7 @@ static const u32 ar9280Common_9280_2[][2] = {
2989 { 0x000099a0, 0x00000000 }, 3998 { 0x000099a0, 0x00000000 },
2990 { 0x000099a4, 0x00000001 }, 3999 { 0x000099a4, 0x00000001 },
2991 { 0x000099a8, 0x201fff00 }, 4000 { 0x000099a8, 0x201fff00 },
2992 { 0x000099ac, 0x006f0000 }, 4001 { 0x000099ac, 0x2def0a00 },
2993 { 0x000099b0, 0x03051000 }, 4002 { 0x000099b0, 0x03051000 },
2994 { 0x000099b4, 0x00000820 }, 4003 { 0x000099b4, 0x00000820 },
2995 { 0x000099dc, 0x00000000 }, 4004 { 0x000099dc, 0x00000000 },
@@ -2998,31 +4007,28 @@ static const u32 ar9280Common_9280_2[][2] = {
2998 { 0x000099e8, 0x3c466478 }, 4007 { 0x000099e8, 0x3c466478 },
2999 { 0x000099ec, 0x0cc80caa }, 4008 { 0x000099ec, 0x0cc80caa },
3000 { 0x000099f0, 0x00000000 }, 4009 { 0x000099f0, 0x00000000 },
3001 { 0x000099fc, 0x00001042 }, 4010 { 0x0000a208, 0x803e6788 },
3002 { 0x0000a210, 0x4080a333 }, 4011 { 0x0000a210, 0x4080a333 },
3003 { 0x0000a214, 0x40206c10 }, 4012 { 0x0000a214, 0x00206c10 },
3004 { 0x0000a218, 0x009c4060 }, 4013 { 0x0000a218, 0x009c4060 },
3005 { 0x0000a220, 0x01834061 }, 4014 { 0x0000a220, 0x01834061 },
3006 { 0x0000a224, 0x00000400 }, 4015 { 0x0000a224, 0x00000400 },
3007 { 0x0000a228, 0x000003b5 }, 4016 { 0x0000a228, 0x000003b5 },
3008 { 0x0000a22c, 0x233f71c0 }, 4017 { 0x0000a22c, 0x00000000 },
3009 { 0x0000a234, 0x20202020 }, 4018 { 0x0000a234, 0x20202020 },
3010 { 0x0000a238, 0x20202020 }, 4019 { 0x0000a238, 0x20202020 },
3011 { 0x0000a23c, 0x13c88000 }, 4020 { 0x0000a244, 0x00000000 },
3012 { 0x0000a240, 0x38490a20 }, 4021 { 0x0000a248, 0xfffffffc },
3013 { 0x0000a244, 0x00007bb6 },
3014 { 0x0000a248, 0x0fff3ffc },
3015 { 0x0000a24c, 0x00000000 }, 4022 { 0x0000a24c, 0x00000000 },
3016 { 0x0000a254, 0x00000000 }, 4023 { 0x0000a254, 0x00000000 },
3017 { 0x0000a258, 0x0cdbd380 }, 4024 { 0x0000a258, 0x0ccb5380 },
3018 { 0x0000a25c, 0x0f0f0f01 }, 4025 { 0x0000a25c, 0x15151501 },
3019 { 0x0000a260, 0xdfa91f01 }, 4026 { 0x0000a260, 0xdfa90f01 },
3020 { 0x0000a268, 0x00000000 }, 4027 { 0x0000a268, 0x00000000 },
3021 { 0x0000a26c, 0x0ebae9c6 }, 4028 { 0x0000a26c, 0x0ebae9e6 },
3022 { 0x0000b26c, 0x0ebae9c6 }, 4029 { 0x0000d270, 0x0d820820 },
3023 { 0x0000d270, 0x00820820 }, 4030 { 0x0000a278, 0x39ce739c },
3024 { 0x0000a278, 0x1ce739ce }, 4031 { 0x0000a27c, 0x050e039c },
3025 { 0x0000a27c, 0x050701ce },
3026 { 0x0000d35c, 0x07ffffef }, 4032 { 0x0000d35c, 0x07ffffef },
3027 { 0x0000d360, 0x0fffffe7 }, 4033 { 0x0000d360, 0x0fffffe7 },
3028 { 0x0000d364, 0x17ffffe5 }, 4034 { 0x0000d364, 0x17ffffe5 },
@@ -3037,8 +4043,8 @@ static const u32 ar9280Common_9280_2[][2] = {
3037 { 0x0000a388, 0x0c000000 }, 4043 { 0x0000a388, 0x0c000000 },
3038 { 0x0000a38c, 0x20202020 }, 4044 { 0x0000a38c, 0x20202020 },
3039 { 0x0000a390, 0x20202020 }, 4045 { 0x0000a390, 0x20202020 },
3040 { 0x0000a394, 0x1ce739ce }, 4046 { 0x0000a394, 0x39ce739c },
3041 { 0x0000a398, 0x000001ce }, 4047 { 0x0000a398, 0x0000039c },
3042 { 0x0000a39c, 0x00000001 }, 4048 { 0x0000a39c, 0x00000001 },
3043 { 0x0000a3a0, 0x00000000 }, 4049 { 0x0000a3a0, 0x00000000 },
3044 { 0x0000a3a4, 0x00000000 }, 4050 { 0x0000a3a4, 0x00000000 },
@@ -3050,96 +4056,746 @@ static const u32 ar9280Common_9280_2[][2] = {
3050 { 0x0000a3bc, 0x00000000 }, 4056 { 0x0000a3bc, 0x00000000 },
3051 { 0x0000a3c0, 0x00000000 }, 4057 { 0x0000a3c0, 0x00000000 },
3052 { 0x0000a3c4, 0x00000000 }, 4058 { 0x0000a3c4, 0x00000000 },
3053 { 0x0000a3c8, 0x00000246 },
3054 { 0x0000a3cc, 0x20202020 }, 4059 { 0x0000a3cc, 0x20202020 },
3055 { 0x0000a3d0, 0x20202020 }, 4060 { 0x0000a3d0, 0x20202020 },
3056 { 0x0000a3d4, 0x20202020 }, 4061 { 0x0000a3d4, 0x20202020 },
3057 { 0x0000a3dc, 0x1ce739ce }, 4062 { 0x0000a3dc, 0x39ce739c },
3058 { 0x0000a3e0, 0x000001ce }, 4063 { 0x0000a3e0, 0x0000039c },
3059 { 0x0000a3e4, 0x00000000 }, 4064 { 0x0000a3e4, 0x00000000 },
3060 { 0x0000a3e8, 0x18c43433 }, 4065 { 0x0000a3e8, 0x18c43433 },
3061 { 0x0000a3ec, 0x00f70081 }, 4066 { 0x0000a3ec, 0x00f70081 },
3062 { 0x00007800, 0x00040000 }, 4067 { 0x00007800, 0x00140000 },
3063 { 0x00007804, 0xdb005012 }, 4068 { 0x00007804, 0x0e4548d8 },
3064 { 0x00007808, 0x04924914 }, 4069 { 0x00007808, 0x54214514 },
3065 { 0x0000780c, 0x21084210 }, 4070 { 0x0000780c, 0x02025820 },
3066 { 0x00007810, 0x6d801300 }, 4071 { 0x00007810, 0x71c0d388 },
3067 { 0x00007814, 0x0019beff }, 4072 { 0x00007814, 0x924934a8 },
3068 { 0x00007818, 0x07e41000 }, 4073 { 0x0000781c, 0x00000000 },
3069 { 0x0000781c, 0x00392000 }, 4074 { 0x00007820, 0x00000c04 },
3070 { 0x00007820, 0x92592480 }, 4075 { 0x00007824, 0x00d86fff },
3071 { 0x00007824, 0x00040000 }, 4076 { 0x00007828, 0x26d2491b },
3072 { 0x00007828, 0xdb005012 }, 4077 { 0x0000782c, 0x6e36d97b },
3073 { 0x0000782c, 0x04924914 }, 4078 { 0x00007830, 0xedb6d96c },
3074 { 0x00007830, 0x21084210 }, 4079 { 0x00007834, 0x71400086 },
3075 { 0x00007834, 0x6d801300 }, 4080 { 0x00007838, 0xfac68800 },
3076 { 0x00007838, 0x0019beff }, 4081 { 0x0000783c, 0x0001fffe },
3077 { 0x0000783c, 0x07e40000 }, 4082 { 0x00007840, 0xffeb1a20 },
3078 { 0x00007840, 0x00392000 }, 4083 { 0x00007844, 0x000c0db6 },
3079 { 0x00007844, 0x92592480 }, 4084 { 0x00007848, 0x6db61b6f },
3080 { 0x00007848, 0x00100000 }, 4085 { 0x0000784c, 0x6d9b66db },
3081 { 0x0000784c, 0x773f0567 }, 4086 { 0x00007850, 0x6d8c6dba },
3082 { 0x00007850, 0x54214514 }, 4087 { 0x00007854, 0x00040000 },
3083 { 0x00007854, 0x12035828 }, 4088 { 0x00007858, 0xdb003012 },
3084 { 0x00007858, 0x9259269a }, 4089 { 0x0000785c, 0x04924914 },
3085 { 0x00007860, 0x52802000 }, 4090 { 0x00007860, 0x21084210 },
3086 { 0x00007864, 0x0a8e370e }, 4091 { 0x00007864, 0xf7d7ffde },
3087 { 0x00007868, 0xc0102850 }, 4092 { 0x00007868, 0xc2034080 },
3088 { 0x0000786c, 0x812d4000 }, 4093 { 0x0000786c, 0x48609eb4 },
3089 { 0x00007870, 0x807ec400 }, 4094 { 0x00007870, 0x10142c00 },
3090 { 0x00007874, 0x001b6db0 },
3091 { 0x00007878, 0x00376b63 },
3092 { 0x0000787c, 0x06db6db6 },
3093 { 0x00007880, 0x006d8000 },
3094 { 0x00007884, 0xffeffffe },
3095 { 0x00007888, 0xffeffffe },
3096 { 0x0000788c, 0x00010000 },
3097 { 0x00007890, 0x02060aeb },
3098 { 0x00007898, 0x2a850160 },
3099}; 4095};
3100 4096
3101static const u32 ar9280Modes_fast_clock_9280_2[][3] = { 4097static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
3102 { 0x00001030, 0x00000268, 0x000004d0 }, 4098 {0x00004040, 0x9248fd00 },
3103 { 0x00001070, 0x0000018c, 0x00000318 }, 4099 {0x00004040, 0x24924924 },
3104 { 0x000010b0, 0x00000fd0, 0x00001fa0 }, 4100 {0x00004040, 0xa8000019 },
3105 { 0x00008014, 0x044c044c, 0x08980898 }, 4101 {0x00004040, 0x13160820 },
3106 { 0x0000801c, 0x148ec02b, 0x148ec057 }, 4102 {0x00004040, 0xe5980560 },
3107 { 0x00008318, 0x000044c0, 0x00008980 }, 4103 {0x00004040, 0xc01dcffd },
3108 { 0x00009820, 0x02020200, 0x02020200 }, 4104 {0x00004040, 0x1aaabe41 },
3109 { 0x00009824, 0x00000f0f, 0x00000f0f }, 4105 {0x00004040, 0xbe105554 },
3110 { 0x00009828, 0x0b020001, 0x0b020001 }, 4106 {0x00004040, 0x00043007 },
3111 { 0x00009834, 0x00000f0f, 0x00000f0f }, 4107 {0x00004044, 0x00000000 },
3112 { 0x00009844, 0x03721821, 0x03721821 },
3113 { 0x00009914, 0x00000898, 0x00000898 },
3114 { 0x00009918, 0x0000000b, 0x00000016 },
3115 { 0x00009944, 0xdfbc1210, 0xdfbc1210 },
3116}; 4108};
3117 4109
3118 4110static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = {
3119
3120static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
3121 {0x00004040, 0x9248fd00 }, 4111 {0x00004040, 0x9248fd00 },
3122 {0x00004040, 0x24924924 }, 4112 {0x00004040, 0x24924924 },
3123 {0x00004040, 0xa8000019 }, 4113 {0x00004040, 0xa8000019 },
3124 {0x00004040, 0x13160820 }, 4114 {0x00004040, 0x13160820 },
3125 {0x00004040, 0xe5980560 }, 4115 {0x00004040, 0xe5980560 },
3126 {0x00004040, 0x401dcffc }, 4116 {0x00004040, 0xc01dcffc },
3127 {0x00004040, 0x1aaabe40 }, 4117 {0x00004040, 0x1aaabe41 },
3128 {0x00004040, 0xbe105554 }, 4118 {0x00004040, 0xbe105554 },
3129 {0x00004040, 0x00043007 }, 4119 {0x00004040, 0x00043007 },
3130 {0x00004044, 0x00000000 }, 4120 {0x00004044, 0x00000000 },
3131}; 4121};
3132 4122
4123static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4124 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
4125 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
4126 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
4127 { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 },
4128 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
4129 { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f },
4130 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
4131 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
4132 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
4133 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
4134 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
4135 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
4136 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
4137 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e },
4138 { 0x00009844, 0x0372161e, 0x0372161e, 0x03720020, 0x03720020, 0x037216a0 },
4139 { 0x00009848, 0x00001066, 0x00001066, 0x00000057, 0x00000057, 0x00001059 },
4140 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
4141 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
4142 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3136605e, 0x3136605e, 0x3139605e },
4143 { 0x00009860, 0x00058d18, 0x00058d18, 0x00058d20, 0x00058d20, 0x00058d18 },
4144 { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
4145 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
4146 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
4147 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
4148 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
4149 { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d },
4150 { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1020, 0xffbc1020, 0xffbc1010 },
4151 { 0x00009960, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4152 { 0x00009964, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4153 { 0x000099b8, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c, 0x0000421c },
4154 { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
4155 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
4156 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
4157 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
4158 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
4159 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
4160 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4161 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4162 { 0x00009a00, 0x00000000, 0x00000000, 0x00068084, 0x00068084, 0x00000000 },
4163 { 0x00009a04, 0x00000000, 0x00000000, 0x00068088, 0x00068088, 0x00000000 },
4164 { 0x00009a08, 0x00000000, 0x00000000, 0x0006808c, 0x0006808c, 0x00000000 },
4165 { 0x00009a0c, 0x00000000, 0x00000000, 0x00068100, 0x00068100, 0x00000000 },
4166 { 0x00009a10, 0x00000000, 0x00000000, 0x00068104, 0x00068104, 0x00000000 },
4167 { 0x00009a14, 0x00000000, 0x00000000, 0x00068108, 0x00068108, 0x00000000 },
4168 { 0x00009a18, 0x00000000, 0x00000000, 0x0006810c, 0x0006810c, 0x00000000 },
4169 { 0x00009a1c, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 },
4170 { 0x00009a20, 0x00000000, 0x00000000, 0x00068114, 0x00068114, 0x00000000 },
4171 { 0x00009a24, 0x00000000, 0x00000000, 0x00068180, 0x00068180, 0x00000000 },
4172 { 0x00009a28, 0x00000000, 0x00000000, 0x00068184, 0x00068184, 0x00000000 },
4173 { 0x00009a2c, 0x00000000, 0x00000000, 0x00068188, 0x00068188, 0x00000000 },
4174 { 0x00009a30, 0x00000000, 0x00000000, 0x0006818c, 0x0006818c, 0x00000000 },
4175 { 0x00009a34, 0x00000000, 0x00000000, 0x00068190, 0x00068190, 0x00000000 },
4176 { 0x00009a38, 0x00000000, 0x00000000, 0x00068194, 0x00068194, 0x00000000 },
4177 { 0x00009a3c, 0x00000000, 0x00000000, 0x000681a0, 0x000681a0, 0x00000000 },
4178 { 0x00009a40, 0x00000000, 0x00000000, 0x0006820c, 0x0006820c, 0x00000000 },
4179 { 0x00009a44, 0x00000000, 0x00000000, 0x000681a8, 0x000681a8, 0x00000000 },
4180 { 0x00009a48, 0x00000000, 0x00000000, 0x00068284, 0x00068284, 0x00000000 },
4181 { 0x00009a4c, 0x00000000, 0x00000000, 0x00068288, 0x00068288, 0x00000000 },
4182 { 0x00009a50, 0x00000000, 0x00000000, 0x00068220, 0x00068220, 0x00000000 },
4183 { 0x00009a54, 0x00000000, 0x00000000, 0x00068290, 0x00068290, 0x00000000 },
4184 { 0x00009a58, 0x00000000, 0x00000000, 0x00068300, 0x00068300, 0x00000000 },
4185 { 0x00009a5c, 0x00000000, 0x00000000, 0x00068304, 0x00068304, 0x00000000 },
4186 { 0x00009a60, 0x00000000, 0x00000000, 0x00068308, 0x00068308, 0x00000000 },
4187 { 0x00009a64, 0x00000000, 0x00000000, 0x0006830c, 0x0006830c, 0x00000000 },
4188 { 0x00009a68, 0x00000000, 0x00000000, 0x00068380, 0x00068380, 0x00000000 },
4189 { 0x00009a6c, 0x00000000, 0x00000000, 0x00068384, 0x00068384, 0x00000000 },
4190 { 0x00009a70, 0x00000000, 0x00000000, 0x00068700, 0x00068700, 0x00000000 },
4191 { 0x00009a74, 0x00000000, 0x00000000, 0x00068704, 0x00068704, 0x00000000 },
4192 { 0x00009a78, 0x00000000, 0x00000000, 0x00068708, 0x00068708, 0x00000000 },
4193 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
4194 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
4195 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
4196 { 0x00009a88, 0x00000000, 0x00000000, 0x00068b04, 0x00068b04, 0x00000000 },
4197 { 0x00009a8c, 0x00000000, 0x00000000, 0x00068b08, 0x00068b08, 0x00000000 },
4198 { 0x00009a90, 0x00000000, 0x00000000, 0x00068b08, 0x00068b08, 0x00000000 },
4199 { 0x00009a94, 0x00000000, 0x00000000, 0x00068b0c, 0x00068b0c, 0x00000000 },
4200 { 0x00009a98, 0x00000000, 0x00000000, 0x00068b80, 0x00068b80, 0x00000000 },
4201 { 0x00009a9c, 0x00000000, 0x00000000, 0x00068b84, 0x00068b84, 0x00000000 },
4202 { 0x00009aa0, 0x00000000, 0x00000000, 0x00068b88, 0x00068b88, 0x00000000 },
4203 { 0x00009aa4, 0x00000000, 0x00000000, 0x00068b8c, 0x00068b8c, 0x00000000 },
4204 { 0x00009aa8, 0x00000000, 0x00000000, 0x000b8b90, 0x000b8b90, 0x00000000 },
4205 { 0x00009aac, 0x00000000, 0x00000000, 0x000b8f80, 0x000b8f80, 0x00000000 },
4206 { 0x00009ab0, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 },
4207 { 0x00009ab4, 0x00000000, 0x00000000, 0x000b8f88, 0x000b8f88, 0x00000000 },
4208 { 0x00009ab8, 0x00000000, 0x00000000, 0x000b8f8c, 0x000b8f8c, 0x00000000 },
4209 { 0x00009abc, 0x00000000, 0x00000000, 0x000b8f90, 0x000b8f90, 0x00000000 },
4210 { 0x00009ac0, 0x00000000, 0x00000000, 0x000bb30c, 0x000bb30c, 0x00000000 },
4211 { 0x00009ac4, 0x00000000, 0x00000000, 0x000bb310, 0x000bb310, 0x00000000 },
4212 { 0x00009ac8, 0x00000000, 0x00000000, 0x000bb384, 0x000bb384, 0x00000000 },
4213 { 0x00009acc, 0x00000000, 0x00000000, 0x000bb388, 0x000bb388, 0x00000000 },
4214 { 0x00009ad0, 0x00000000, 0x00000000, 0x000bb324, 0x000bb324, 0x00000000 },
4215 { 0x00009ad4, 0x00000000, 0x00000000, 0x000bb704, 0x000bb704, 0x00000000 },
4216 { 0x00009ad8, 0x00000000, 0x00000000, 0x000f96a4, 0x000f96a4, 0x00000000 },
4217 { 0x00009adc, 0x00000000, 0x00000000, 0x000f96a8, 0x000f96a8, 0x00000000 },
4218 { 0x00009ae0, 0x00000000, 0x00000000, 0x000f9710, 0x000f9710, 0x00000000 },
4219 { 0x00009ae4, 0x00000000, 0x00000000, 0x000f9714, 0x000f9714, 0x00000000 },
4220 { 0x00009ae8, 0x00000000, 0x00000000, 0x000f9720, 0x000f9720, 0x00000000 },
4221 { 0x00009aec, 0x00000000, 0x00000000, 0x000f9724, 0x000f9724, 0x00000000 },
4222 { 0x00009af0, 0x00000000, 0x00000000, 0x000f9728, 0x000f9728, 0x00000000 },
4223 { 0x00009af4, 0x00000000, 0x00000000, 0x000f972c, 0x000f972c, 0x00000000 },
4224 { 0x00009af8, 0x00000000, 0x00000000, 0x000f97a0, 0x000f97a0, 0x00000000 },
4225 { 0x00009afc, 0x00000000, 0x00000000, 0x000f97a4, 0x000f97a4, 0x00000000 },
4226 { 0x00009b00, 0x00000000, 0x00000000, 0x000fb7a8, 0x000fb7a8, 0x00000000 },
4227 { 0x00009b04, 0x00000000, 0x00000000, 0x000fb7b0, 0x000fb7b0, 0x00000000 },
4228 { 0x00009b08, 0x00000000, 0x00000000, 0x000fb7b4, 0x000fb7b4, 0x00000000 },
4229 { 0x00009b0c, 0x00000000, 0x00000000, 0x000fb7b8, 0x000fb7b8, 0x00000000 },
4230 { 0x00009b10, 0x00000000, 0x00000000, 0x000fb7a5, 0x000fb7a5, 0x00000000 },
4231 { 0x00009b14, 0x00000000, 0x00000000, 0x000fb7a9, 0x000fb7a9, 0x00000000 },
4232 { 0x00009b18, 0x00000000, 0x00000000, 0x000fb7ad, 0x000fb7ad, 0x00000000 },
4233 { 0x00009b1c, 0x00000000, 0x00000000, 0x000fb7b1, 0x000fb7b1, 0x00000000 },
4234 { 0x00009b20, 0x00000000, 0x00000000, 0x000fb7b5, 0x000fb7b5, 0x00000000 },
4235 { 0x00009b24, 0x00000000, 0x00000000, 0x000fb7b9, 0x000fb7b9, 0x00000000 },
4236 { 0x00009b28, 0x00000000, 0x00000000, 0x000fb7c5, 0x000fb7c5, 0x00000000 },
4237 { 0x00009b2c, 0x00000000, 0x00000000, 0x000fb7c9, 0x000fb7c9, 0x00000000 },
4238 { 0x00009b30, 0x00000000, 0x00000000, 0x000fb7d1, 0x000fb7d1, 0x00000000 },
4239 { 0x00009b34, 0x00000000, 0x00000000, 0x000fb7d5, 0x000fb7d5, 0x00000000 },
4240 { 0x00009b38, 0x00000000, 0x00000000, 0x000fb7d9, 0x000fb7d9, 0x00000000 },
4241 { 0x00009b3c, 0x00000000, 0x00000000, 0x000fb7c6, 0x000fb7c6, 0x00000000 },
4242 { 0x00009b40, 0x00000000, 0x00000000, 0x000fb7ca, 0x000fb7ca, 0x00000000 },
4243 { 0x00009b44, 0x00000000, 0x00000000, 0x000fb7ce, 0x000fb7ce, 0x00000000 },
4244 { 0x00009b48, 0x00000000, 0x00000000, 0x000fb7d2, 0x000fb7d2, 0x00000000 },
4245 { 0x00009b4c, 0x00000000, 0x00000000, 0x000fb7d6, 0x000fb7d6, 0x00000000 },
4246 { 0x00009b50, 0x00000000, 0x00000000, 0x000fb7c3, 0x000fb7c3, 0x00000000 },
4247 { 0x00009b54, 0x00000000, 0x00000000, 0x000fb7c7, 0x000fb7c7, 0x00000000 },
4248 { 0x00009b58, 0x00000000, 0x00000000, 0x000fb7cb, 0x000fb7cb, 0x00000000 },
4249 { 0x00009b5c, 0x00000000, 0x00000000, 0x000fb7cf, 0x000fb7cf, 0x00000000 },
4250 { 0x00009b60, 0x00000000, 0x00000000, 0x000fb7d7, 0x000fb7d7, 0x00000000 },
4251 { 0x00009b64, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4252 { 0x00009b68, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4253 { 0x00009b6c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4254 { 0x00009b70, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4255 { 0x00009b74, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4256 { 0x00009b78, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4257 { 0x00009b7c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4258 { 0x00009b80, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4259 { 0x00009b84, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4260 { 0x00009b88, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4261 { 0x00009b8c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4262 { 0x00009b90, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4263 { 0x00009b94, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4264 { 0x00009b98, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4265 { 0x00009b9c, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4266 { 0x00009ba0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4267 { 0x00009ba4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4268 { 0x00009ba8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4269 { 0x00009bac, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4270 { 0x00009bb0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4271 { 0x00009bb4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4272 { 0x00009bb8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4273 { 0x00009bbc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4274 { 0x00009bc0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4275 { 0x00009bc4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4276 { 0x00009bc8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4277 { 0x00009bcc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4278 { 0x00009bd0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4279 { 0x00009bd4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4280 { 0x00009bd8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4281 { 0x00009bdc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4282 { 0x00009be0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4283 { 0x00009be4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4284 { 0x00009be8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4285 { 0x00009bec, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4286 { 0x00009bf0, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4287 { 0x00009bf4, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4288 { 0x00009bf8, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4289 { 0x00009bfc, 0x00000000, 0x00000000, 0x000fb7db, 0x000fb7db, 0x00000000 },
4290 { 0x0000aa00, 0x00000000, 0x00000000, 0x0006801c, 0x0006801c, 0x00000000 },
4291 { 0x0000aa04, 0x00000000, 0x00000000, 0x0006801c, 0x0006801c, 0x00000000 },
4292 { 0x0000aa08, 0x00000000, 0x00000000, 0x0006801c, 0x0006801c, 0x00000000 },
4293 { 0x0000aa0c, 0x00000000, 0x00000000, 0x00068080, 0x00068080, 0x00000000 },
4294 { 0x0000aa10, 0x00000000, 0x00000000, 0x00068084, 0x00068084, 0x00000000 },
4295 { 0x0000aa14, 0x00000000, 0x00000000, 0x00068088, 0x00068088, 0x00000000 },
4296 { 0x0000aa18, 0x00000000, 0x00000000, 0x0006808c, 0x0006808c, 0x00000000 },
4297 { 0x0000aa1c, 0x00000000, 0x00000000, 0x00068100, 0x00068100, 0x00000000 },
4298 { 0x0000aa20, 0x00000000, 0x00000000, 0x00068104, 0x00068104, 0x00000000 },
4299 { 0x0000aa24, 0x00000000, 0x00000000, 0x00068108, 0x00068108, 0x00000000 },
4300 { 0x0000aa28, 0x00000000, 0x00000000, 0x0006810c, 0x0006810c, 0x00000000 },
4301 { 0x0000aa2c, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 },
4302 { 0x0000aa30, 0x00000000, 0x00000000, 0x00068110, 0x00068110, 0x00000000 },
4303 { 0x0000aa34, 0x00000000, 0x00000000, 0x00068180, 0x00068180, 0x00000000 },
4304 { 0x0000aa38, 0x00000000, 0x00000000, 0x00068184, 0x00068184, 0x00000000 },
4305 { 0x0000aa3c, 0x00000000, 0x00000000, 0x00068188, 0x00068188, 0x00000000 },
4306 { 0x0000aa40, 0x00000000, 0x00000000, 0x0006818c, 0x0006818c, 0x00000000 },
4307 { 0x0000aa44, 0x00000000, 0x00000000, 0x00068190, 0x00068190, 0x00000000 },
4308 { 0x0000aa48, 0x00000000, 0x00000000, 0x00068194, 0x00068194, 0x00000000 },
4309 { 0x0000aa4c, 0x00000000, 0x00000000, 0x000681a0, 0x000681a0, 0x00000000 },
4310 { 0x0000aa50, 0x00000000, 0x00000000, 0x0006820c, 0x0006820c, 0x00000000 },
4311 { 0x0000aa54, 0x00000000, 0x00000000, 0x000681a8, 0x000681a8, 0x00000000 },
4312 { 0x0000aa58, 0x00000000, 0x00000000, 0x000681ac, 0x000681ac, 0x00000000 },
4313 { 0x0000aa5c, 0x00000000, 0x00000000, 0x0006821c, 0x0006821c, 0x00000000 },
4314 { 0x0000aa60, 0x00000000, 0x00000000, 0x00068224, 0x00068224, 0x00000000 },
4315 { 0x0000aa64, 0x00000000, 0x00000000, 0x00068290, 0x00068290, 0x00000000 },
4316 { 0x0000aa68, 0x00000000, 0x00000000, 0x00068300, 0x00068300, 0x00000000 },
4317 { 0x0000aa6c, 0x00000000, 0x00000000, 0x00068308, 0x00068308, 0x00000000 },
4318 { 0x0000aa70, 0x00000000, 0x00000000, 0x0006830c, 0x0006830c, 0x00000000 },
4319 { 0x0000aa74, 0x00000000, 0x00000000, 0x00068310, 0x00068310, 0x00000000 },
4320 { 0x0000aa78, 0x00000000, 0x00000000, 0x00068788, 0x00068788, 0x00000000 },
4321 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006878c, 0x0006878c, 0x00000000 },
4322 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068790, 0x00068790, 0x00000000 },
4323 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068794, 0x00068794, 0x00000000 },
4324 { 0x0000aa88, 0x00000000, 0x00000000, 0x00068798, 0x00068798, 0x00000000 },
4325 { 0x0000aa8c, 0x00000000, 0x00000000, 0x0006879c, 0x0006879c, 0x00000000 },
4326 { 0x0000aa90, 0x00000000, 0x00000000, 0x00068b89, 0x00068b89, 0x00000000 },
4327 { 0x0000aa94, 0x00000000, 0x00000000, 0x00068b8d, 0x00068b8d, 0x00000000 },
4328 { 0x0000aa98, 0x00000000, 0x00000000, 0x00068b91, 0x00068b91, 0x00000000 },
4329 { 0x0000aa9c, 0x00000000, 0x00000000, 0x00068b95, 0x00068b95, 0x00000000 },
4330 { 0x0000aaa0, 0x00000000, 0x00000000, 0x00068b99, 0x00068b99, 0x00000000 },
4331 { 0x0000aaa4, 0x00000000, 0x00000000, 0x00068ba5, 0x00068ba5, 0x00000000 },
4332 { 0x0000aaa8, 0x00000000, 0x00000000, 0x00068ba9, 0x00068ba9, 0x00000000 },
4333 { 0x0000aaac, 0x00000000, 0x00000000, 0x00068bad, 0x00068bad, 0x00000000 },
4334 { 0x0000aab0, 0x00000000, 0x00000000, 0x000b8b0c, 0x000b8b0c, 0x00000000 },
4335 { 0x0000aab4, 0x00000000, 0x00000000, 0x000b8f10, 0x000b8f10, 0x00000000 },
4336 { 0x0000aab8, 0x00000000, 0x00000000, 0x000b8f14, 0x000b8f14, 0x00000000 },
4337 { 0x0000aabc, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 },
4338 { 0x0000aac0, 0x00000000, 0x00000000, 0x000b8f84, 0x000b8f84, 0x00000000 },
4339 { 0x0000aac4, 0x00000000, 0x00000000, 0x000b8f88, 0x000b8f88, 0x00000000 },
4340 { 0x0000aac8, 0x00000000, 0x00000000, 0x000bb380, 0x000bb380, 0x00000000 },
4341 { 0x0000aacc, 0x00000000, 0x00000000, 0x000bb384, 0x000bb384, 0x00000000 },
4342 { 0x0000aad0, 0x00000000, 0x00000000, 0x000bb388, 0x000bb388, 0x00000000 },
4343 { 0x0000aad4, 0x00000000, 0x00000000, 0x000bb38c, 0x000bb38c, 0x00000000 },
4344 { 0x0000aad8, 0x00000000, 0x00000000, 0x000bb394, 0x000bb394, 0x00000000 },
4345 { 0x0000aadc, 0x00000000, 0x00000000, 0x000bb798, 0x000bb798, 0x00000000 },
4346 { 0x0000aae0, 0x00000000, 0x00000000, 0x000f970c, 0x000f970c, 0x00000000 },
4347 { 0x0000aae4, 0x00000000, 0x00000000, 0x000f9710, 0x000f9710, 0x00000000 },
4348 { 0x0000aae8, 0x00000000, 0x00000000, 0x000f9714, 0x000f9714, 0x00000000 },
4349 { 0x0000aaec, 0x00000000, 0x00000000, 0x000f9718, 0x000f9718, 0x00000000 },
4350 { 0x0000aaf0, 0x00000000, 0x00000000, 0x000f9705, 0x000f9705, 0x00000000 },
4351 { 0x0000aaf4, 0x00000000, 0x00000000, 0x000f9709, 0x000f9709, 0x00000000 },
4352 { 0x0000aaf8, 0x00000000, 0x00000000, 0x000f970d, 0x000f970d, 0x00000000 },
4353 { 0x0000aafc, 0x00000000, 0x00000000, 0x000f9711, 0x000f9711, 0x00000000 },
4354 { 0x0000ab00, 0x00000000, 0x00000000, 0x000f9715, 0x000f9715, 0x00000000 },
4355 { 0x0000ab04, 0x00000000, 0x00000000, 0x000f9719, 0x000f9719, 0x00000000 },
4356 { 0x0000ab08, 0x00000000, 0x00000000, 0x000fb7a4, 0x000fb7a4, 0x00000000 },
4357 { 0x0000ab0c, 0x00000000, 0x00000000, 0x000fb7a8, 0x000fb7a8, 0x00000000 },
4358 { 0x0000ab10, 0x00000000, 0x00000000, 0x000fb7ac, 0x000fb7ac, 0x00000000 },
4359 { 0x0000ab14, 0x00000000, 0x00000000, 0x000fb7ac, 0x000fb7ac, 0x00000000 },
4360 { 0x0000ab18, 0x00000000, 0x00000000, 0x000fb7b0, 0x000fb7b0, 0x00000000 },
4361 { 0x0000ab1c, 0x00000000, 0x00000000, 0x000fb7b8, 0x000fb7b8, 0x00000000 },
4362 { 0x0000ab20, 0x00000000, 0x00000000, 0x000fb7bc, 0x000fb7bc, 0x00000000 },
4363 { 0x0000ab24, 0x00000000, 0x00000000, 0x000fb7a1, 0x000fb7a1, 0x00000000 },
4364 { 0x0000ab28, 0x00000000, 0x00000000, 0x000fb7a5, 0x000fb7a5, 0x00000000 },
4365 { 0x0000ab2c, 0x00000000, 0x00000000, 0x000fb7a9, 0x000fb7a9, 0x00000000 },
4366 { 0x0000ab30, 0x00000000, 0x00000000, 0x000fb7b1, 0x000fb7b1, 0x00000000 },
4367 { 0x0000ab34, 0x00000000, 0x00000000, 0x000fb7b5, 0x000fb7b5, 0x00000000 },
4368 { 0x0000ab38, 0x00000000, 0x00000000, 0x000fb7bd, 0x000fb7bd, 0x00000000 },
4369 { 0x0000ab3c, 0x00000000, 0x00000000, 0x000fb7c9, 0x000fb7c9, 0x00000000 },
4370 { 0x0000ab40, 0x00000000, 0x00000000, 0x000fb7cd, 0x000fb7cd, 0x00000000 },
4371 { 0x0000ab44, 0x00000000, 0x00000000, 0x000fb7d1, 0x000fb7d1, 0x00000000 },
4372 { 0x0000ab48, 0x00000000, 0x00000000, 0x000fb7d9, 0x000fb7d9, 0x00000000 },
4373 { 0x0000ab4c, 0x00000000, 0x00000000, 0x000fb7c2, 0x000fb7c2, 0x00000000 },
4374 { 0x0000ab50, 0x00000000, 0x00000000, 0x000fb7c6, 0x000fb7c6, 0x00000000 },
4375 { 0x0000ab54, 0x00000000, 0x00000000, 0x000fb7ca, 0x000fb7ca, 0x00000000 },
4376 { 0x0000ab58, 0x00000000, 0x00000000, 0x000fb7ce, 0x000fb7ce, 0x00000000 },
4377 { 0x0000ab5c, 0x00000000, 0x00000000, 0x000fb7d2, 0x000fb7d2, 0x00000000 },
4378 { 0x0000ab60, 0x00000000, 0x00000000, 0x000fb7d6, 0x000fb7d6, 0x00000000 },
4379 { 0x0000ab64, 0x00000000, 0x00000000, 0x000fb7c3, 0x000fb7c3, 0x00000000 },
4380 { 0x0000ab68, 0x00000000, 0x00000000, 0x000fb7cb, 0x000fb7cb, 0x00000000 },
4381 { 0x0000ab6c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4382 { 0x0000ab70, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4383 { 0x0000ab74, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4384 { 0x0000ab78, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4385 { 0x0000ab7c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4386 { 0x0000ab80, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4387 { 0x0000ab84, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4388 { 0x0000ab88, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4389 { 0x0000ab8c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4390 { 0x0000ab90, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4391 { 0x0000ab94, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4392 { 0x0000ab98, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4393 { 0x0000ab9c, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4394 { 0x0000aba0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4395 { 0x0000aba4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4396 { 0x0000aba8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4397 { 0x0000abac, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4398 { 0x0000abb0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4399 { 0x0000abb4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4400 { 0x0000abb8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4401 { 0x0000abbc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4402 { 0x0000abc0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4403 { 0x0000abc4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4404 { 0x0000abc8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4405 { 0x0000abcc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4406 { 0x0000abd0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4407 { 0x0000abd4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4408 { 0x0000abd8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4409 { 0x0000abdc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4410 { 0x0000abe0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4411 { 0x0000abe4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4412 { 0x0000abe8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4413 { 0x0000abec, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4414 { 0x0000abf0, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4415 { 0x0000abf4, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4416 { 0x0000abf8, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4417 { 0x0000abfc, 0x00000000, 0x00000000, 0x000fb7d3, 0x000fb7d3, 0x00000000 },
4418 { 0x0000a204, 0x00000004, 0x00000004, 0x00000004, 0x00000004, 0x00000004 },
4419 { 0x0000a20c, 0x00000014, 0x00000014, 0x00000000, 0x00000000, 0x0001f000 },
4420 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
4421 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
4422 { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 },
4423 { 0x0000a274, 0x0a81c652, 0x0a81c652, 0x0a820652, 0x0a820652, 0x0a82a652 },
4424 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4425 { 0x0000a304, 0x00000000, 0x00000000, 0x00007201, 0x00007201, 0x00000000 },
4426 { 0x0000a308, 0x00000000, 0x00000000, 0x00010408, 0x00010408, 0x00000000 },
4427 { 0x0000a30c, 0x00000000, 0x00000000, 0x0001860a, 0x0001860a, 0x00000000 },
4428 { 0x0000a310, 0x00000000, 0x00000000, 0x00020818, 0x00020818, 0x00000000 },
4429 { 0x0000a314, 0x00000000, 0x00000000, 0x00024858, 0x00024858, 0x00000000 },
4430 { 0x0000a318, 0x00000000, 0x00000000, 0x00026859, 0x00026859, 0x00000000 },
4431 { 0x0000a31c, 0x00000000, 0x00000000, 0x0002985b, 0x0002985b, 0x00000000 },
4432 { 0x0000a320, 0x00000000, 0x00000000, 0x0002b89a, 0x0002b89a, 0x00000000 },
4433 { 0x0000a324, 0x00000000, 0x00000000, 0x0002d89b, 0x0002d89b, 0x00000000 },
4434 { 0x0000a328, 0x00000000, 0x00000000, 0x0002f89c, 0x0002f89c, 0x00000000 },
4435 { 0x0000a32c, 0x00000000, 0x00000000, 0x0003189d, 0x0003189d, 0x00000000 },
4436 { 0x0000a330, 0x00000000, 0x00000000, 0x0003389e, 0x0003389e, 0x00000000 },
4437 { 0x0000a334, 0x00000000, 0x00000000, 0x000368de, 0x000368de, 0x00000000 },
4438 { 0x0000a338, 0x00000000, 0x00000000, 0x0003891e, 0x0003891e, 0x00000000 },
4439 { 0x0000a33c, 0x00000000, 0x00000000, 0x0003a95e, 0x0003a95e, 0x00000000 },
4440 { 0x0000a340, 0x00000000, 0x00000000, 0x0003e9df, 0x0003e9df, 0x00000000 },
4441 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4442 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
4443};
3133 4444
4445static const u_int32_t ar9285Common_9285_1_2[][2] = {
4446 { 0x0000000c, 0x00000000 },
4447 { 0x00000030, 0x00020045 },
4448 { 0x00000034, 0x00000005 },
4449 { 0x00000040, 0x00000000 },
4450 { 0x00000044, 0x00000008 },
4451 { 0x00000048, 0x00000008 },
4452 { 0x0000004c, 0x00000010 },
4453 { 0x00000050, 0x00000000 },
4454 { 0x00000054, 0x0000001f },
4455 { 0x00000800, 0x00000000 },
4456 { 0x00000804, 0x00000000 },
4457 { 0x00000808, 0x00000000 },
4458 { 0x0000080c, 0x00000000 },
4459 { 0x00000810, 0x00000000 },
4460 { 0x00000814, 0x00000000 },
4461 { 0x00000818, 0x00000000 },
4462 { 0x0000081c, 0x00000000 },
4463 { 0x00000820, 0x00000000 },
4464 { 0x00000824, 0x00000000 },
4465 { 0x00001040, 0x002ffc0f },
4466 { 0x00001044, 0x002ffc0f },
4467 { 0x00001048, 0x002ffc0f },
4468 { 0x0000104c, 0x002ffc0f },
4469 { 0x00001050, 0x002ffc0f },
4470 { 0x00001054, 0x002ffc0f },
4471 { 0x00001058, 0x002ffc0f },
4472 { 0x0000105c, 0x002ffc0f },
4473 { 0x00001060, 0x002ffc0f },
4474 { 0x00001064, 0x002ffc0f },
4475 { 0x00001230, 0x00000000 },
4476 { 0x00001270, 0x00000000 },
4477 { 0x00001038, 0x00000000 },
4478 { 0x00001078, 0x00000000 },
4479 { 0x000010b8, 0x00000000 },
4480 { 0x000010f8, 0x00000000 },
4481 { 0x00001138, 0x00000000 },
4482 { 0x00001178, 0x00000000 },
4483 { 0x000011b8, 0x00000000 },
4484 { 0x000011f8, 0x00000000 },
4485 { 0x00001238, 0x00000000 },
4486 { 0x00001278, 0x00000000 },
4487 { 0x000012b8, 0x00000000 },
4488 { 0x000012f8, 0x00000000 },
4489 { 0x00001338, 0x00000000 },
4490 { 0x00001378, 0x00000000 },
4491 { 0x000013b8, 0x00000000 },
4492 { 0x000013f8, 0x00000000 },
4493 { 0x00001438, 0x00000000 },
4494 { 0x00001478, 0x00000000 },
4495 { 0x000014b8, 0x00000000 },
4496 { 0x000014f8, 0x00000000 },
4497 { 0x00001538, 0x00000000 },
4498 { 0x00001578, 0x00000000 },
4499 { 0x000015b8, 0x00000000 },
4500 { 0x000015f8, 0x00000000 },
4501 { 0x00001638, 0x00000000 },
4502 { 0x00001678, 0x00000000 },
4503 { 0x000016b8, 0x00000000 },
4504 { 0x000016f8, 0x00000000 },
4505 { 0x00001738, 0x00000000 },
4506 { 0x00001778, 0x00000000 },
4507 { 0x000017b8, 0x00000000 },
4508 { 0x000017f8, 0x00000000 },
4509 { 0x0000103c, 0x00000000 },
4510 { 0x0000107c, 0x00000000 },
4511 { 0x000010bc, 0x00000000 },
4512 { 0x000010fc, 0x00000000 },
4513 { 0x0000113c, 0x00000000 },
4514 { 0x0000117c, 0x00000000 },
4515 { 0x000011bc, 0x00000000 },
4516 { 0x000011fc, 0x00000000 },
4517 { 0x0000123c, 0x00000000 },
4518 { 0x0000127c, 0x00000000 },
4519 { 0x000012bc, 0x00000000 },
4520 { 0x000012fc, 0x00000000 },
4521 { 0x0000133c, 0x00000000 },
4522 { 0x0000137c, 0x00000000 },
4523 { 0x000013bc, 0x00000000 },
4524 { 0x000013fc, 0x00000000 },
4525 { 0x0000143c, 0x00000000 },
4526 { 0x0000147c, 0x00000000 },
4527 { 0x00004030, 0x00000002 },
4528 { 0x0000403c, 0x00000002 },
4529 { 0x00004024, 0x0000001f },
4530 { 0x00004060, 0x00000000 },
4531 { 0x00004064, 0x00000000 },
4532 { 0x00007010, 0x00000031 },
4533 { 0x00007034, 0x00000002 },
4534 { 0x00007038, 0x000004c2 },
4535 { 0x00008004, 0x00000000 },
4536 { 0x00008008, 0x00000000 },
4537 { 0x0000800c, 0x00000000 },
4538 { 0x00008018, 0x00000700 },
4539 { 0x00008020, 0x00000000 },
4540 { 0x00008038, 0x00000000 },
4541 { 0x0000803c, 0x00000000 },
4542 { 0x00008048, 0x00000000 },
4543 { 0x00008054, 0x00000000 },
4544 { 0x00008058, 0x00000000 },
4545 { 0x0000805c, 0x000fc78f },
4546 { 0x00008060, 0x0000000f },
4547 { 0x00008064, 0x00000000 },
4548 { 0x00008070, 0x00000000 },
4549 { 0x000080c0, 0x2a80001a },
4550 { 0x000080c4, 0x05dc01e0 },
4551 { 0x000080c8, 0x1f402710 },
4552 { 0x000080cc, 0x01f40000 },
4553 { 0x000080d0, 0x00001e00 },
4554 { 0x000080d4, 0x00000000 },
4555 { 0x000080d8, 0x00400000 },
4556 { 0x000080e0, 0xffffffff },
4557 { 0x000080e4, 0x0000ffff },
4558 { 0x000080e8, 0x003f3f3f },
4559 { 0x000080ec, 0x00000000 },
4560 { 0x000080f0, 0x00000000 },
4561 { 0x000080f4, 0x00000000 },
4562 { 0x000080f8, 0x00000000 },
4563 { 0x000080fc, 0x00020000 },
4564 { 0x00008100, 0x00020000 },
4565 { 0x00008104, 0x00000001 },
4566 { 0x00008108, 0x00000052 },
4567 { 0x0000810c, 0x00000000 },
4568 { 0x00008110, 0x00000168 },
4569 { 0x00008118, 0x000100aa },
4570 { 0x0000811c, 0x00003210 },
4571 { 0x00008120, 0x08f04800 },
4572 { 0x00008124, 0x00000000 },
4573 { 0x00008128, 0x00000000 },
4574 { 0x0000812c, 0x00000000 },
4575 { 0x00008130, 0x00000000 },
4576 { 0x00008134, 0x00000000 },
4577 { 0x00008138, 0x00000000 },
4578 { 0x0000813c, 0x00000000 },
4579 { 0x00008144, 0xffffffff },
4580 { 0x00008168, 0x00000000 },
4581 { 0x0000816c, 0x00000000 },
4582 { 0x00008170, 0x32143320 },
4583 { 0x00008174, 0xfaa4fa50 },
4584 { 0x00008178, 0x00000100 },
4585 { 0x0000817c, 0x00000000 },
4586 { 0x000081c0, 0x00000000 },
4587 { 0x000081d0, 0x00003210 },
4588 { 0x000081ec, 0x00000000 },
4589 { 0x000081f0, 0x00000000 },
4590 { 0x000081f4, 0x00000000 },
4591 { 0x000081f8, 0x00000000 },
4592 { 0x000081fc, 0x00000000 },
4593 { 0x00008200, 0x00000000 },
4594 { 0x00008204, 0x00000000 },
4595 { 0x00008208, 0x00000000 },
4596 { 0x0000820c, 0x00000000 },
4597 { 0x00008210, 0x00000000 },
4598 { 0x00008214, 0x00000000 },
4599 { 0x00008218, 0x00000000 },
4600 { 0x0000821c, 0x00000000 },
4601 { 0x00008220, 0x00000000 },
4602 { 0x00008224, 0x00000000 },
4603 { 0x00008228, 0x00000000 },
4604 { 0x0000822c, 0x00000000 },
4605 { 0x00008230, 0x00000000 },
4606 { 0x00008234, 0x00000000 },
4607 { 0x00008238, 0x00000000 },
4608 { 0x0000823c, 0x00000000 },
4609 { 0x00008240, 0x00100000 },
4610 { 0x00008244, 0x0010f400 },
4611 { 0x00008248, 0x00000100 },
4612 { 0x0000824c, 0x0001e800 },
4613 { 0x00008250, 0x00000000 },
4614 { 0x00008254, 0x00000000 },
4615 { 0x00008258, 0x00000000 },
4616 { 0x0000825c, 0x400000ff },
4617 { 0x00008260, 0x00080922 },
4618 { 0x00008264, 0xa8a00010 },
4619 { 0x00008270, 0x00000000 },
4620 { 0x00008274, 0x40000000 },
4621 { 0x00008278, 0x003e4180 },
4622 { 0x0000827c, 0x00000000 },
4623 { 0x00008284, 0x0000002c },
4624 { 0x00008288, 0x0000002c },
4625 { 0x0000828c, 0x00000000 },
4626 { 0x00008294, 0x00000000 },
4627 { 0x00008298, 0x00000000 },
4628 { 0x0000829c, 0x00000000 },
4629 { 0x00008300, 0x00000040 },
4630 { 0x00008314, 0x00000000 },
4631 { 0x00008328, 0x00000000 },
4632 { 0x0000832c, 0x00000001 },
4633 { 0x00008330, 0x00000302 },
4634 { 0x00008334, 0x00000e00 },
4635 { 0x00008338, 0x00ff0000 },
4636 { 0x0000833c, 0x00000000 },
4637 { 0x00008340, 0x00010380 },
4638 { 0x00008344, 0x00581043 },
4639 { 0x00009808, 0x00000000 },
4640 { 0x0000980c, 0xafe68e30 },
4641 { 0x00009810, 0xfd14e000 },
4642 { 0x00009814, 0x9c0a9f6b },
4643 { 0x0000981c, 0x00000000 },
4644 { 0x0000982c, 0x0000a000 },
4645 { 0x00009830, 0x00000000 },
4646 { 0x0000983c, 0x00200400 },
4647 { 0x0000984c, 0x0040233c },
4648 { 0x00009854, 0x00000044 },
4649 { 0x00009900, 0x00000000 },
4650 { 0x00009904, 0x00000000 },
4651 { 0x00009908, 0x00000000 },
4652 { 0x0000990c, 0x00000000 },
4653 { 0x00009910, 0x01002310 },
4654 { 0x0000991c, 0x10000fff },
4655 { 0x00009920, 0x04900000 },
4656 { 0x00009928, 0x00000001 },
4657 { 0x0000992c, 0x00000004 },
4658 { 0x00009934, 0x1e1f2022 },
4659 { 0x00009938, 0x0a0b0c0d },
4660 { 0x0000993c, 0x00000000 },
4661 { 0x00009940, 0x14750604 },
4662 { 0x00009948, 0x9280c00a },
4663 { 0x0000994c, 0x00020028 },
4664 { 0x00009954, 0x5f3ca3de },
4665 { 0x00009958, 0x2108ecff },
4666 { 0x00009968, 0x000003ce },
4667 { 0x00009970, 0x192bb515 },
4668 { 0x00009974, 0x00000000 },
4669 { 0x00009978, 0x00000001 },
4670 { 0x0000997c, 0x00000000 },
4671 { 0x00009980, 0x00000000 },
4672 { 0x00009984, 0x00000000 },
4673 { 0x00009988, 0x00000000 },
4674 { 0x0000998c, 0x00000000 },
4675 { 0x00009990, 0x00000000 },
4676 { 0x00009994, 0x00000000 },
4677 { 0x00009998, 0x00000000 },
4678 { 0x0000999c, 0x00000000 },
4679 { 0x000099a0, 0x00000000 },
4680 { 0x000099a4, 0x00000001 },
4681 { 0x000099a8, 0x201fff00 },
4682 { 0x000099ac, 0x2def1000 },
4683 { 0x000099b0, 0x03051000 },
4684 { 0x000099b4, 0x00000820 },
4685 { 0x000099dc, 0x00000000 },
4686 { 0x000099e0, 0x00000000 },
4687 { 0x000099e4, 0xaaaaaaaa },
4688 { 0x000099e8, 0x3c466478 },
4689 { 0x000099ec, 0x0cc80caa },
4690 { 0x000099f0, 0x00000000 },
4691 { 0x0000a208, 0x803e6788 },
4692 { 0x0000a210, 0x4080a333 },
4693 { 0x0000a214, 0x00206c10 },
4694 { 0x0000a218, 0x009c4060 },
4695 { 0x0000a220, 0x01834061 },
4696 { 0x0000a224, 0x00000400 },
4697 { 0x0000a228, 0x000003b5 },
4698 { 0x0000a22c, 0x00000000 },
4699 { 0x0000a234, 0x20202020 },
4700 { 0x0000a238, 0x20202020 },
4701 { 0x0000a244, 0x00000000 },
4702 { 0x0000a248, 0xfffffffc },
4703 { 0x0000a24c, 0x00000000 },
4704 { 0x0000a254, 0x00000000 },
4705 { 0x0000a258, 0x0ccb5380 },
4706 { 0x0000a25c, 0x15151501 },
4707 { 0x0000a260, 0xdfa90f01 },
4708 { 0x0000a268, 0x00000000 },
4709 { 0x0000a26c, 0x0ebae9e6 },
4710 { 0x0000d270, 0x0d820820 },
4711 { 0x0000a278, 0x318c6318 },
4712 { 0x0000a27c, 0x050c0318 },
4713 { 0x0000d35c, 0x07ffffef },
4714 { 0x0000d360, 0x0fffffe7 },
4715 { 0x0000d364, 0x17ffffe5 },
4716 { 0x0000d368, 0x1fffffe4 },
4717 { 0x0000d36c, 0x37ffffe3 },
4718 { 0x0000d370, 0x3fffffe3 },
4719 { 0x0000d374, 0x57ffffe3 },
4720 { 0x0000d378, 0x5fffffe2 },
4721 { 0x0000d37c, 0x7fffffe2 },
4722 { 0x0000d380, 0x7f3c7bba },
4723 { 0x0000d384, 0xf3307ff0 },
4724 { 0x0000a388, 0x0c000000 },
4725 { 0x0000a38c, 0x20202020 },
4726 { 0x0000a390, 0x20202020 },
4727 { 0x0000a394, 0x318c6318 },
4728 { 0x0000a398, 0x00000318 },
4729 { 0x0000a39c, 0x00000001 },
4730 { 0x0000a3a0, 0x00000000 },
4731 { 0x0000a3a4, 0x00000000 },
4732 { 0x0000a3a8, 0x00000000 },
4733 { 0x0000a3ac, 0x00000000 },
4734 { 0x0000a3b0, 0x00000000 },
4735 { 0x0000a3b4, 0x00000000 },
4736 { 0x0000a3b8, 0x00000000 },
4737 { 0x0000a3bc, 0x00000000 },
4738 { 0x0000a3c0, 0x00000000 },
4739 { 0x0000a3c4, 0x00000000 },
4740 { 0x0000a3cc, 0x20202020 },
4741 { 0x0000a3d0, 0x20202020 },
4742 { 0x0000a3d4, 0x20202020 },
4743 { 0x0000a3dc, 0x318c6318 },
4744 { 0x0000a3e0, 0x00000318 },
4745 { 0x0000a3e4, 0x00000000 },
4746 { 0x0000a3e8, 0x18c43433 },
4747 { 0x0000a3ec, 0x00f70081 },
4748 { 0x00007800, 0x00140000 },
4749 { 0x00007804, 0x0e4548d8 },
4750 { 0x00007808, 0x54214514 },
4751 { 0x0000780c, 0x02025820 },
4752 { 0x00007810, 0x71c0d388 },
4753 { 0x00007814, 0x924934a8 },
4754 { 0x0000781c, 0x00000000 },
4755 { 0x00007820, 0x00000c04 },
4756 { 0x00007824, 0x00d86fff },
4757 { 0x00007828, 0x26d2491b },
4758 { 0x0000782c, 0x6e36d97b },
4759 { 0x00007830, 0xedb6d96e },
4760 { 0x00007834, 0x71400087 },
4761 { 0x00007838, 0xfac68801 },
4762 { 0x0000783c, 0x0001fffe },
4763 { 0x00007840, 0xffeb1a20 },
4764 { 0x00007844, 0x000c0db6 },
4765 { 0x00007848, 0x6db61b6f },
4766 { 0x0000784c, 0x6d9b66db },
4767 { 0x00007850, 0x6d8c6dba },
4768 { 0x00007854, 0x00040000 },
4769 { 0x00007858, 0xdb003012 },
4770 { 0x0000785c, 0x04924914 },
4771 { 0x00007860, 0x21084210 },
4772 { 0x00007864, 0xf7d7ffde },
4773 { 0x00007868, 0xc2034080 },
4774 { 0x0000786c, 0x48609eb4 },
4775 { 0x00007870, 0x10142c00 },
4776};
3134 4777
3135static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = { 4778static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
4779 {0x00004040, 0x9248fd00 },
4780 {0x00004040, 0x24924924 },
4781 {0x00004040, 0xa8000019 },
4782 {0x00004040, 0x13160820 },
4783 {0x00004040, 0xe5980560 },
4784 {0x00004040, 0xc01dcffd },
4785 {0x00004040, 0x1aaabe41 },
4786 {0x00004040, 0xbe105554 },
4787 {0x00004040, 0x00043007 },
4788 {0x00004044, 0x00000000 },
4789};
4790
4791static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
3136 {0x00004040, 0x9248fd00 }, 4792 {0x00004040, 0x9248fd00 },
3137 {0x00004040, 0x24924924 }, 4793 {0x00004040, 0x24924924 },
3138 {0x00004040, 0xa8000019 }, 4794 {0x00004040, 0xa8000019 },
3139 {0x00004040, 0x13160820 }, 4795 {0x00004040, 0x13160820 },
3140 {0x00004040, 0xe5980560 }, 4796 {0x00004040, 0xe5980560 },
3141 {0x00004040, 0x401dcffd }, 4797 {0x00004040, 0xc01dcffc },
3142 {0x00004040, 0x1aaabe40 }, 4798 {0x00004040, 0x1aaabe41 },
3143 {0x00004040, 0xbe105554 }, 4799 {0x00004040, 0xbe105554 },
3144 {0x00004040, 0x00043007 }, 4800 {0x00004040, 0x00043007 },
3145 {0x00004044, 0x00000000 }, 4801 {0x00004044, 0x00000000 },
diff --git a/drivers/net/wireless/ath9k/mac.c b/drivers/net/wireless/ath9k/mac.c
new file mode 100644
index 000000000000..af32d091dc38
--- /dev/null
+++ b/drivers/net/wireless/ath9k/mac.c
@@ -0,0 +1,946 @@
1/*
2 * Copyright (c) 2008 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "hw.h"
19#include "reg.h"
20#include "phy.h"
21
 22static void ath9k_hw_set_txq_interrupts(struct ath_hal *ah,
 23					  struct ath9k_tx_queue_info *qi)
 24{
 25	struct ath_hal_5416 *ahp = AH5416(ah);
 26
 27	DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
 28		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
 29		ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
 30		ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
 31		ahp->ah_txUrnInterruptMask);
 32
 33	/* TXOK/TXDESC masks live in IMR_S0, TXERR/TXEOL in IMR_S1,
 34	 * and TXURN is a field of IMR_S2; push the cached masks down. */
 35	REG_WRITE(ah, AR_IMR_S0,
 36		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
 37		  | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC));
 38	REG_WRITE(ah, AR_IMR_S1,
 39		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
 40		  | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL));
 41	REG_RMW_FIELD(ah, AR_IMR_S2,
 42		      AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
 43}
42
43u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q)
44{
45 return REG_READ(ah, AR_QTXDP(q));
46}
47
48bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q, u32 txdp)
49{
50 REG_WRITE(ah, AR_QTXDP(q), txdp);
51
52 return true;
53}
54
55bool ath9k_hw_txstart(struct ath_hal *ah, u32 q)
56{
57 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %u\n", q);
58
59 REG_WRITE(ah, AR_Q_TXE, 1 << q);
60
61 return true;
62}
63
64u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q)
65{
66 u32 npend;
67
68 npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
69 if (npend == 0) {
70
71 if (REG_READ(ah, AR_Q_TXE) & (1 << q))
72 npend = 1;
73 }
74
75 return npend;
76}
77
78bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel)
79{
80 struct ath_hal_5416 *ahp = AH5416(ah);
81 u32 txcfg, curLevel, newLevel;
82 enum ath9k_int omask;
83
84 if (ah->ah_txTrigLevel >= MAX_TX_FIFO_THRESHOLD)
85 return false;
86
87 omask = ath9k_hw_set_interrupts(ah, ahp->ah_maskReg & ~ATH9K_INT_GLOBAL);
88
89 txcfg = REG_READ(ah, AR_TXCFG);
90 curLevel = MS(txcfg, AR_FTRIG);
91 newLevel = curLevel;
92 if (bIncTrigLevel) {
93 if (curLevel < MAX_TX_FIFO_THRESHOLD)
94 newLevel++;
95 } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
96 newLevel--;
97 if (newLevel != curLevel)
98 REG_WRITE(ah, AR_TXCFG,
99 (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
100
101 ath9k_hw_set_interrupts(ah, omask);
102
103 ah->ah_txTrigLevel = newLevel;
104
105 return newLevel != curLevel;
106}
107
108bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q)
109{
110 u32 tsfLow, j, wait;
111
112 REG_WRITE(ah, AR_Q_TXD, 1 << q);
113
114 for (wait = 1000; wait != 0; wait--) {
115 if (ath9k_hw_numtxpending(ah, q) == 0)
116 break;
117 udelay(100);
118 }
119
120 if (ath9k_hw_numtxpending(ah, q)) {
121 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
122 "%s: Num of pending TX Frames %d on Q %d\n",
123 __func__, ath9k_hw_numtxpending(ah, q), q);
124
125 for (j = 0; j < 2; j++) {
126 tsfLow = REG_READ(ah, AR_TSF_L32);
127 REG_WRITE(ah, AR_QUIET2,
128 SM(10, AR_QUIET2_QUIET_DUR));
129 REG_WRITE(ah, AR_QUIET_PERIOD, 100);
130 REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
131 REG_SET_BIT(ah, AR_TIMER_MODE,
132 AR_QUIET_TIMER_EN);
133
134 if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
135 break;
136
137 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
138 "TSF have moved while trying to set "
139 "quiet time TSF: 0x%08x\n", tsfLow);
140 }
141
142 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
143
144 udelay(200);
145 REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
146
147 wait = 1000;
148
149 while (ath9k_hw_numtxpending(ah, q)) {
150 if ((--wait) == 0) {
151 DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
152 "Failed to stop Tx DMA in 100 "
153 "msec after killing last frame\n");
154 break;
155 }
156 udelay(100);
157 }
158
159 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
160 }
161
162 REG_WRITE(ah, AR_Q_TXD, 0);
163
164 return wait != 0;
165}
166
167bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
168 u32 segLen, bool firstSeg,
169 bool lastSeg, const struct ath_desc *ds0)
170{
171 struct ar5416_desc *ads = AR5416DESC(ds);
172
173 if (firstSeg) {
174 ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
175 } else if (lastSeg) {
176 ads->ds_ctl0 = 0;
177 ads->ds_ctl1 = segLen;
178 ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
179 ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
180 } else {
181 ads->ds_ctl0 = 0;
182 ads->ds_ctl1 = segLen | AR_TxMore;
183 ads->ds_ctl2 = 0;
184 ads->ds_ctl3 = 0;
185 }
186 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
187 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
188 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
189 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
190 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
191
192 return true;
193}
194
195void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds)
196{
197 struct ar5416_desc *ads = AR5416DESC(ds);
198
199 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
200 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
201 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
202 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
203 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
204}
205
206int ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds)
207{
208 struct ar5416_desc *ads = AR5416DESC(ds);
209
210 if ((ads->ds_txstatus9 & AR_TxDone) == 0)
211 return -EINPROGRESS;
212
213 ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
214 ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
215 ds->ds_txstat.ts_status = 0;
216 ds->ds_txstat.ts_flags = 0;
217
218 if (ads->ds_txstatus1 & AR_ExcessiveRetries)
219 ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
220 if (ads->ds_txstatus1 & AR_Filtered)
221 ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
222 if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
223 ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
224 ath9k_hw_updatetxtriglevel(ah, true);
225 }
226 if (ads->ds_txstatus9 & AR_TxOpExceeded)
227 ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
228 if (ads->ds_txstatus1 & AR_TxTimerExpired)
229 ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
230
231 if (ads->ds_txstatus1 & AR_DescCfgErr)
232 ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
233 if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
234 ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
235 ath9k_hw_updatetxtriglevel(ah, true);
236 }
237 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
238 ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
239 ath9k_hw_updatetxtriglevel(ah, true);
240 }
241 if (ads->ds_txstatus0 & AR_TxBaStatus) {
242 ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
243 ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
244 ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
245 }
246
247 ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
248 switch (ds->ds_txstat.ts_rateindex) {
249 case 0:
250 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
251 break;
252 case 1:
253 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
254 break;
255 case 2:
256 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
257 break;
258 case 3:
259 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
260 break;
261 }
262
263 ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
264 ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
265 ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
266 ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
267 ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
268 ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
269 ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
270 ds->ds_txstat.evm0 = ads->AR_TxEVM0;
271 ds->ds_txstat.evm1 = ads->AR_TxEVM1;
272 ds->ds_txstat.evm2 = ads->AR_TxEVM2;
273 ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
274 ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
275 ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
276 ds->ds_txstat.ts_antenna = 1;
277
278 return 0;
279}
280
281void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
282 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
283 u32 keyIx, enum ath9k_key_type keyType, u32 flags)
284{
285 struct ar5416_desc *ads = AR5416DESC(ds);
286 struct ath_hal_5416 *ahp = AH5416(ah);
287
288 txPower += ahp->ah_txPowerIndexOffset;
289 if (txPower > 63)
290 txPower = 63;
291
292 ads->ds_ctl0 = (pktLen & AR_FrameLen)
293 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
294 | SM(txPower, AR_XmitPower)
295 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
296 | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
297 | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
298 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
299
300 ads->ds_ctl1 =
301 (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
302 | SM(type, AR_FrameType)
303 | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
304 | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
305 | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
306
307 ads->ds_ctl6 = SM(keyType, AR_EncrType);
308
309 if (AR_SREV_9285(ah)) {
310 ads->ds_ctl8 = 0;
311 ads->ds_ctl9 = 0;
312 ads->ds_ctl10 = 0;
313 ads->ds_ctl11 = 0;
314 }
315}
316
317void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
318 struct ath_desc *lastds,
319 u32 durUpdateEn, u32 rtsctsRate,
320 u32 rtsctsDuration,
321 struct ath9k_11n_rate_series series[],
322 u32 nseries, u32 flags)
323{
324 struct ar5416_desc *ads = AR5416DESC(ds);
325 struct ar5416_desc *last_ads = AR5416DESC(lastds);
326 u32 ds_ctl0;
327
328 (void) nseries;
329 (void) rtsctsDuration;
330
331 if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
332 ds_ctl0 = ads->ds_ctl0;
333
334 if (flags & ATH9K_TXDESC_RTSENA) {
335 ds_ctl0 &= ~AR_CTSEnable;
336 ds_ctl0 |= AR_RTSEnable;
337 } else {
338 ds_ctl0 &= ~AR_RTSEnable;
339 ds_ctl0 |= AR_CTSEnable;
340 }
341
342 ads->ds_ctl0 = ds_ctl0;
343 } else {
344 ads->ds_ctl0 =
345 (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
346 }
347
348 ads->ds_ctl2 = set11nTries(series, 0)
349 | set11nTries(series, 1)
350 | set11nTries(series, 2)
351 | set11nTries(series, 3)
352 | (durUpdateEn ? AR_DurUpdateEna : 0)
353 | SM(0, AR_BurstDur);
354
355 ads->ds_ctl3 = set11nRate(series, 0)
356 | set11nRate(series, 1)
357 | set11nRate(series, 2)
358 | set11nRate(series, 3);
359
360 ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
361 | set11nPktDurRTSCTS(series, 1);
362
363 ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
364 | set11nPktDurRTSCTS(series, 3);
365
366 ads->ds_ctl7 = set11nRateFlags(series, 0)
367 | set11nRateFlags(series, 1)
368 | set11nRateFlags(series, 2)
369 | set11nRateFlags(series, 3)
370 | SM(rtsctsRate, AR_RTSCTSRate);
371 last_ads->ds_ctl2 = ads->ds_ctl2;
372 last_ads->ds_ctl3 = ads->ds_ctl3;
373}
374
375void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
376 u32 aggrLen)
377{
378 struct ar5416_desc *ads = AR5416DESC(ds);
379
380 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
381 ads->ds_ctl6 &= ~AR_AggrLen;
382 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
383}
384
385void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
386 u32 numDelims)
387{
388 struct ar5416_desc *ads = AR5416DESC(ds);
389 unsigned int ctl6;
390
391 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
392
393 ctl6 = ads->ds_ctl6;
394 ctl6 &= ~AR_PadDelim;
395 ctl6 |= SM(numDelims, AR_PadDelim);
396 ads->ds_ctl6 = ctl6;
397}
398
399void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds)
400{
401 struct ar5416_desc *ads = AR5416DESC(ds);
402
403 ads->ds_ctl1 |= AR_IsAggr;
404 ads->ds_ctl1 &= ~AR_MoreAggr;
405 ads->ds_ctl6 &= ~AR_PadDelim;
406}
407
408void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds)
409{
410 struct ar5416_desc *ads = AR5416DESC(ds);
411
412 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
413}
414
415void ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
416 u32 burstDuration)
417{
418 struct ar5416_desc *ads = AR5416DESC(ds);
419
420 ads->ds_ctl2 &= ~AR_BurstDur;
421 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
422}
423
424void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds,
425 u32 vmf)
426{
427 struct ar5416_desc *ads = AR5416DESC(ds);
428
429 if (vmf)
430 ads->ds_ctl0 |= AR_VirtMoreFrag;
431 else
432 ads->ds_ctl0 &= ~AR_VirtMoreFrag;
433}
434
435void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs)
436{
437 struct ath_hal_5416 *ahp = AH5416(ah);
438
439 *txqs &= ahp->ah_intrTxqs;
440 ahp->ah_intrTxqs &= ~(*txqs);
441}
442
443bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
444 const struct ath9k_tx_queue_info *qinfo)
445{
446 u32 cw;
447 struct ath_hal_5416 *ahp = AH5416(ah);
448 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
449 struct ath9k_tx_queue_info *qi;
450
451 if (q >= pCap->total_queues) {
452 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
453 return false;
454 }
455
456 qi = &ahp->ah_txq[q];
457 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
458 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue\n");
459 return false;
460 }
461
462 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %p\n", qi);
463
464 qi->tqi_ver = qinfo->tqi_ver;
465 qi->tqi_subtype = qinfo->tqi_subtype;
466 qi->tqi_qflags = qinfo->tqi_qflags;
467 qi->tqi_priority = qinfo->tqi_priority;
468 if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
469 qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
470 else
471 qi->tqi_aifs = INIT_AIFS;
472 if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
473 cw = min(qinfo->tqi_cwmin, 1024U);
474 qi->tqi_cwmin = 1;
475 while (qi->tqi_cwmin < cw)
476 qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
477 } else
478 qi->tqi_cwmin = qinfo->tqi_cwmin;
479 if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
480 cw = min(qinfo->tqi_cwmax, 1024U);
481 qi->tqi_cwmax = 1;
482 while (qi->tqi_cwmax < cw)
483 qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
484 } else
485 qi->tqi_cwmax = INIT_CWMAX;
486
487 if (qinfo->tqi_shretry != 0)
488 qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
489 else
490 qi->tqi_shretry = INIT_SH_RETRY;
491 if (qinfo->tqi_lgretry != 0)
492 qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
493 else
494 qi->tqi_lgretry = INIT_LG_RETRY;
495 qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
496 qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
497 qi->tqi_burstTime = qinfo->tqi_burstTime;
498 qi->tqi_readyTime = qinfo->tqi_readyTime;
499
500 switch (qinfo->tqi_subtype) {
501 case ATH9K_WME_UPSD:
502 if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
503 qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
504 break;
505 default:
506 break;
507 }
508
509 return true;
510}
511
512bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
513 struct ath9k_tx_queue_info *qinfo)
514{
515 struct ath_hal_5416 *ahp = AH5416(ah);
516 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
517 struct ath9k_tx_queue_info *qi;
518
519 if (q >= pCap->total_queues) {
520 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
521 return false;
522 }
523
524 qi = &ahp->ah_txq[q];
525 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
526 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue\n");
527 return false;
528 }
529
530 qinfo->tqi_qflags = qi->tqi_qflags;
531 qinfo->tqi_ver = qi->tqi_ver;
532 qinfo->tqi_subtype = qi->tqi_subtype;
533 qinfo->tqi_qflags = qi->tqi_qflags;
534 qinfo->tqi_priority = qi->tqi_priority;
535 qinfo->tqi_aifs = qi->tqi_aifs;
536 qinfo->tqi_cwmin = qi->tqi_cwmin;
537 qinfo->tqi_cwmax = qi->tqi_cwmax;
538 qinfo->tqi_shretry = qi->tqi_shretry;
539 qinfo->tqi_lgretry = qi->tqi_lgretry;
540 qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
541 qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
542 qinfo->tqi_burstTime = qi->tqi_burstTime;
543 qinfo->tqi_readyTime = qi->tqi_readyTime;
544
545 return true;
546}
547
548int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
549 const struct ath9k_tx_queue_info *qinfo)
550{
551 struct ath_hal_5416 *ahp = AH5416(ah);
552 struct ath9k_tx_queue_info *qi;
553 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
554 int q;
555
556 switch (type) {
557 case ATH9K_TX_QUEUE_BEACON:
558 q = pCap->total_queues - 1;
559 break;
560 case ATH9K_TX_QUEUE_CAB:
561 q = pCap->total_queues - 2;
562 break;
563 case ATH9K_TX_QUEUE_PSPOLL:
564 q = 1;
565 break;
566 case ATH9K_TX_QUEUE_UAPSD:
567 q = pCap->total_queues - 3;
568 break;
569 case ATH9K_TX_QUEUE_DATA:
570 for (q = 0; q < pCap->total_queues; q++)
571 if (ahp->ah_txq[q].tqi_type ==
572 ATH9K_TX_QUEUE_INACTIVE)
573 break;
574 if (q == pCap->total_queues) {
575 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
576 "no available tx queue\n");
577 return -1;
578 }
579 break;
580 default:
581 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "bad tx queue type %u\n", type);
582 return -1;
583 }
584
585 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "queue %u\n", q);
586
587 qi = &ahp->ah_txq[q];
588 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
589 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
590 "tx queue %u already active\n", q);
591 return -1;
592 }
593 memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
594 qi->tqi_type = type;
595 if (qinfo == NULL) {
596 qi->tqi_qflags =
597 TXQ_FLAG_TXOKINT_ENABLE
598 | TXQ_FLAG_TXERRINT_ENABLE
599 | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
600 qi->tqi_aifs = INIT_AIFS;
601 qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
602 qi->tqi_cwmax = INIT_CWMAX;
603 qi->tqi_shretry = INIT_SH_RETRY;
604 qi->tqi_lgretry = INIT_LG_RETRY;
605 qi->tqi_physCompBuf = 0;
606 } else {
607 qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
608 (void) ath9k_hw_set_txq_props(ah, q, qinfo);
609 }
610
611 return q;
612}
613
614bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q)
615{
616 struct ath_hal_5416 *ahp = AH5416(ah);
617 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
618 struct ath9k_tx_queue_info *qi;
619
620 if (q >= pCap->total_queues) {
621 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
622 return false;
623 }
624 qi = &ahp->ah_txq[q];
625 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
626 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue %u\n", q);
627 return false;
628 }
629
630 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "release queue %u\n", q);
631
632 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
633 ahp->ah_txOkInterruptMask &= ~(1 << q);
634 ahp->ah_txErrInterruptMask &= ~(1 << q);
635 ahp->ah_txDescInterruptMask &= ~(1 << q);
636 ahp->ah_txEolInterruptMask &= ~(1 << q);
637 ahp->ah_txUrnInterruptMask &= ~(1 << q);
638 ath9k_hw_set_txq_interrupts(ah, qi);
639
640 return true;
641}
642
643bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
644{
645 struct ath_hal_5416 *ahp = AH5416(ah);
646 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
647 struct ath9k_channel *chan = ah->ah_curchan;
648 struct ath9k_tx_queue_info *qi;
649 u32 cwMin, chanCwMin, value;
650
651 if (q >= pCap->total_queues) {
652 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "invalid queue num %u\n", q);
653 return false;
654 }
655
656 qi = &ahp->ah_txq[q];
657 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
658 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "inactive queue %u\n", q);
659 return true;
660 }
661
662 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "reset queue %u\n", q);
663
664 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
665 if (chan && IS_CHAN_B(chan))
666 chanCwMin = INIT_CWMIN_11B;
667 else
668 chanCwMin = INIT_CWMIN;
669
670 for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
671 } else
672 cwMin = qi->tqi_cwmin;
673
674 REG_WRITE(ah, AR_DLCL_IFS(q),
675 SM(cwMin, AR_D_LCL_IFS_CWMIN) |
676 SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
677 SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
678
679 REG_WRITE(ah, AR_DRETRY_LIMIT(q),
680 SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
681 SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
682 SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));
683
684 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
685 REG_WRITE(ah, AR_DMISC(q),
686 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
687
688 if (qi->tqi_cbrPeriod) {
689 REG_WRITE(ah, AR_QCBRCFG(q),
690 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
691 SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
692 REG_WRITE(ah, AR_QMISC(q),
693 REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
694 (qi->tqi_cbrOverflowLimit ?
695 AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
696 }
697 if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
698 REG_WRITE(ah, AR_QRDYTIMECFG(q),
699 SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
700 AR_Q_RDYTIMECFG_EN);
701 }
702
703 REG_WRITE(ah, AR_DCHNTIME(q),
704 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
705 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
706
707 if (qi->tqi_burstTime
708 && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
709 REG_WRITE(ah, AR_QMISC(q),
710 REG_READ(ah, AR_QMISC(q)) |
711 AR_Q_MISC_RDYTIME_EXP_POLICY);
712
713 }
714
715 if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
716 REG_WRITE(ah, AR_DMISC(q),
717 REG_READ(ah, AR_DMISC(q)) |
718 AR_D_MISC_POST_FR_BKOFF_DIS);
719 }
720 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
721 REG_WRITE(ah, AR_DMISC(q),
722 REG_READ(ah, AR_DMISC(q)) |
723 AR_D_MISC_FRAG_BKOFF_EN);
724 }
725 switch (qi->tqi_type) {
726 case ATH9K_TX_QUEUE_BEACON:
727 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
728 | AR_Q_MISC_FSP_DBA_GATED
729 | AR_Q_MISC_BEACON_USE
730 | AR_Q_MISC_CBR_INCR_DIS1);
731
732 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
733 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
734 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
735 | AR_D_MISC_BEACON_USE
736 | AR_D_MISC_POST_FR_BKOFF_DIS);
737 break;
738 case ATH9K_TX_QUEUE_CAB:
739 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
740 | AR_Q_MISC_FSP_DBA_GATED
741 | AR_Q_MISC_CBR_INCR_DIS1
742 | AR_Q_MISC_CBR_INCR_DIS0);
743 value = (qi->tqi_readyTime -
744 (ah->ah_config.sw_beacon_response_time -
745 ah->ah_config.dma_beacon_response_time) -
746 ah->ah_config.additional_swba_backoff) * 1024;
747 REG_WRITE(ah, AR_QRDYTIMECFG(q),
748 value | AR_Q_RDYTIMECFG_EN);
749 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
750 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
751 AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
752 break;
753 case ATH9K_TX_QUEUE_PSPOLL:
754 REG_WRITE(ah, AR_QMISC(q),
755 REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
756 break;
757 case ATH9K_TX_QUEUE_UAPSD:
758 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
759 AR_D_MISC_POST_FR_BKOFF_DIS);
760 break;
761 default:
762 break;
763 }
764
765 if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
766 REG_WRITE(ah, AR_DMISC(q),
767 REG_READ(ah, AR_DMISC(q)) |
768 SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
769 AR_D_MISC_ARB_LOCKOUT_CNTRL) |
770 AR_D_MISC_POST_FR_BKOFF_DIS);
771 }
772
773 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
774 ahp->ah_txOkInterruptMask |= 1 << q;
775 else
776 ahp->ah_txOkInterruptMask &= ~(1 << q);
777 if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
778 ahp->ah_txErrInterruptMask |= 1 << q;
779 else
780 ahp->ah_txErrInterruptMask &= ~(1 << q);
781 if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
782 ahp->ah_txDescInterruptMask |= 1 << q;
783 else
784 ahp->ah_txDescInterruptMask &= ~(1 << q);
785 if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
786 ahp->ah_txEolInterruptMask |= 1 << q;
787 else
788 ahp->ah_txEolInterruptMask &= ~(1 << q);
789 if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
790 ahp->ah_txUrnInterruptMask |= 1 << q;
791 else
792 ahp->ah_txUrnInterruptMask &= ~(1 << q);
793 ath9k_hw_set_txq_interrupts(ah, qi);
794
795 return true;
796}
797
798int ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
799 u32 pa, struct ath_desc *nds, u64 tsf)
800{
801 struct ar5416_desc ads;
802 struct ar5416_desc *adsp = AR5416DESC(ds);
803 u32 phyerr;
804
805 if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
806 return -EINPROGRESS;
807
808 ads.u.rx = adsp->u.rx;
809
810 ds->ds_rxstat.rs_status = 0;
811 ds->ds_rxstat.rs_flags = 0;
812
813 ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
814 ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;
815
816 ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
817 ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
818 ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
819 ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
820 ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
821 ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
822 ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
823 if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
824 ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
825 else
826 ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;
827
828 ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
829 ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
830
831 ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
832 ds->ds_rxstat.rs_moreaggr =
833 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
834 ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
835 ds->ds_rxstat.rs_flags =
836 (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
837 ds->ds_rxstat.rs_flags |=
838 (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
839
840 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
841 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
842 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
843 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
844 if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
845 ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;
846
847 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
848 if (ads.ds_rxstatus8 & AR_CRCErr)
849 ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
850 else if (ads.ds_rxstatus8 & AR_PHYErr) {
851 ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
852 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
853 ds->ds_rxstat.rs_phyerr = phyerr;
854 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
855 ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
856 else if (ads.ds_rxstatus8 & AR_MichaelErr)
857 ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
858 }
859
860 return 0;
861}
862
863bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
864 u32 size, u32 flags)
865{
866 struct ar5416_desc *ads = AR5416DESC(ds);
867 struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
868
869 ads->ds_ctl1 = size & AR_BufLen;
870 if (flags & ATH9K_RXDESC_INTREQ)
871 ads->ds_ctl1 |= AR_RxIntrReq;
872
873 ads->ds_rxstatus8 &= ~AR_RxDone;
874 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
875 memset(&(ads->u), 0, sizeof(ads->u));
876
877 return true;
878}
879
880bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set)
881{
882 u32 reg;
883
884 if (set) {
885 REG_SET_BIT(ah, AR_DIAG_SW,
886 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
887
888 if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE, 0)) {
889 REG_CLR_BIT(ah, AR_DIAG_SW,
890 (AR_DIAG_RX_DIS |
891 AR_DIAG_RX_ABORT));
892
893 reg = REG_READ(ah, AR_OBS_BUS_1);
894 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
895 "rx failed to go idle in 10 ms RXSM=0x%x\n", reg);
896
897 return false;
898 }
899 } else {
900 REG_CLR_BIT(ah, AR_DIAG_SW,
901 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
902 }
903
904 return true;
905}
906
907void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp)
908{
909 REG_WRITE(ah, AR_RXDP, rxdp);
910}
911
912void ath9k_hw_rxena(struct ath_hal *ah)
913{
914 REG_WRITE(ah, AR_CR, AR_CR_RXE);
915}
916
917void ath9k_hw_startpcureceive(struct ath_hal *ah)
918{
919 ath9k_enable_mib_counters(ah);
920
921 ath9k_ani_reset(ah);
922
923 REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
924}
925
926void ath9k_hw_stoppcurecv(struct ath_hal *ah)
927{
928 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
929
930 ath9k_hw_disable_mib_counters(ah);
931}
932
933bool ath9k_hw_stopdmarecv(struct ath_hal *ah)
934{
935 REG_WRITE(ah, AR_CR, AR_CR_RXD);
936
937 if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0)) {
938 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
939 "dma failed to stop in 10ms\n"
940 "AR_CR=0x%08x\nAR_DIAG_SW=0x%08x\n",
941 REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
942 return false;
943 } else {
944 return true;
945 }
946}
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index f05f584ab7bc..191eec50dc75 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -14,15 +14,13 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17/* mac80211 and PCI callbacks */
18
19#include <linux/nl80211.h> 17#include <linux/nl80211.h>
20#include "core.h" 18#include "core.h"
19#include "reg.h"
20#include "hw.h"
21 21
22#define ATH_PCI_VERSION "0.1" 22#define ATH_PCI_VERSION "0.1"
23 23
24#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13
25
26static char *dev_info = "ath9k"; 24static char *dev_info = "ath9k";
27 25
28MODULE_AUTHOR("Atheros Communications"); 26MODULE_AUTHOR("Atheros Communications");
@@ -36,9 +34,581 @@ static struct pci_device_id ath_pci_id_table[] __devinitdata = {
36 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */ 34 { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
37 { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */ 35 { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
38 { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */ 36 { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
37 { PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
39 { 0 } 38 { 0 }
40}; 39};
41 40
41static void ath_detach(struct ath_softc *sc);
42
43/* return bus cachesize in 4B word units */
44
45static void bus_read_cachesize(struct ath_softc *sc, int *csz)
46{
47 u8 u8tmp;
48
49 pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
50 *csz = (int)u8tmp;
51
52 /*
53 * This check was put in to avoid "unplesant" consequences if
54 * the bootrom has not fully initialized all PCI devices.
55 * Sometimes the cache line size register is not set
56 */
57
58 if (*csz == 0)
59 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
60}
61
/*
 * Select the active rate table for the given wireless mode and pick
 * the protection-frame rate index.
 */
static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
{
	sc->cur_rate_table = sc->hw_rate_table[mode];
	/*
	 * All protection frames are transmitted at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 * XXX select protection rate index from rate table.
	 */
	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
72
73static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
74{
75 if (chan->chanmode == CHANNEL_A)
76 return ATH9K_MODE_11A;
77 else if (chan->chanmode == CHANNEL_G)
78 return ATH9K_MODE_11G;
79 else if (chan->chanmode == CHANNEL_B)
80 return ATH9K_MODE_11B;
81 else if (chan->chanmode == CHANNEL_A_HT20)
82 return ATH9K_MODE_11NA_HT20;
83 else if (chan->chanmode == CHANNEL_G_HT20)
84 return ATH9K_MODE_11NG_HT20;
85 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
86 return ATH9K_MODE_11NA_HT40PLUS;
87 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
88 return ATH9K_MODE_11NA_HT40MINUS;
89 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
90 return ATH9K_MODE_11NG_HT40PLUS;
91 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
92 return ATH9K_MODE_11NG_HT40MINUS;
93
94 WARN_ON(1); /* should not get here */
95
96 return ATH9K_MODE_11B;
97}
98
99static void ath_update_txpow(struct ath_softc *sc)
100{
101 struct ath_hal *ah = sc->sc_ah;
102 u32 txpow;
103
104 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
105 ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
106 /* read back in case value is clamped */
107 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
108 sc->sc_curtxpow = txpow;
109 }
110}
111
/*
 * Map the 802.11n D2.0 "Minimum MPDU Start Spacing" field to a
 * spacing value in microseconds:
 *   0 -> no restriction
 *   1, 2, 3 -> 1 us (our lower layer calculations limit our
 *              precision to 1 microsecond, so 1/4 us and 1/2 us
 *              collapse to 1 us)
 *   4 -> 2 us, 5 -> 4 us, 6 -> 8 us, 7 -> 16 us
 * Out-of-range values map to 0 (no restriction).
 */
static u8 parse_mpdudensity(u8 mpdudensity)
{
	static const u8 spacing_us[] = { 0, 1, 1, 1, 2, 4, 8, 16 };

	if (mpdudensity >= sizeof(spacing_us))
		return 0;

	return spacing_us[mpdudensity];
}
146
147static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
148{
149 struct ath_rate_table *rate_table = NULL;
150 struct ieee80211_supported_band *sband;
151 struct ieee80211_rate *rate;
152 int i, maxrates;
153
154 switch (band) {
155 case IEEE80211_BAND_2GHZ:
156 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
157 break;
158 case IEEE80211_BAND_5GHZ:
159 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
160 break;
161 default:
162 break;
163 }
164
165 if (rate_table == NULL)
166 return;
167
168 sband = &sc->sbands[band];
169 rate = sc->rates[band];
170
171 if (rate_table->rate_cnt > ATH_RATE_MAX)
172 maxrates = ATH_RATE_MAX;
173 else
174 maxrates = rate_table->rate_cnt;
175
176 for (i = 0; i < maxrates; i++) {
177 rate[i].bitrate = rate_table->info[i].ratekbps / 100;
178 rate[i].hw_value = rate_table->info[i].ratecode;
179 sband->n_bitrates++;
180 DPRINTF(sc, ATH_DBG_CONFIG, "Rate: %2dMbps, ratecode: %2d\n",
181 rate[i].bitrate / 10, rate[i].hw_value);
182 }
183}
184
185static int ath_setup_channels(struct ath_softc *sc)
186{
187 struct ath_hal *ah = sc->sc_ah;
188 int nchan, i, a = 0, b = 0;
189 u8 regclassids[ATH_REGCLASSIDS_MAX];
190 u32 nregclass = 0;
191 struct ieee80211_supported_band *band_2ghz;
192 struct ieee80211_supported_band *band_5ghz;
193 struct ieee80211_channel *chan_2ghz;
194 struct ieee80211_channel *chan_5ghz;
195 struct ath9k_channel *c;
196
197 /* Fill in ah->ah_channels */
198 if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (u32 *)&nchan,
199 regclassids, ATH_REGCLASSIDS_MAX,
200 &nregclass, CTRY_DEFAULT, false, 1)) {
201 u32 rd = ah->ah_currentRD;
202 DPRINTF(sc, ATH_DBG_FATAL,
203 "Unable to collect channel list; "
204 "regdomain likely %u country code %u\n",
205 rd, CTRY_DEFAULT);
206 return -EINVAL;
207 }
208
209 band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
210 band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
211 chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
212 chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];
213
214 for (i = 0; i < nchan; i++) {
215 c = &ah->ah_channels[i];
216 if (IS_CHAN_2GHZ(c)) {
217 chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
218 chan_2ghz[a].center_freq = c->channel;
219 chan_2ghz[a].max_power = c->maxTxPower;
220
221 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
222 chan_2ghz[a].flags |= IEEE80211_CHAN_NO_IBSS;
223 if (c->channelFlags & CHANNEL_PASSIVE)
224 chan_2ghz[a].flags |= IEEE80211_CHAN_PASSIVE_SCAN;
225
226 band_2ghz->n_channels = ++a;
227
228 DPRINTF(sc, ATH_DBG_CONFIG, "2MHz channel: %d, "
229 "channelFlags: 0x%x\n",
230 c->channel, c->channelFlags);
231 } else if (IS_CHAN_5GHZ(c)) {
232 chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
233 chan_5ghz[b].center_freq = c->channel;
234 chan_5ghz[b].max_power = c->maxTxPower;
235
236 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
237 chan_5ghz[b].flags |= IEEE80211_CHAN_NO_IBSS;
238 if (c->channelFlags & CHANNEL_PASSIVE)
239 chan_5ghz[b].flags |= IEEE80211_CHAN_PASSIVE_SCAN;
240
241 band_5ghz->n_channels = ++b;
242
243 DPRINTF(sc, ATH_DBG_CONFIG, "5MHz channel: %d, "
244 "channelFlags: 0x%x\n",
245 c->channel, c->channelFlags);
246 }
247 }
248
249 return 0;
250}
251
252/*
253 * Set/change channels. If the channel is really being changed, it's done
254 * by reseting the chip. To accomplish this we must first cleanup any pending
255 * DMA, then restart stuff.
256*/
257static int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
258{
259 struct ath_hal *ah = sc->sc_ah;
260 bool fastcc = true, stopped;
261
262 if (sc->sc_flags & SC_OP_INVALID)
263 return -EIO;
264
265 if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
266 hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
267 (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
268 (sc->sc_flags & SC_OP_FULL_RESET)) {
269 int status;
270 /*
271 * This is only performed if the channel settings have
272 * actually changed.
273 *
274 * To switch channels clear any pending DMA operations;
275 * wait long enough for the RX fifo to drain, reset the
276 * hardware at the new frequency, and then re-enable
277 * the relevant bits of the h/w.
278 */
279 ath9k_hw_set_interrupts(ah, 0);
280 ath_draintxq(sc, false);
281 stopped = ath_stoprecv(sc);
282
283 /* XXX: do not flush receive queue here. We don't want
284 * to flush data frames already in queue because of
285 * changing channel. */
286
287 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
288 fastcc = false;
289
290 DPRINTF(sc, ATH_DBG_CONFIG,
291 "(%u MHz) -> (%u MHz), cflags:%x, chanwidth: %d\n",
292 sc->sc_ah->ah_curchan->channel,
293 hchan->channel, hchan->channelFlags, sc->tx_chan_width);
294
295 spin_lock_bh(&sc->sc_resetlock);
296 if (!ath9k_hw_reset(ah, hchan, sc->tx_chan_width,
297 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
298 sc->sc_ht_extprotspacing, fastcc, &status)) {
299 DPRINTF(sc, ATH_DBG_FATAL,
300 "Unable to reset channel %u (%uMhz) "
301 "flags 0x%x hal status %u\n",
302 ath9k_hw_mhz2ieee(ah, hchan->channel,
303 hchan->channelFlags),
304 hchan->channel, hchan->channelFlags, status);
305 spin_unlock_bh(&sc->sc_resetlock);
306 return -EIO;
307 }
308 spin_unlock_bh(&sc->sc_resetlock);
309
310 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
311 sc->sc_flags &= ~SC_OP_FULL_RESET;
312
313 if (ath_startrecv(sc) != 0) {
314 DPRINTF(sc, ATH_DBG_FATAL,
315 "Unable to restart recv logic\n");
316 return -EIO;
317 }
318
319 ath_setcurmode(sc, ath_chan2mode(hchan));
320 ath_update_txpow(sc);
321 ath9k_hw_set_interrupts(ah, sc->sc_imask);
322 }
323 return 0;
324}
325
/*
 * This routine performs the periodic noise floor calibration function
 * that is used to adjust and optimize the chip performance. This
 * takes environmental changes (location, temperature) into account.
 * When the task is complete, it reschedules itself depending on the
 * appropriate interval that was calculated.
 *
 * Runs as a kernel timer callback; @data carries the softc pointer.
 * All interval comparisons use millisecond timestamps derived from
 * jiffies (unsigned arithmetic, so wraparound is handled naturally).
 */
static void ath_ani_calibrate(unsigned long data)
{
	struct ath_softc *sc;
	struct ath_hal *ah;
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval;

	sc = (struct ath_softc *)data;
	ah = sc->sc_ah;

	/*
	 * don't calibrate when we're scanning.
	 * we are most likely not on our home channel.
	 */
	if (sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)
		return;

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
		longcal = true;
		DPRINTF(sc, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
		sc->sc_ani.sc_longcal_timer = timestamp;
	}

	/* Short calibration applies only while sc_caldone is false */
	if (!sc->sc_ani.sc_caldone) {
		if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
		    ATH_SHORT_CALINTERVAL) {
			shortcal = true;
			DPRINTF(sc, ATH_DBG_ANI, "shortcal @%lu\n", jiffies);
			sc->sc_ani.sc_shortcal_timer = timestamp;
			sc->sc_ani.sc_resetcal_timer = timestamp;
		}
	} else {
		/* Calibration finished earlier; periodically re-check
		 * whether the stored calibration is still valid. */
		if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
						&sc->sc_ani.sc_caldone);
			if (sc->sc_ani.sc_caldone)
				sc->sc_ani.sc_resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
	   ATH_ANI_POLLINTERVAL) {
		aniflag = true;
		sc->sc_ani.sc_checkani_timer = timestamp;
	}

	/* Skip all processing if there's nothing to do. */
	if (longcal || shortcal || aniflag) {
		/* Call ANI routine if necessary */
		if (aniflag)
			ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
					     ah->ah_curchan);

		/* Perform calibration if necessary */
		if (longcal || shortcal) {
			bool iscaldone = false;

			if (ath9k_hw_calibrate(ah, ah->ah_curchan,
					       sc->sc_rx_chainmask, longcal,
					       &iscaldone)) {
				/* Only a long calibration updates the
				 * cached noise floor. */
				if (longcal)
					sc->sc_ani.sc_noise_floor =
						ath9k_hw_getchan_noise(ah,
							       ah->ah_curchan);

				DPRINTF(sc, ATH_DBG_ANI,
					"calibrate chan %u/%x nf: %d\n",
					ah->ah_curchan->channel,
					ah->ah_curchan->channelFlags,
					sc->sc_ani.sc_noise_floor);
			} else {
				DPRINTF(sc, ATH_DBG_ANY,
					"calibrate chan %u/%x failed\n",
					ah->ah_curchan->channel,
					ah->ah_curchan->channelFlags);
			}
			sc->sc_ani.sc_caldone = iscaldone;
		}
	}

	/*
	 * Set timer interval based on previous results.
	 * The interval must be the shortest necessary to satisfy ANI,
	 * short calibration and long calibration.
	 */
	cal_interval = ATH_LONG_CALINTERVAL;
	if (sc->sc_ah->ah_config.enable_ani)
		cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
	if (!sc->sc_ani.sc_caldone)
		cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);

	mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval));
}
433
434/*
435 * Update tx/rx chainmask. For legacy association,
436 * hard code chainmask to 1x1, for 11n association, use
437 * the chainmask configuration.
438 */
439static void ath_update_chainmask(struct ath_softc *sc, int is_ht)
440{
441 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
442 if (is_ht) {
443 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
444 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
445 } else {
446 sc->sc_tx_chainmask = 1;
447 sc->sc_rx_chainmask = 1;
448 }
449
450 DPRINTF(sc, ATH_DBG_CONFIG, "tx chmask: %d, rx chmask: %d\n",
451 sc->sc_tx_chainmask, sc->sc_rx_chainmask);
452}
453
/*
 * Per-station attach hook: initialize the driver's per-node state
 * stored in mac80211's drv_priv area.
 */
static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
	struct ath_node *an;

	an = (struct ath_node *)sta->drv_priv;

	/* Set up TX aggregation state only when the hw supports it */
	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_tx_node_init(sc, an);

	/* Max A-MPDU length in bytes, derived from the station's
	 * advertised ampdu_factor exponent */
	an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
			     sta->ht_cap.ampdu_factor);
	/* Minimum MPDU start spacing, converted to microseconds */
	an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
}
467
468static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
469{
470 struct ath_node *an = (struct ath_node *)sta->drv_priv;
471
472 if (sc->sc_flags & SC_OP_TXAGGR)
473 ath_tx_node_cleanup(sc, an);
474}
475
476static void ath9k_tasklet(unsigned long data)
477{
478 struct ath_softc *sc = (struct ath_softc *)data;
479 u32 status = sc->sc_intrstatus;
480
481 if (status & ATH9K_INT_FATAL) {
482 /* need a chip reset */
483 ath_reset(sc, false);
484 return;
485 } else {
486
487 if (status &
488 (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
489 spin_lock_bh(&sc->rx.rxflushlock);
490 ath_rx_tasklet(sc, 0);
491 spin_unlock_bh(&sc->rx.rxflushlock);
492 }
493 /* XXX: optimize this */
494 if (status & ATH9K_INT_TX)
495 ath_tx_tasklet(sc);
496 }
497
498 /* re-enable hardware interrupt */
499 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
500}
501
/*
 * Hardware interrupt handler (possibly on a shared IRQ line).
 *
 * Reads and clears the interrupt status, handles the causes that are
 * cheap enough for hard-IRQ context (beacon tasklet scheduling, TX
 * trigger level, MIB events, TIM timer) and defers the rest to
 * ath9k_tasklet() by setting 'sched'.  Returns IRQ_NONE when the
 * interrupt was not ours, IRQ_HANDLED otherwise.
 *
 * The do { } while (0) wrapper exists only to allow early 'return's
 * to be rewritten as breaks if needed; it executes exactly once.
 */
static irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	do {
		if (sc->sc_flags & SC_OP_INVALID) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything. Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
			return IRQ_NONE;
		}

		/*
		 * Figure out the reason(s) for the interrupt. Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to insure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */

		status &= sc->sc_imask;	/* discard unasked-for bits */

		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */
		if (!status)
			return IRQ_NONE;

		/* Stash the causes for the tasklet to act on */
		sc->sc_intrstatus = status;

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 *     RXE bit is written, but it doesn't work
				 *     at least on older hardware revs.
				 */
				sched = true;
			}

			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event. We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->sc_halstats);
				ath9k_hw_set_interrupts(ah, sc->sc_imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->ah_caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
				}
			}
		}
	} while (0);

	ath_debug_stat_interrupt(sc, status);

	if (sched) {
		/* turn off every interrupt except SWBA */
		ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}
611
42static int ath_get_channel(struct ath_softc *sc, 612static int ath_get_channel(struct ath_softc *sc,
43 struct ieee80211_channel *chan) 613 struct ieee80211_channel *chan)
44{ 614{
@@ -53,34 +623,39 @@ static int ath_get_channel(struct ath_softc *sc,
53} 623}
54 624
55static u32 ath_get_extchanmode(struct ath_softc *sc, 625static u32 ath_get_extchanmode(struct ath_softc *sc,
56 struct ieee80211_channel *chan) 626 struct ieee80211_channel *chan,
627 enum nl80211_channel_type channel_type)
57{ 628{
58 u32 chanmode = 0; 629 u32 chanmode = 0;
59 u8 ext_chan_offset = sc->sc_ht_info.ext_chan_offset;
60 enum ath9k_ht_macmode tx_chan_width = sc->sc_ht_info.tx_chan_width;
61 630
62 switch (chan->band) { 631 switch (chan->band) {
63 case IEEE80211_BAND_2GHZ: 632 case IEEE80211_BAND_2GHZ:
64 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE) && 633 switch(channel_type) {
65 (tx_chan_width == ATH9K_HT_MACMODE_20)) 634 case NL80211_CHAN_NO_HT:
635 case NL80211_CHAN_HT20:
66 chanmode = CHANNEL_G_HT20; 636 chanmode = CHANNEL_G_HT20;
67 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) && 637 break;
68 (tx_chan_width == ATH9K_HT_MACMODE_2040)) 638 case NL80211_CHAN_HT40PLUS:
69 chanmode = CHANNEL_G_HT40PLUS; 639 chanmode = CHANNEL_G_HT40PLUS;
70 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) && 640 break;
71 (tx_chan_width == ATH9K_HT_MACMODE_2040)) 641 case NL80211_CHAN_HT40MINUS:
72 chanmode = CHANNEL_G_HT40MINUS; 642 chanmode = CHANNEL_G_HT40MINUS;
643 break;
644 }
73 break; 645 break;
74 case IEEE80211_BAND_5GHZ: 646 case IEEE80211_BAND_5GHZ:
75 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE) && 647 switch(channel_type) {
76 (tx_chan_width == ATH9K_HT_MACMODE_20)) 648 case NL80211_CHAN_NO_HT:
649 case NL80211_CHAN_HT20:
77 chanmode = CHANNEL_A_HT20; 650 chanmode = CHANNEL_A_HT20;
78 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) && 651 break;
79 (tx_chan_width == ATH9K_HT_MACMODE_2040)) 652 case NL80211_CHAN_HT40PLUS:
80 chanmode = CHANNEL_A_HT40PLUS; 653 chanmode = CHANNEL_A_HT40PLUS;
81 if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) && 654 break;
82 (tx_chan_width == ATH9K_HT_MACMODE_2040)) 655 case NL80211_CHAN_HT40MINUS:
83 chanmode = CHANNEL_A_HT40MINUS; 656 chanmode = CHANNEL_A_HT40MINUS;
657 break;
658 }
84 break; 659 break;
85 default: 660 default:
86 break; 661 break;
@@ -89,22 +664,31 @@ static u32 ath_get_extchanmode(struct ath_softc *sc,
89 return chanmode; 664 return chanmode;
90} 665}
91 666
667static int ath_keyset(struct ath_softc *sc, u16 keyix,
668 struct ath9k_keyval *hk, const u8 mac[ETH_ALEN])
669{
670 bool status;
671
672 status = ath9k_hw_set_keycache_entry(sc->sc_ah,
673 keyix, hk, mac, false);
674
675 return status != false;
676}
92 677
93static int ath_setkey_tkip(struct ath_softc *sc, 678static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key,
94 struct ieee80211_key_conf *key,
95 struct ath9k_keyval *hk, 679 struct ath9k_keyval *hk,
96 const u8 *addr) 680 const u8 *addr)
97{ 681{
98 u8 *key_rxmic = NULL; 682 const u8 *key_rxmic;
99 u8 *key_txmic = NULL; 683 const u8 *key_txmic;
100 684
101 key_txmic = key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY; 685 key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
102 key_rxmic = key->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY; 686 key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
103 687
104 if (addr == NULL) { 688 if (addr == NULL) {
105 /* Group key installation */ 689 /* Group key installation */
106 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); 690 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
107 return ath_keyset(sc, key->keyidx, hk, addr); 691 return ath_keyset(sc, keyix, hk, addr);
108 } 692 }
109 if (!sc->sc_splitmic) { 693 if (!sc->sc_splitmic) {
110 /* 694 /*
@@ -113,34 +697,113 @@ static int ath_setkey_tkip(struct ath_softc *sc,
113 */ 697 */
114 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); 698 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
115 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic)); 699 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
116 return ath_keyset(sc, key->keyidx, hk, addr); 700 return ath_keyset(sc, keyix, hk, addr);
117 } 701 }
118 /* 702 /*
119 * TX key goes at first index, RX key at +32. 703 * TX key goes at first index, RX key at +32.
120 * The hal handles the MIC keys at index+64. 704 * The hal handles the MIC keys at index+64.
121 */ 705 */
122 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic)); 706 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
123 if (!ath_keyset(sc, key->keyidx, hk, NULL)) { 707 if (!ath_keyset(sc, keyix, hk, NULL)) {
124 /* Txmic entry failed. No need to proceed further */ 708 /* Txmic entry failed. No need to proceed further */
125 DPRINTF(sc, ATH_DBG_KEYCACHE, 709 DPRINTF(sc, ATH_DBG_KEYCACHE,
126 "%s Setting TX MIC Key Failed\n", __func__); 710 "Setting TX MIC Key Failed\n");
127 return 0; 711 return 0;
128 } 712 }
129 713
130 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); 714 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
131 /* XXX delete tx key on failure? */ 715 /* XXX delete tx key on failure? */
132 return ath_keyset(sc, key->keyidx+32, hk, addr); 716 return ath_keyset(sc, keyix + 32, hk, addr);
717}
718
719static int ath_reserve_key_cache_slot_tkip(struct ath_softc *sc)
720{
721 int i;
722
723 for (i = IEEE80211_WEP_NKID; i < sc->sc_keymax / 2; i++) {
724 if (test_bit(i, sc->sc_keymap) ||
725 test_bit(i + 64, sc->sc_keymap))
726 continue; /* At least one part of TKIP key allocated */
727 if (sc->sc_splitmic &&
728 (test_bit(i + 32, sc->sc_keymap) ||
729 test_bit(i + 64 + 32, sc->sc_keymap)))
730 continue; /* At least one part of TKIP key allocated */
731
732 /* Found a free slot for a TKIP key */
733 return i;
734 }
735 return -1;
736}
737
738static int ath_reserve_key_cache_slot(struct ath_softc *sc)
739{
740 int i;
741
742 /* First, try to find slots that would not be available for TKIP. */
743 if (sc->sc_splitmic) {
744 for (i = IEEE80211_WEP_NKID; i < sc->sc_keymax / 4; i++) {
745 if (!test_bit(i, sc->sc_keymap) &&
746 (test_bit(i + 32, sc->sc_keymap) ||
747 test_bit(i + 64, sc->sc_keymap) ||
748 test_bit(i + 64 + 32, sc->sc_keymap)))
749 return i;
750 if (!test_bit(i + 32, sc->sc_keymap) &&
751 (test_bit(i, sc->sc_keymap) ||
752 test_bit(i + 64, sc->sc_keymap) ||
753 test_bit(i + 64 + 32, sc->sc_keymap)))
754 return i + 32;
755 if (!test_bit(i + 64, sc->sc_keymap) &&
756 (test_bit(i , sc->sc_keymap) ||
757 test_bit(i + 32, sc->sc_keymap) ||
758 test_bit(i + 64 + 32, sc->sc_keymap)))
759 return i + 64;
760 if (!test_bit(i + 64 + 32, sc->sc_keymap) &&
761 (test_bit(i, sc->sc_keymap) ||
762 test_bit(i + 32, sc->sc_keymap) ||
763 test_bit(i + 64, sc->sc_keymap)))
764 return i + 64 + 32;
765 }
766 } else {
767 for (i = IEEE80211_WEP_NKID; i < sc->sc_keymax / 2; i++) {
768 if (!test_bit(i, sc->sc_keymap) &&
769 test_bit(i + 64, sc->sc_keymap))
770 return i;
771 if (test_bit(i, sc->sc_keymap) &&
772 !test_bit(i + 64, sc->sc_keymap))
773 return i + 64;
774 }
775 }
776
777 /* No partially used TKIP slots, pick any available slot */
778 for (i = IEEE80211_WEP_NKID; i < sc->sc_keymax; i++) {
779 /* Do not allow slots that could be needed for TKIP group keys
780 * to be used. This limitation could be removed if we know that
781 * TKIP will not be used. */
782 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
783 continue;
784 if (sc->sc_splitmic) {
785 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
786 continue;
787 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
788 continue;
789 }
790
791 if (!test_bit(i, sc->sc_keymap))
792 return i; /* Found a free slot for a key */
793 }
794
795 /* No free slot found */
796 return -1;
133} 797}
134 798
135static int ath_key_config(struct ath_softc *sc, 799static int ath_key_config(struct ath_softc *sc,
136 const u8 *addr, 800 const u8 *addr,
137 struct ieee80211_key_conf *key) 801 struct ieee80211_key_conf *key)
138{ 802{
139 struct ieee80211_vif *vif;
140 struct ath9k_keyval hk; 803 struct ath9k_keyval hk;
141 const u8 *mac = NULL; 804 const u8 *mac = NULL;
142 int ret = 0; 805 int ret = 0;
143 enum nl80211_iftype opmode; 806 int idx;
144 807
145 memset(&hk, 0, sizeof(hk)); 808 memset(&hk, 0, sizeof(hk));
146 809
@@ -158,226 +821,103 @@ static int ath_key_config(struct ath_softc *sc,
158 return -EINVAL; 821 return -EINVAL;
159 } 822 }
160 823
161 hk.kv_len = key->keylen; 824 hk.kv_len = key->keylen;
162 memcpy(hk.kv_val, key->key, key->keylen); 825 memcpy(hk.kv_val, key->key, key->keylen);
163 826
164 if (!sc->sc_vaps[0]) 827 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
165 return -EIO; 828 /* For now, use the default keys for broadcast keys. This may
829 * need to change with virtual interfaces. */
830 idx = key->keyidx;
831 } else if (key->keyidx) {
832 struct ieee80211_vif *vif;
166 833
167 vif = sc->sc_vaps[0]->av_if_data; 834 mac = addr;
168 opmode = vif->type; 835 vif = sc->sc_vaps[0];
169 836 if (vif->type != NL80211_IFTYPE_AP) {
170 /* 837 /* Only keyidx 0 should be used with unicast key, but
171 * Strategy: 838 * allow this for client mode for now. */
172 * For _M_STA mc tx, we will not setup a key at all since we never 839 idx = key->keyidx;
173 * tx mc. 840 } else
174 * _M_STA mc rx, we will use the keyID. 841 return -EIO;
175 * for _M_IBSS mc tx, we will use the keyID, and no macaddr.
176 * for _M_IBSS mc rx, we will alloc a slot and plumb the mac of the
177 * peer node. BUT we will plumb a cleartext key so that we can do
178 * perSta default key table lookup in software.
179 */
180 if (is_broadcast_ether_addr(addr)) {
181 switch (opmode) {
182 case NL80211_IFTYPE_STATION:
183 /* default key: could be group WPA key
184 * or could be static WEP key */
185 mac = NULL;
186 break;
187 case NL80211_IFTYPE_ADHOC:
188 break;
189 case NL80211_IFTYPE_AP:
190 break;
191 default:
192 ASSERT(0);
193 break;
194 }
195 } else { 842 } else {
196 mac = addr; 843 mac = addr;
844 if (key->alg == ALG_TKIP)
845 idx = ath_reserve_key_cache_slot_tkip(sc);
846 else
847 idx = ath_reserve_key_cache_slot(sc);
848 if (idx < 0)
849 return -EIO; /* no free key cache entries */
197 } 850 }
198 851
199 if (key->alg == ALG_TKIP) 852 if (key->alg == ALG_TKIP)
200 ret = ath_setkey_tkip(sc, key, &hk, mac); 853 ret = ath_setkey_tkip(sc, idx, key->key, &hk, mac);
201 else 854 else
202 ret = ath_keyset(sc, key->keyidx, &hk, mac); 855 ret = ath_keyset(sc, idx, &hk, mac);
203 856
204 if (!ret) 857 if (!ret)
205 return -EIO; 858 return -EIO;
206 859
207 return 0; 860 set_bit(idx, sc->sc_keymap);
861 if (key->alg == ALG_TKIP) {
862 set_bit(idx + 64, sc->sc_keymap);
863 if (sc->sc_splitmic) {
864 set_bit(idx + 32, sc->sc_keymap);
865 set_bit(idx + 64 + 32, sc->sc_keymap);
866 }
867 }
868
869 return idx;
208} 870}
209 871
210static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key) 872static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
211{ 873{
212 int freeslot; 874 ath9k_hw_keyreset(sc->sc_ah, key->hw_key_idx);
875 if (key->hw_key_idx < IEEE80211_WEP_NKID)
876 return;
213 877
214 freeslot = (key->keyidx >= 4) ? 1 : 0; 878 clear_bit(key->hw_key_idx, sc->sc_keymap);
215 ath_key_reset(sc, key->keyidx, freeslot); 879 if (key->alg != ALG_TKIP)
880 return;
881
882 clear_bit(key->hw_key_idx + 64, sc->sc_keymap);
883 if (sc->sc_splitmic) {
884 clear_bit(key->hw_key_idx + 32, sc->sc_keymap);
885 clear_bit(key->hw_key_idx + 64 + 32, sc->sc_keymap);
886 }
216} 887}
217 888
218static void setup_ht_cap(struct ieee80211_ht_info *ht_info) 889static void setup_ht_cap(struct ieee80211_sta_ht_cap *ht_info)
219{ 890{
220#define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */ 891#define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
221#define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */ 892#define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
222 893
223 ht_info->ht_supported = 1; 894 ht_info->ht_supported = true;
224 ht_info->cap = (u16)IEEE80211_HT_CAP_SUP_WIDTH 895 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
225 |(u16)IEEE80211_HT_CAP_SM_PS 896 IEEE80211_HT_CAP_SM_PS |
226 |(u16)IEEE80211_HT_CAP_SGI_40 897 IEEE80211_HT_CAP_SGI_40 |
227 |(u16)IEEE80211_HT_CAP_DSSSCCK40; 898 IEEE80211_HT_CAP_DSSSCCK40;
228 899
229 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536; 900 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
230 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8; 901 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
231 /* setup supported mcs set */ 902 /* set up supported mcs set */
232 memset(ht_info->supp_mcs_set, 0, 16); 903 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
233 ht_info->supp_mcs_set[0] = 0xff; 904 ht_info->mcs.rx_mask[0] = 0xff;
234 ht_info->supp_mcs_set[1] = 0xff; 905 ht_info->mcs.rx_mask[1] = 0xff;
235 ht_info->supp_mcs_set[12] = IEEE80211_HT_CAP_MCS_TX_DEFINED; 906 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
236}
237
238static int ath_rate2idx(struct ath_softc *sc, int rate)
239{
240 int i = 0, cur_band, n_rates;
241 struct ieee80211_hw *hw = sc->hw;
242
243 cur_band = hw->conf.channel->band;
244 n_rates = sc->sbands[cur_band].n_bitrates;
245
246 for (i = 0; i < n_rates; i++) {
247 if (sc->sbands[cur_band].bitrates[i].bitrate == rate)
248 break;
249 }
250
251 /*
252 * NB:mac80211 validates rx rate index against the supported legacy rate
253 * index only (should be done against ht rates also), return the highest
254 * legacy rate index for rx rate which does not match any one of the
255 * supported basic and extended rates to make mac80211 happy.
256 * The following hack will be cleaned up once the issue with
257 * the rx rate index validation in mac80211 is fixed.
258 */
259 if (i == n_rates)
260 return n_rates - 1;
261 return i;
262}
263
264static void ath9k_rx_prepare(struct ath_softc *sc,
265 struct sk_buff *skb,
266 struct ath_recv_status *status,
267 struct ieee80211_rx_status *rx_status)
268{
269 struct ieee80211_hw *hw = sc->hw;
270 struct ieee80211_channel *curchan = hw->conf.channel;
271
272 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
273
274 rx_status->mactime = status->tsf;
275 rx_status->band = curchan->band;
276 rx_status->freq = curchan->center_freq;
277 rx_status->noise = sc->sc_ani.sc_noise_floor;
278 rx_status->signal = rx_status->noise + status->rssi;
279 rx_status->rate_idx = ath_rate2idx(sc, (status->rateKbps / 100));
280 rx_status->antenna = status->antenna;
281
282 /* XXX Fix me, 64 cannot be the max rssi value, rigure it out */
283 rx_status->qual = status->rssi * 100 / 64;
284
285 if (status->flags & ATH_RX_MIC_ERROR)
286 rx_status->flag |= RX_FLAG_MMIC_ERROR;
287 if (status->flags & ATH_RX_FCS_ERROR)
288 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
289
290 rx_status->flag |= RX_FLAG_TSFT;
291}
292
293static u8 parse_mpdudensity(u8 mpdudensity)
294{
295 /*
296 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
297 * 0 for no restriction
298 * 1 for 1/4 us
299 * 2 for 1/2 us
300 * 3 for 1 us
301 * 4 for 2 us
302 * 5 for 4 us
303 * 6 for 8 us
304 * 7 for 16 us
305 */
306 switch (mpdudensity) {
307 case 0:
308 return 0;
309 case 1:
310 case 2:
311 case 3:
312 /* Our lower layer calculations limit our precision to
313 1 microsecond */
314 return 1;
315 case 4:
316 return 2;
317 case 5:
318 return 4;
319 case 6:
320 return 8;
321 case 7:
322 return 16;
323 default:
324 return 0;
325 }
326}
327
328static void ath9k_ht_conf(struct ath_softc *sc,
329 struct ieee80211_bss_conf *bss_conf)
330{
331#define IEEE80211_HT_CAP_40MHZ_INTOLERANT BIT(14)
332 struct ath_ht_info *ht_info = &sc->sc_ht_info;
333
334 if (bss_conf->assoc_ht) {
335 ht_info->ext_chan_offset =
336 bss_conf->ht_bss_conf->bss_cap &
337 IEEE80211_HT_IE_CHA_SEC_OFFSET;
338
339 if (!(bss_conf->ht_conf->cap &
340 IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
341 (bss_conf->ht_bss_conf->bss_cap &
342 IEEE80211_HT_IE_CHA_WIDTH))
343 ht_info->tx_chan_width = ATH9K_HT_MACMODE_2040;
344 else
345 ht_info->tx_chan_width = ATH9K_HT_MACMODE_20;
346
347 ath9k_hw_set11nmac2040(sc->sc_ah, ht_info->tx_chan_width);
348 ht_info->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
349 bss_conf->ht_conf->ampdu_factor);
350 ht_info->mpdudensity =
351 parse_mpdudensity(bss_conf->ht_conf->ampdu_density);
352
353 }
354
355#undef IEEE80211_HT_CAP_40MHZ_INTOLERANT
356} 907}
357 908
358static void ath9k_bss_assoc_info(struct ath_softc *sc, 909static void ath9k_bss_assoc_info(struct ath_softc *sc,
910 struct ieee80211_vif *vif,
359 struct ieee80211_bss_conf *bss_conf) 911 struct ieee80211_bss_conf *bss_conf)
360{ 912{
361 struct ieee80211_hw *hw = sc->hw; 913 struct ath_vap *avp = (void *)vif->drv_priv;
362 struct ieee80211_channel *curchan = hw->conf.channel;
363 struct ath_vap *avp;
364 int pos;
365 DECLARE_MAC_BUF(mac);
366 914
367 if (bss_conf->assoc) { 915 if (bss_conf->assoc) {
368 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Bss Info ASSOC %d\n", 916 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
369 __func__, 917 bss_conf->aid, sc->sc_curbssid);
370 bss_conf->aid);
371
372 avp = sc->sc_vaps[0];
373 if (avp == NULL) {
374 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
375 __func__);
376 return;
377 }
378 918
379 /* New association, store aid */ 919 /* New association, store aid */
380 if (avp->av_opmode == ATH9K_M_STA) { 920 if (avp->av_opmode == NL80211_IFTYPE_STATION) {
381 sc->sc_curaid = bss_conf->aid; 921 sc->sc_curaid = bss_conf->aid;
382 ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid, 922 ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid,
383 sc->sc_curaid); 923 sc->sc_curaid);
@@ -393,175 +933,16 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
393 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; 933 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
394 sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER; 934 sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
395 935
396 /* Update chainmask */
397 ath_update_chainmask(sc, bss_conf->assoc_ht);
398
399 DPRINTF(sc, ATH_DBG_CONFIG,
400 "%s: bssid %s aid 0x%x\n",
401 __func__,
402 print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
403
404 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n",
405 __func__,
406 curchan->center_freq);
407
408 pos = ath_get_channel(sc, curchan);
409 if (pos == -1) {
410 DPRINTF(sc, ATH_DBG_FATAL,
411 "%s: Invalid channel\n", __func__);
412 return;
413 }
414
415 if (hw->conf.ht_conf.ht_supported)
416 sc->sc_ah->ah_channels[pos].chanmode =
417 ath_get_extchanmode(sc, curchan);
418 else
419 sc->sc_ah->ah_channels[pos].chanmode =
420 (curchan->band == IEEE80211_BAND_2GHZ) ?
421 CHANNEL_G : CHANNEL_A;
422
423 /* set h/w channel */
424 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0)
425 DPRINTF(sc, ATH_DBG_FATAL,
426 "%s: Unable to set channel\n",
427 __func__);
428
429 ath_rate_newstate(sc, avp);
430 /* Update ratectrl about the new state */
431 ath_rc_node_update(hw, avp->rc_node);
432
433 /* Start ANI */ 936 /* Start ANI */
434 mod_timer(&sc->sc_ani.timer, 937 mod_timer(&sc->sc_ani.timer,
435 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); 938 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
436 939
437 } else { 940 } else {
438 DPRINTF(sc, ATH_DBG_CONFIG, 941 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISSOC\n");
439 "%s: Bss Info DISSOC\n", __func__);
440 sc->sc_curaid = 0; 942 sc->sc_curaid = 0;
441 } 943 }
442} 944}
443 945
444void ath_get_beaconconfig(struct ath_softc *sc,
445 int if_id,
446 struct ath_beacon_config *conf)
447{
448 struct ieee80211_hw *hw = sc->hw;
449
450 /* fill in beacon config data */
451
452 conf->beacon_interval = hw->conf.beacon_int;
453 conf->listen_interval = 100;
454 conf->dtim_count = 1;
455 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
456}
457
458void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
459 struct ath_xmit_status *tx_status, struct ath_node *an)
460{
461 struct ieee80211_hw *hw = sc->hw;
462 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
463
464 DPRINTF(sc, ATH_DBG_XMIT,
465 "%s: TX complete: skb: %p\n", __func__, skb);
466
467 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
468 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
469 /* free driver's private data area of tx_info */
470 if (tx_info->driver_data[0] != NULL)
471 kfree(tx_info->driver_data[0]);
472 tx_info->driver_data[0] = NULL;
473 }
474
475 if (tx_status->flags & ATH_TX_BAR) {
476 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
477 tx_status->flags &= ~ATH_TX_BAR;
478 }
479
480 if (tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY)) {
481 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
482 /* Frame was not ACKed, but an ACK was expected */
483 tx_info->status.excessive_retries = 1;
484 }
485 } else {
486 /* Frame was ACKed */
487 tx_info->flags |= IEEE80211_TX_STAT_ACK;
488 }
489
490 tx_info->status.retry_count = tx_status->retries;
491
492 ieee80211_tx_status(hw, skb);
493 if (an)
494 ath_node_put(sc, an, ATH9K_BH_STATUS_CHANGE);
495}
496
497int _ath_rx_indicate(struct ath_softc *sc,
498 struct sk_buff *skb,
499 struct ath_recv_status *status,
500 u16 keyix)
501{
502 struct ieee80211_hw *hw = sc->hw;
503 struct ath_node *an = NULL;
504 struct ieee80211_rx_status rx_status;
505 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
506 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
507 int padsize;
508 enum ATH_RX_TYPE st;
509
510 /* see if any padding is done by the hw and remove it */
511 if (hdrlen & 3) {
512 padsize = hdrlen % 4;
513 memmove(skb->data + padsize, skb->data, hdrlen);
514 skb_pull(skb, padsize);
515 }
516
517 /* Prepare rx status */
518 ath9k_rx_prepare(sc, skb, status, &rx_status);
519
520 if (!(keyix == ATH9K_RXKEYIX_INVALID) &&
521 !(status->flags & ATH_RX_DECRYPT_ERROR)) {
522 rx_status.flag |= RX_FLAG_DECRYPTED;
523 } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
524 && !(status->flags & ATH_RX_DECRYPT_ERROR)
525 && skb->len >= hdrlen + 4) {
526 keyix = skb->data[hdrlen + 3] >> 6;
527
528 if (test_bit(keyix, sc->sc_keymap))
529 rx_status.flag |= RX_FLAG_DECRYPTED;
530 }
531
532 spin_lock_bh(&sc->node_lock);
533 an = ath_node_find(sc, hdr->addr2);
534 spin_unlock_bh(&sc->node_lock);
535
536 if (an) {
537 ath_rx_input(sc, an,
538 hw->conf.ht_conf.ht_supported,
539 skb, status, &st);
540 }
541 if (!an || (st != ATH_RX_CONSUMED))
542 __ieee80211_rx(hw, skb, &rx_status);
543
544 return 0;
545}
546
547int ath_rx_subframe(struct ath_node *an,
548 struct sk_buff *skb,
549 struct ath_recv_status *status)
550{
551 struct ath_softc *sc = an->an_sc;
552 struct ieee80211_hw *hw = sc->hw;
553 struct ieee80211_rx_status rx_status;
554
555 /* Prepare rx status */
556 ath9k_rx_prepare(sc, skb, status, &rx_status);
557 if (!(status->flags & ATH_RX_DECRYPT_ERROR))
558 rx_status.flag |= RX_FLAG_DECRYPTED;
559
560 __ieee80211_rx(hw, skb, &rx_status);
561
562 return 0;
563}
564
565/********************************/ 946/********************************/
566/* LED functions */ 947/* LED functions */
567/********************************/ 948/********************************/
@@ -677,7 +1058,8 @@ fail:
677 ath_deinit_leds(sc); 1058 ath_deinit_leds(sc);
678} 1059}
679 1060
680#ifdef CONFIG_RFKILL 1061#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1062
681/*******************/ 1063/*******************/
682/* Rfkill */ 1064/* Rfkill */
683/*******************/ 1065/*******************/
@@ -689,14 +1071,14 @@ static void ath_radio_enable(struct ath_softc *sc)
689 1071
690 spin_lock_bh(&sc->sc_resetlock); 1072 spin_lock_bh(&sc->sc_resetlock);
691 if (!ath9k_hw_reset(ah, ah->ah_curchan, 1073 if (!ath9k_hw_reset(ah, ah->ah_curchan,
692 sc->sc_ht_info.tx_chan_width, 1074 sc->tx_chan_width,
693 sc->sc_tx_chainmask, 1075 sc->sc_tx_chainmask,
694 sc->sc_rx_chainmask, 1076 sc->sc_rx_chainmask,
695 sc->sc_ht_extprotspacing, 1077 sc->sc_ht_extprotspacing,
696 false, &status)) { 1078 false, &status)) {
697 DPRINTF(sc, ATH_DBG_FATAL, 1079 DPRINTF(sc, ATH_DBG_FATAL,
698 "%s: unable to reset channel %u (%uMhz) " 1080 "Unable to reset channel %u (%uMhz) "
699 "flags 0x%x hal status %u\n", __func__, 1081 "flags 0x%x hal status %u\n",
700 ath9k_hw_mhz2ieee(ah, 1082 ath9k_hw_mhz2ieee(ah,
701 ah->ah_curchan->channel, 1083 ah->ah_curchan->channel,
702 ah->ah_curchan->channelFlags), 1084 ah->ah_curchan->channelFlags),
@@ -708,7 +1090,7 @@ static void ath_radio_enable(struct ath_softc *sc)
708 ath_update_txpow(sc); 1090 ath_update_txpow(sc);
709 if (ath_startrecv(sc) != 0) { 1091 if (ath_startrecv(sc) != 0) {
710 DPRINTF(sc, ATH_DBG_FATAL, 1092 DPRINTF(sc, ATH_DBG_FATAL,
711 "%s: unable to restart recv logic\n", __func__); 1093 "Unable to restart recv logic\n");
712 return; 1094 return;
713 } 1095 }
714 1096
@@ -747,14 +1129,14 @@ static void ath_radio_disable(struct ath_softc *sc)
747 1129
748 spin_lock_bh(&sc->sc_resetlock); 1130 spin_lock_bh(&sc->sc_resetlock);
749 if (!ath9k_hw_reset(ah, ah->ah_curchan, 1131 if (!ath9k_hw_reset(ah, ah->ah_curchan,
750 sc->sc_ht_info.tx_chan_width, 1132 sc->tx_chan_width,
751 sc->sc_tx_chainmask, 1133 sc->sc_tx_chainmask,
752 sc->sc_rx_chainmask, 1134 sc->sc_rx_chainmask,
753 sc->sc_ht_extprotspacing, 1135 sc->sc_ht_extprotspacing,
754 false, &status)) { 1136 false, &status)) {
755 DPRINTF(sc, ATH_DBG_FATAL, 1137 DPRINTF(sc, ATH_DBG_FATAL,
756 "%s: unable to reset channel %u (%uMhz) " 1138 "Unable to reset channel %u (%uMhz) "
757 "flags 0x%x hal status %u\n", __func__, 1139 "flags 0x%x hal status %u\n",
758 ath9k_hw_mhz2ieee(ah, 1140 ath9k_hw_mhz2ieee(ah,
759 ah->ah_curchan->channel, 1141 ah->ah_curchan->channel,
760 ah->ah_curchan->channelFlags), 1142 ah->ah_curchan->channelFlags),
@@ -834,7 +1216,7 @@ static int ath_sw_toggle_radio(void *data, enum rfkill_state state)
834 sc->sc_flags &= ~SC_OP_RFKILL_SW_BLOCKED; 1216 sc->sc_flags &= ~SC_OP_RFKILL_SW_BLOCKED;
835 if (sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED) { 1217 if (sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED) {
836 DPRINTF(sc, ATH_DBG_FATAL, "Can't turn on the" 1218 DPRINTF(sc, ATH_DBG_FATAL, "Can't turn on the"
837 "radio as it is disabled by h/w \n"); 1219 "radio as it is disabled by h/w\n");
838 return -EPERM; 1220 return -EPERM;
839 } 1221 }
840 ath_radio_enable(sc); 1222 ath_radio_enable(sc);
@@ -878,61 +1260,258 @@ static void ath_deinit_rfkill(struct ath_softc *sc)
878 sc->rf_kill.rfkill = NULL; 1260 sc->rf_kill.rfkill = NULL;
879 } 1261 }
880} 1262}
1263
1264static int ath_start_rfkill_poll(struct ath_softc *sc)
1265{
1266 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1267 queue_delayed_work(sc->hw->workqueue,
1268 &sc->rf_kill.rfkill_poll, 0);
1269
1270 if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) {
1271 if (rfkill_register(sc->rf_kill.rfkill)) {
1272 DPRINTF(sc, ATH_DBG_FATAL,
1273 "Unable to register rfkill\n");
1274 rfkill_free(sc->rf_kill.rfkill);
1275
1276 /* Deinitialize the device */
1277 ath_detach(sc);
1278 if (sc->pdev->irq)
1279 free_irq(sc->pdev->irq, sc);
1280 pci_iounmap(sc->pdev, sc->mem);
1281 pci_release_region(sc->pdev, 0);
1282 pci_disable_device(sc->pdev);
1283 ieee80211_free_hw(sc->hw);
1284 return -EIO;
1285 } else {
1286 sc->sc_flags |= SC_OP_RFKILL_REGISTERED;
1287 }
1288 }
1289
1290 return 0;
1291}
881#endif /* CONFIG_RFKILL */ 1292#endif /* CONFIG_RFKILL */
882 1293
883static int ath_detach(struct ath_softc *sc) 1294static void ath_detach(struct ath_softc *sc)
884{ 1295{
885 struct ieee80211_hw *hw = sc->hw; 1296 struct ieee80211_hw *hw = sc->hw;
1297 int i = 0;
886 1298
887 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach ATH hw\n", __func__); 1299 DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");
888
889 /* Deinit LED control */
890 ath_deinit_leds(sc);
891 1300
892#ifdef CONFIG_RFKILL 1301#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
893 /* deinit rfkill */
894 ath_deinit_rfkill(sc); 1302 ath_deinit_rfkill(sc);
895#endif 1303#endif
896 1304 ath_deinit_leds(sc);
897 /* Unregister hw */
898 1305
899 ieee80211_unregister_hw(hw); 1306 ieee80211_unregister_hw(hw);
900
901 /* unregister Rate control */
902 ath_rate_control_unregister();
903
904 /* tx/rx cleanup */
905
906 ath_rx_cleanup(sc); 1307 ath_rx_cleanup(sc);
907 ath_tx_cleanup(sc); 1308 ath_tx_cleanup(sc);
908 1309
909 /* Deinit */ 1310 tasklet_kill(&sc->intr_tq);
1311 tasklet_kill(&sc->bcon_tasklet);
910 1312
911 ath_deinit(sc); 1313 if (!(sc->sc_flags & SC_OP_INVALID))
1314 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
912 1315
913 return 0; 1316 /* cleanup tx queues */
1317 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1318 if (ATH_TXQ_SETUP(sc, i))
1319 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1320
1321 ath9k_hw_detach(sc->sc_ah);
1322 ath9k_exit_debug(sc);
914} 1323}
915 1324
916static int ath_attach(u16 devid, 1325static int ath_init(u16 devid, struct ath_softc *sc)
917 struct ath_softc *sc)
918{ 1326{
919 struct ieee80211_hw *hw = sc->hw; 1327 struct ath_hal *ah = NULL;
920 int error = 0; 1328 int status;
1329 int error = 0, i;
1330 int csz = 0;
921 1331
922 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach ATH hw\n", __func__); 1332 /* XXX: hardware will not be ready until ath_open() being called */
1333 sc->sc_flags |= SC_OP_INVALID;
923 1334
924 error = ath_init(devid, sc); 1335 if (ath9k_init_debug(sc) < 0)
925 if (error != 0) 1336 printk(KERN_ERR "Unable to create debugfs files\n");
926 return error;
927 1337
928 /* Init nodes */ 1338 spin_lock_init(&sc->sc_resetlock);
1339 mutex_init(&sc->mutex);
1340 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1341 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
1342 (unsigned long)sc);
929 1343
930 INIT_LIST_HEAD(&sc->node_list); 1344 /*
931 spin_lock_init(&sc->node_lock); 1345 * Cache line size is used to size and align various
1346 * structures used to communicate with the hardware.
1347 */
1348 bus_read_cachesize(sc, &csz);
1349 /* XXX assert csz is non-zero */
1350 sc->sc_cachelsz = csz << 2; /* convert to bytes */
932 1351
933 /* get mac address from hardware and set in mac80211 */ 1352 ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
1353 if (ah == NULL) {
1354 DPRINTF(sc, ATH_DBG_FATAL,
1355 "Unable to attach hardware; HAL status %u\n", status);
1356 error = -ENXIO;
1357 goto bad;
1358 }
1359 sc->sc_ah = ah;
934 1360
935 SET_IEEE80211_PERM_ADDR(hw, sc->sc_myaddr); 1361 /* Get the hardware key cache size. */
1362 sc->sc_keymax = ah->ah_caps.keycache_size;
1363 if (sc->sc_keymax > ATH_KEYMAX) {
1364 DPRINTF(sc, ATH_DBG_KEYCACHE,
1365 "Warning, using only %u entries in %u key cache\n",
1366 ATH_KEYMAX, sc->sc_keymax);
1367 sc->sc_keymax = ATH_KEYMAX;
1368 }
1369
1370 /*
1371 * Reset the key cache since some parts do not
1372 * reset the contents on initial power up.
1373 */
1374 for (i = 0; i < sc->sc_keymax; i++)
1375 ath9k_hw_keyreset(ah, (u16) i);
1376
1377 /* Collect the channel list using the default country code */
1378
1379 error = ath_setup_channels(sc);
1380 if (error)
1381 goto bad;
1382
1383 /* default to MONITOR mode */
1384 sc->sc_ah->ah_opmode = NL80211_IFTYPE_MONITOR;
1385
1386
1387 /* Setup rate tables */
1388
1389 ath_rate_attach(sc);
1390 ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
1391 ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
1392
1393 /*
1394 * Allocate hardware transmit queues: one queue for
1395 * beacon frames and one data queue for each QoS
1396 * priority. Note that the hal handles reseting
1397 * these queues at the needed time.
1398 */
1399 sc->beacon.beaconq = ath_beaconq_setup(ah);
1400 if (sc->beacon.beaconq == -1) {
1401 DPRINTF(sc, ATH_DBG_FATAL,
1402 "Unable to setup a beacon xmit queue\n");
1403 error = -EIO;
1404 goto bad2;
1405 }
1406 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1407 if (sc->beacon.cabq == NULL) {
1408 DPRINTF(sc, ATH_DBG_FATAL,
1409 "Unable to setup CAB xmit queue\n");
1410 error = -EIO;
1411 goto bad2;
1412 }
1413
1414 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
1415 ath_cabq_update(sc);
1416
1417 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
1418 sc->tx.hwq_map[i] = -1;
1419
1420 /* Setup data queues */
1421 /* NB: ensure BK queue is the lowest priority h/w queue */
1422 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1423 DPRINTF(sc, ATH_DBG_FATAL,
1424 "Unable to setup xmit queue for BK traffic\n");
1425 error = -EIO;
1426 goto bad2;
1427 }
1428
1429 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1430 DPRINTF(sc, ATH_DBG_FATAL,
1431 "Unable to setup xmit queue for BE traffic\n");
1432 error = -EIO;
1433 goto bad2;
1434 }
1435 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1436 DPRINTF(sc, ATH_DBG_FATAL,
1437 "Unable to setup xmit queue for VI traffic\n");
1438 error = -EIO;
1439 goto bad2;
1440 }
1441 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1442 DPRINTF(sc, ATH_DBG_FATAL,
1443 "Unable to setup xmit queue for VO traffic\n");
1444 error = -EIO;
1445 goto bad2;
1446 }
1447
1448 /* Initializes the noise floor to a reasonable default value.
1449 * Later on this will be updated during ANI processing. */
1450
1451 sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1452 setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc);
1453
1454 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1455 ATH9K_CIPHER_TKIP, NULL)) {
1456 /*
1457 * Whether we should enable h/w TKIP MIC.
1458 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1459 * report WMM capable, so it's always safe to turn on
1460 * TKIP MIC in this case.
1461 */
1462 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1463 0, 1, NULL);
1464 }
1465
1466 /*
1467 * Check whether the separate key cache entries
1468 * are required to handle both tx+rx MIC keys.
1469 * With split mic keys the number of stations is limited
1470 * to 27 otherwise 59.
1471 */
1472 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1473 ATH9K_CIPHER_TKIP, NULL)
1474 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1475 ATH9K_CIPHER_MIC, NULL)
1476 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1477 0, NULL))
1478 sc->sc_splitmic = 1;
1479
1480 /* turn on mcast key search if possible */
1481 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1482 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1483 1, NULL);
1484
1485 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
1486 sc->sc_config.txpowlimit_override = 0;
1487
1488 /* 11n Capabilities */
1489 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
1490 sc->sc_flags |= SC_OP_TXAGGR;
1491 sc->sc_flags |= SC_OP_RXAGGR;
1492 }
1493
1494 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
1495 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
1496
1497 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1498 sc->rx.defant = ath9k_hw_getdefantenna(ah);
1499
1500 ath9k_hw_getmac(ah, sc->sc_myaddr);
1501 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
1502 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
1503 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
1504 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
1505 }
1506
1507 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1508
1509 /* initialize beacon slots */
1510 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
1511 sc->beacon.bslot[i] = ATH_IF_ID_ANY;
1512
1513 /* save MISC configurations */
1514 sc->sc_config.swBeaconProcess = 1;
936 1515
937 /* setup channels and rates */ 1516 /* setup channels and rates */
938 1517
@@ -942,55 +1521,81 @@ static int ath_attach(u16 devid,
942 sc->rates[IEEE80211_BAND_2GHZ]; 1521 sc->rates[IEEE80211_BAND_2GHZ];
943 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ; 1522 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
944 1523
945 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
946 /* Setup HT capabilities for 2.4Ghz*/
947 setup_ht_cap(&sc->sbands[IEEE80211_BAND_2GHZ].ht_info);
948
949 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
950 &sc->sbands[IEEE80211_BAND_2GHZ];
951
952 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) { 1524 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
953 sc->sbands[IEEE80211_BAND_5GHZ].channels = 1525 sc->sbands[IEEE80211_BAND_5GHZ].channels =
954 sc->channels[IEEE80211_BAND_5GHZ]; 1526 sc->channels[IEEE80211_BAND_5GHZ];
955 sc->sbands[IEEE80211_BAND_5GHZ].bitrates = 1527 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
956 sc->rates[IEEE80211_BAND_5GHZ]; 1528 sc->rates[IEEE80211_BAND_5GHZ];
957 sc->sbands[IEEE80211_BAND_5GHZ].band = 1529 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
958 IEEE80211_BAND_5GHZ; 1530 }
959 1531
960 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) 1532 return 0;
961 /* Setup HT capabilities for 5Ghz*/ 1533bad2:
962 setup_ht_cap(&sc->sbands[IEEE80211_BAND_5GHZ].ht_info); 1534 /* cleanup tx queues */
1535 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1536 if (ATH_TXQ_SETUP(sc, i))
1537 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1538bad:
1539 if (ah)
1540 ath9k_hw_detach(ah);
963 1541
964 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 1542 return error;
965 &sc->sbands[IEEE80211_BAND_5GHZ]; 1543}
966 } 1544
1545static int ath_attach(u16 devid, struct ath_softc *sc)
1546{
1547 struct ieee80211_hw *hw = sc->hw;
1548 int error = 0;
1549
1550 DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n");
1551
1552 error = ath_init(devid, sc);
1553 if (error != 0)
1554 return error;
1555
1556 /* get mac address from hardware and set in mac80211 */
1557
1558 SET_IEEE80211_PERM_ADDR(hw, sc->sc_myaddr);
1559
1560 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1561 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1562 IEEE80211_HW_SIGNAL_DBM |
1563 IEEE80211_HW_AMPDU_AGGREGATION;
967 1564
968 /* FIXME: Have to figure out proper hw init values later */ 1565 hw->wiphy->interface_modes =
1566 BIT(NL80211_IFTYPE_AP) |
1567 BIT(NL80211_IFTYPE_STATION) |
1568 BIT(NL80211_IFTYPE_ADHOC);
969 1569
970 hw->queues = 4; 1570 hw->queues = 4;
971 hw->ampdu_queues = 1; 1571 hw->max_rates = 4;
1572 hw->max_rate_tries = ATH_11N_TXMAXTRY;
1573 hw->sta_data_size = sizeof(struct ath_node);
1574 hw->vif_data_size = sizeof(struct ath_vap);
972 1575
973 /* Register rate control */
974 hw->rate_control_algorithm = "ath9k_rate_control"; 1576 hw->rate_control_algorithm = "ath9k_rate_control";
975 error = ath_rate_control_register();
976 if (error != 0) {
977 DPRINTF(sc, ATH_DBG_FATAL,
978 "%s: Unable to register rate control "
979 "algorithm:%d\n", __func__, error);
980 ath_rate_control_unregister();
981 goto bad;
982 }
983 1577
984 error = ieee80211_register_hw(hw); 1578 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
985 if (error != 0) { 1579 setup_ht_cap(&sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
986 ath_rate_control_unregister(); 1580 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes))
987 goto bad; 1581 setup_ht_cap(&sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
988 } 1582 }
989 1583
990 /* Initialize LED control */ 1584 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &sc->sbands[IEEE80211_BAND_2GHZ];
991 ath_init_leds(sc); 1585 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes))
1586 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1587 &sc->sbands[IEEE80211_BAND_5GHZ];
992 1588
993#ifdef CONFIG_RFKILL 1589 /* initialize tx/rx engine */
1590 error = ath_tx_init(sc, ATH_TXBUF);
1591 if (error != 0)
1592 goto detach;
1593
1594 error = ath_rx_init(sc, ATH_RXBUF);
1595 if (error != 0)
1596 goto detach;
1597
1598#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
994 /* Initialze h/w Rfkill */ 1599 /* Initialze h/w Rfkill */
995 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 1600 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
996 INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll); 1601 INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll);
@@ -1000,88 +1605,371 @@ static int ath_attach(u16 devid,
1000 goto detach; 1605 goto detach;
1001#endif 1606#endif
1002 1607
1003 /* initialize tx/rx engine */ 1608 error = ieee80211_register_hw(hw);
1004
1005 error = ath_tx_init(sc, ATH_TXBUF);
1006 if (error != 0)
1007 goto detach;
1008 1609
1009 error = ath_rx_init(sc, ATH_RXBUF); 1610 /* Initialize LED control */
1010 if (error != 0) 1611 ath_init_leds(sc);
1011 goto detach;
1012 1612
1013 return 0; 1613 return 0;
1014detach: 1614detach:
1015 ath_detach(sc); 1615 ath_detach(sc);
1016bad:
1017 return error; 1616 return error;
1018} 1617}
1019 1618
1619int ath_reset(struct ath_softc *sc, bool retry_tx)
1620{
1621 struct ath_hal *ah = sc->sc_ah;
1622 int status;
1623 int error = 0;
1624
1625 ath9k_hw_set_interrupts(ah, 0);
1626 ath_draintxq(sc, retry_tx);
1627 ath_stoprecv(sc);
1628 ath_flushrecv(sc);
1629
1630 spin_lock_bh(&sc->sc_resetlock);
1631 if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
1632 sc->tx_chan_width,
1633 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1634 sc->sc_ht_extprotspacing, false, &status)) {
1635 DPRINTF(sc, ATH_DBG_FATAL,
1636 "Unable to reset hardware; hal status %u\n", status);
1637 error = -EIO;
1638 }
1639 spin_unlock_bh(&sc->sc_resetlock);
1640
1641 if (ath_startrecv(sc) != 0)
1642 DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n");
1643
1644 /*
1645 * We may be doing a reset in response to a request
1646 * that changes the channel so update any state that
1647 * might change as a result.
1648 */
1649 ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));
1650
1651 ath_update_txpow(sc);
1652
1653 if (sc->sc_flags & SC_OP_BEACONS)
1654 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
1655
1656 ath9k_hw_set_interrupts(ah, sc->sc_imask);
1657
1658 if (retry_tx) {
1659 int i;
1660 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1661 if (ATH_TXQ_SETUP(sc, i)) {
1662 spin_lock_bh(&sc->tx.txq[i].axq_lock);
1663 ath_txq_schedule(sc, &sc->tx.txq[i]);
1664 spin_unlock_bh(&sc->tx.txq[i].axq_lock);
1665 }
1666 }
1667 }
1668
1669 return error;
1670}
1671
1672/*
1673 * This function will allocate both the DMA descriptor structure, and the
1674 * buffers it contains. These are used to contain the descriptors used
1675 * by the system.
1676*/
1677int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1678 struct list_head *head, const char *name,
1679 int nbuf, int ndesc)
1680{
1681#define DS2PHYS(_dd, _ds) \
1682 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1683#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1684#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1685
1686 struct ath_desc *ds;
1687 struct ath_buf *bf;
1688 int i, bsize, error;
1689
1690 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
1691 name, nbuf, ndesc);
1692
1693 /* ath_desc must be a multiple of DWORDs */
1694 if ((sizeof(struct ath_desc) % 4) != 0) {
1695 DPRINTF(sc, ATH_DBG_FATAL, "ath_desc not DWORD aligned\n");
1696 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1697 error = -ENOMEM;
1698 goto fail;
1699 }
1700
1701 dd->dd_name = name;
1702 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1703
1704 /*
1705 * Need additional DMA memory because we can't use
1706 * descriptors that cross the 4K page boundary. Assume
1707 * one skipped descriptor per 4K page.
1708 */
1709 if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1710 u32 ndesc_skipped =
1711 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1712 u32 dma_len;
1713
1714 while (ndesc_skipped) {
1715 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1716 dd->dd_desc_len += dma_len;
1717
1718 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1719 };
1720 }
1721
1722 /* allocate descriptors */
1723 dd->dd_desc = pci_alloc_consistent(sc->pdev,
1724 dd->dd_desc_len,
1725 &dd->dd_desc_paddr);
1726 if (dd->dd_desc == NULL) {
1727 error = -ENOMEM;
1728 goto fail;
1729 }
1730 ds = dd->dd_desc;
1731 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
1732 dd->dd_name, ds, (u32) dd->dd_desc_len,
1733 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1734
1735 /* allocate buffers */
1736 bsize = sizeof(struct ath_buf) * nbuf;
1737 bf = kmalloc(bsize, GFP_KERNEL);
1738 if (bf == NULL) {
1739 error = -ENOMEM;
1740 goto fail2;
1741 }
1742 memset(bf, 0, bsize);
1743 dd->dd_bufptr = bf;
1744
1745 INIT_LIST_HEAD(head);
1746 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1747 bf->bf_desc = ds;
1748 bf->bf_daddr = DS2PHYS(dd, ds);
1749
1750 if (!(sc->sc_ah->ah_caps.hw_caps &
1751 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1752 /*
1753 * Skip descriptor addresses which can cause 4KB
1754 * boundary crossing (addr + length) with a 32 dword
1755 * descriptor fetch.
1756 */
1757 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1758 ASSERT((caddr_t) bf->bf_desc <
1759 ((caddr_t) dd->dd_desc +
1760 dd->dd_desc_len));
1761
1762 ds += ndesc;
1763 bf->bf_desc = ds;
1764 bf->bf_daddr = DS2PHYS(dd, ds);
1765 }
1766 }
1767 list_add_tail(&bf->list, head);
1768 }
1769 return 0;
1770fail2:
1771 pci_free_consistent(sc->pdev,
1772 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1773fail:
1774 memset(dd, 0, sizeof(*dd));
1775 return error;
1776#undef ATH_DESC_4KB_BOUND_CHECK
1777#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1778#undef DS2PHYS
1779}
1780
1781void ath_descdma_cleanup(struct ath_softc *sc,
1782 struct ath_descdma *dd,
1783 struct list_head *head)
1784{
1785 pci_free_consistent(sc->pdev,
1786 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1787
1788 INIT_LIST_HEAD(head);
1789 kfree(dd->dd_bufptr);
1790 memset(dd, 0, sizeof(*dd));
1791}
1792
1793int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1794{
1795 int qnum;
1796
1797 switch (queue) {
1798 case 0:
1799 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO];
1800 break;
1801 case 1:
1802 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI];
1803 break;
1804 case 2:
1805 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
1806 break;
1807 case 3:
1808 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK];
1809 break;
1810 default:
1811 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
1812 break;
1813 }
1814
1815 return qnum;
1816}
1817
1818int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1819{
1820 int qnum;
1821
1822 switch (queue) {
1823 case ATH9K_WME_AC_VO:
1824 qnum = 0;
1825 break;
1826 case ATH9K_WME_AC_VI:
1827 qnum = 1;
1828 break;
1829 case ATH9K_WME_AC_BE:
1830 qnum = 2;
1831 break;
1832 case ATH9K_WME_AC_BK:
1833 qnum = 3;
1834 break;
1835 default:
1836 qnum = -1;
1837 break;
1838 }
1839
1840 return qnum;
1841}
1842
1843/**********************/
1844/* mac80211 callbacks */
1845/**********************/
1846
1020static int ath9k_start(struct ieee80211_hw *hw) 1847static int ath9k_start(struct ieee80211_hw *hw)
1021{ 1848{
1022 struct ath_softc *sc = hw->priv; 1849 struct ath_softc *sc = hw->priv;
1023 struct ieee80211_channel *curchan = hw->conf.channel; 1850 struct ieee80211_channel *curchan = hw->conf.channel;
1024 int error = 0, pos; 1851 struct ath9k_channel *init_channel;
1852 int error = 0, pos, status;
1025 1853
1026 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Starting driver with " 1854 DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with "
1027 "initial channel: %d MHz\n", __func__, curchan->center_freq); 1855 "initial channel: %d MHz\n", curchan->center_freq);
1028 1856
1029 /* setup initial channel */ 1857 /* setup initial channel */
1030 1858
1031 pos = ath_get_channel(sc, curchan); 1859 pos = ath_get_channel(sc, curchan);
1032 if (pos == -1) { 1860 if (pos == -1) {
1033 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__); 1861 DPRINTF(sc, ATH_DBG_FATAL, "Invalid channel: %d\n", curchan->center_freq);
1034 return -EINVAL; 1862 error = -EINVAL;
1863 goto error;
1035 } 1864 }
1036 1865
1866 sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1037 sc->sc_ah->ah_channels[pos].chanmode = 1867 sc->sc_ah->ah_channels[pos].chanmode =
1038 (curchan->band == IEEE80211_BAND_2GHZ) ? CHANNEL_G : CHANNEL_A; 1868 (curchan->band == IEEE80211_BAND_2GHZ) ? CHANNEL_G : CHANNEL_A;
1869 init_channel = &sc->sc_ah->ah_channels[pos];
1039 1870
1040 /* open ath_dev */ 1871 /* Reset SERDES registers */
1041 error = ath_open(sc, &sc->sc_ah->ah_channels[pos]); 1872 ath9k_hw_configpcipowersave(sc->sc_ah, 0);
1042 if (error) { 1873
1874 /*
1875 * The basic interface to setting the hardware in a good
1876 * state is ``reset''. On return the hardware is known to
1877 * be powered up and with interrupts disabled. This must
1878 * be followed by initialization of the appropriate bits
1879 * and then setup of the interrupt mask.
1880 */
1881 spin_lock_bh(&sc->sc_resetlock);
1882 if (!ath9k_hw_reset(sc->sc_ah, init_channel,
1883 sc->tx_chan_width,
1884 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1885 sc->sc_ht_extprotspacing, false, &status)) {
1043 DPRINTF(sc, ATH_DBG_FATAL, 1886 DPRINTF(sc, ATH_DBG_FATAL,
1044 "%s: Unable to complete ath_open\n", __func__); 1887 "Unable to reset hardware; hal status %u "
1045 return error; 1888 "(freq %u flags 0x%x)\n", status,
1889 init_channel->channel, init_channel->channelFlags);
1890 error = -EIO;
1891 spin_unlock_bh(&sc->sc_resetlock);
1892 goto error;
1046 } 1893 }
1894 spin_unlock_bh(&sc->sc_resetlock);
1047 1895
1048#ifdef CONFIG_RFKILL 1896 /*
1049 /* Start rfkill polling */ 1897 * This is needed only to setup initial state
1050 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 1898 * but it's best done after a reset.
1051 queue_delayed_work(sc->hw->workqueue, 1899 */
1052 &sc->rf_kill.rfkill_poll, 0); 1900 ath_update_txpow(sc);
1053
1054 if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) {
1055 if (rfkill_register(sc->rf_kill.rfkill)) {
1056 DPRINTF(sc, ATH_DBG_FATAL,
1057 "Unable to register rfkill\n");
1058 rfkill_free(sc->rf_kill.rfkill);
1059 1901
1060 /* Deinitialize the device */ 1902 /*
1061 if (sc->pdev->irq) 1903 * Setup the hardware after reset:
1062 free_irq(sc->pdev->irq, sc); 1904 * The receive engine is set going.
1063 ath_detach(sc); 1905 * Frame transmit is handled entirely
1064 pci_iounmap(sc->pdev, sc->mem); 1906 * in the frame output path; there's nothing to do
1065 pci_release_region(sc->pdev, 0); 1907 * here except setup the interrupt mask.
1066 pci_disable_device(sc->pdev); 1908 */
1067 ieee80211_free_hw(hw); 1909 if (ath_startrecv(sc) != 0) {
1068 return -EIO; 1910 DPRINTF(sc, ATH_DBG_FATAL,
1069 } else { 1911 "Unable to start recv logic\n");
1070 sc->sc_flags |= SC_OP_RFKILL_REGISTERED; 1912 error = -EIO;
1071 } 1913 goto error;
1072 } 1914 }
1915
1916 /* Setup our intr mask. */
1917 sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
1918 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
1919 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
1920
1921 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
1922 sc->sc_imask |= ATH9K_INT_GTT;
1923
1924 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
1925 sc->sc_imask |= ATH9K_INT_CST;
1926
1927 /*
1928 * Enable MIB interrupts when there are hardware phy counters.
1929 * Note we only do this (at the moment) for station mode.
1930 */
1931 if (ath9k_hw_phycounters(sc->sc_ah) &&
1932 ((sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION) ||
1933 (sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC)))
1934 sc->sc_imask |= ATH9K_INT_MIB;
1935 /*
1936 * Some hardware processes the TIM IE and fires an
1937 * interrupt when the TIM bit is set. For hardware
1938 * that does, if not overridden by configuration,
1939 * enable the TIM interrupt when operating as station.
1940 */
1941 if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
1942 (sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION) &&
1943 !sc->sc_config.swBeaconProcess)
1944 sc->sc_imask |= ATH9K_INT_TIM;
1945
1946 ath_setcurmode(sc, ath_chan2mode(init_channel));
1947
1948 sc->sc_flags &= ~SC_OP_INVALID;
1949
1950 /* Disable BMISS interrupt when we're not associated */
1951 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1952 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
1953
1954 ieee80211_wake_queues(sc->hw);
1955
1956#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1957 error = ath_start_rfkill_poll(sc);
1073#endif 1958#endif
1074 1959
1075 ieee80211_wake_queues(hw); 1960error:
1076 return 0; 1961 return error;
1077} 1962}
1078 1963
1079static int ath9k_tx(struct ieee80211_hw *hw, 1964static int ath9k_tx(struct ieee80211_hw *hw,
1080 struct sk_buff *skb) 1965 struct sk_buff *skb)
1081{ 1966{
1967 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1082 struct ath_softc *sc = hw->priv; 1968 struct ath_softc *sc = hw->priv;
1969 struct ath_tx_control txctl;
1083 int hdrlen, padsize; 1970 int hdrlen, padsize;
1084 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1971
1972 memset(&txctl, 0, sizeof(struct ath_tx_control));
1085 1973
1086 /* 1974 /*
1087 * As a temporary workaround, assign seq# here; this will likely need 1975 * As a temporary workaround, assign seq# here; this will likely need
@@ -1091,9 +1979,9 @@ static int ath9k_tx(struct ieee80211_hw *hw,
1091 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 1979 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1092 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1980 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1093 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) 1981 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1094 sc->seq_no += 0x10; 1982 sc->tx.seq_no += 0x10;
1095 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 1983 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1096 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no); 1984 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1097 } 1985 }
1098 1986
1099 /* Add the padding after the header if this is not already done */ 1987 /* Add the padding after the header if this is not already done */
@@ -1106,45 +1994,68 @@ static int ath9k_tx(struct ieee80211_hw *hw,
1106 memmove(skb->data, skb->data + padsize, hdrlen); 1994 memmove(skb->data, skb->data + padsize, hdrlen);
1107 } 1995 }
1108 1996
1109 DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting packet, skb: %p\n", 1997 /* Check if a tx queue is available */
1110 __func__,
1111 skb);
1112 1998
1113 if (ath_tx_start(sc, skb) != 0) { 1999 txctl.txq = ath_test_get_txq(sc, skb);
1114 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__); 2000 if (!txctl.txq)
1115 dev_kfree_skb_any(skb); 2001 goto exit;
1116 /* FIXME: Check for proper return value from ATH_DEV */ 2002
1117 return 0; 2003 DPRINTF(sc, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
2004
2005 if (ath_tx_start(sc, skb, &txctl) != 0) {
2006 DPRINTF(sc, ATH_DBG_XMIT, "TX failed\n");
2007 goto exit;
1118 } 2008 }
1119 2009
1120 return 0; 2010 return 0;
2011exit:
2012 dev_kfree_skb_any(skb);
2013 return 0;
1121} 2014}
1122 2015
1123static void ath9k_stop(struct ieee80211_hw *hw) 2016static void ath9k_stop(struct ieee80211_hw *hw)
1124{ 2017{
1125 struct ath_softc *sc = hw->priv; 2018 struct ath_softc *sc = hw->priv;
1126 int error;
1127 2019
1128 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Driver halt\n", __func__); 2020 if (sc->sc_flags & SC_OP_INVALID) {
2021 DPRINTF(sc, ATH_DBG_ANY, "Device not present\n");
2022 return;
2023 }
1129 2024
1130 error = ath_suspend(sc); 2025 DPRINTF(sc, ATH_DBG_CONFIG, "Cleaning up\n");
1131 if (error) 2026
1132 DPRINTF(sc, ATH_DBG_CONFIG, 2027 ieee80211_stop_queues(sc->hw);
1133 "%s: Device is no longer present\n", __func__); 2028
2029 /* make sure h/w will not generate any interrupt
2030 * before setting the invalid flag. */
2031 ath9k_hw_set_interrupts(sc->sc_ah, 0);
1134 2032
1135 ieee80211_stop_queues(hw); 2033 if (!(sc->sc_flags & SC_OP_INVALID)) {
2034 ath_draintxq(sc, false);
2035 ath_stoprecv(sc);
2036 ath9k_hw_phy_disable(sc->sc_ah);
2037 } else
2038 sc->rx.rxlink = NULL;
1136 2039
1137#ifdef CONFIG_RFKILL 2040#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1138 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 2041 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1139 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll); 2042 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
1140#endif 2043#endif
2044 /* disable HAL and put h/w to sleep */
2045 ath9k_hw_disable(sc->sc_ah);
2046 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
2047
2048 sc->sc_flags |= SC_OP_INVALID;
2049
2050 DPRINTF(sc, ATH_DBG_CONFIG, "Driver halt\n");
1141} 2051}
1142 2052
1143static int ath9k_add_interface(struct ieee80211_hw *hw, 2053static int ath9k_add_interface(struct ieee80211_hw *hw,
1144 struct ieee80211_if_init_conf *conf) 2054 struct ieee80211_if_init_conf *conf)
1145{ 2055{
1146 struct ath_softc *sc = hw->priv; 2056 struct ath_softc *sc = hw->priv;
1147 int error, ic_opmode = 0; 2057 struct ath_vap *avp = (void *)conf->vif->drv_priv;
2058 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
1148 2059
1149 /* Support only vap for now */ 2060 /* Support only vap for now */
1150 2061
@@ -1153,32 +2064,34 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1153 2064
1154 switch (conf->type) { 2065 switch (conf->type) {
1155 case NL80211_IFTYPE_STATION: 2066 case NL80211_IFTYPE_STATION:
1156 ic_opmode = ATH9K_M_STA; 2067 ic_opmode = NL80211_IFTYPE_STATION;
1157 break; 2068 break;
1158 case NL80211_IFTYPE_ADHOC: 2069 case NL80211_IFTYPE_ADHOC:
1159 ic_opmode = ATH9K_M_IBSS; 2070 ic_opmode = NL80211_IFTYPE_ADHOC;
1160 break; 2071 break;
1161 case NL80211_IFTYPE_AP: 2072 case NL80211_IFTYPE_AP:
1162 ic_opmode = ATH9K_M_HOSTAP; 2073 ic_opmode = NL80211_IFTYPE_AP;
1163 break; 2074 break;
1164 default: 2075 default:
1165 DPRINTF(sc, ATH_DBG_FATAL, 2076 DPRINTF(sc, ATH_DBG_FATAL,
1166 "%s: Interface type %d not yet supported\n", 2077 "Interface type %d not yet supported\n", conf->type);
1167 __func__, conf->type);
1168 return -EOPNOTSUPP; 2078 return -EOPNOTSUPP;
1169 } 2079 }
1170 2080
1171 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a VAP of type: %d\n", 2081 DPRINTF(sc, ATH_DBG_CONFIG, "Attach a VAP of type: %d\n", ic_opmode);
1172 __func__,
1173 ic_opmode);
1174 2082
1175 error = ath_vap_attach(sc, 0, conf->vif, ic_opmode); 2083 /* Set the VAP opmode */
1176 if (error) { 2084 avp->av_opmode = ic_opmode;
1177 DPRINTF(sc, ATH_DBG_FATAL, 2085 avp->av_bslot = -1;
1178 "%s: Unable to attach vap, error: %d\n", 2086
1179 __func__, error); 2087 if (ic_opmode == NL80211_IFTYPE_AP)
1180 return error; 2088 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
1181 } 2089
2090 sc->sc_vaps[0] = conf->vif;
2091 sc->sc_nvaps++;
2092
2093 /* Set the device opmode */
2094 sc->sc_ah->ah_opmode = ic_opmode;
1182 2095
1183 if (conf->type == NL80211_IFTYPE_AP) { 2096 if (conf->type == NL80211_IFTYPE_AP) {
1184 /* TODO: is this a suitable place to start ANI for AP mode? */ 2097 /* TODO: is this a suitable place to start ANI for AP mode? */
@@ -1194,78 +2107,76 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1194 struct ieee80211_if_init_conf *conf) 2107 struct ieee80211_if_init_conf *conf)
1195{ 2108{
1196 struct ath_softc *sc = hw->priv; 2109 struct ath_softc *sc = hw->priv;
1197 struct ath_vap *avp; 2110 struct ath_vap *avp = (void *)conf->vif->drv_priv;
1198 int error;
1199
1200 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach VAP\n", __func__);
1201 2111
1202 avp = sc->sc_vaps[0]; 2112 DPRINTF(sc, ATH_DBG_CONFIG, "Detach Interface\n");
1203 if (avp == NULL) {
1204 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
1205 __func__);
1206 return;
1207 }
1208 2113
1209#ifdef CONFIG_SLOW_ANT_DIV
1210 ath_slow_ant_div_stop(&sc->sc_antdiv);
1211#endif
1212 /* Stop ANI */ 2114 /* Stop ANI */
1213 del_timer_sync(&sc->sc_ani.timer); 2115 del_timer_sync(&sc->sc_ani.timer);
1214 2116
1215 /* Update ratectrl */
1216 ath_rate_newstate(sc, avp);
1217
1218 /* Reclaim beacon resources */ 2117 /* Reclaim beacon resources */
1219 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP || 2118 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP ||
1220 sc->sc_ah->ah_opmode == ATH9K_M_IBSS) { 2119 sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC) {
1221 ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); 2120 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1222 ath_beacon_return(sc, avp); 2121 ath_beacon_return(sc, avp);
1223 } 2122 }
1224 2123
1225 /* Set interrupt mask */
1226 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1227 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask & ~ATH9K_INT_GLOBAL);
1228 sc->sc_flags &= ~SC_OP_BEACONS; 2124 sc->sc_flags &= ~SC_OP_BEACONS;
1229 2125
1230 error = ath_vap_detach(sc, 0); 2126 sc->sc_vaps[0] = NULL;
1231 if (error) 2127 sc->sc_nvaps--;
1232 DPRINTF(sc, ATH_DBG_FATAL,
1233 "%s: Unable to detach vap, error: %d\n",
1234 __func__, error);
1235} 2128}
1236 2129
1237static int ath9k_config(struct ieee80211_hw *hw, 2130static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1238 struct ieee80211_conf *conf)
1239{ 2131{
1240 struct ath_softc *sc = hw->priv; 2132 struct ath_softc *sc = hw->priv;
1241 struct ieee80211_channel *curchan = hw->conf.channel; 2133 struct ieee80211_conf *conf = &hw->conf;
1242 int pos;
1243 2134
1244 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n", 2135 mutex_lock(&sc->mutex);
1245 __func__, 2136 if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
1246 curchan->center_freq); 2137 IEEE80211_CONF_CHANGE_HT)) {
2138 struct ieee80211_channel *curchan = hw->conf.channel;
2139 int pos;
1247 2140
1248 pos = ath_get_channel(sc, curchan); 2141 DPRINTF(sc, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
1249 if (pos == -1) { 2142 curchan->center_freq);
1250 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
1251 return -EINVAL;
1252 }
1253 2143
1254 sc->sc_ah->ah_channels[pos].chanmode = 2144 pos = ath_get_channel(sc, curchan);
1255 (curchan->band == IEEE80211_BAND_2GHZ) ? 2145 if (pos == -1) {
1256 CHANNEL_G : CHANNEL_A; 2146 DPRINTF(sc, ATH_DBG_FATAL, "Invalid channel: %d\n",
2147 curchan->center_freq);
2148 mutex_unlock(&sc->mutex);
2149 return -EINVAL;
2150 }
1257 2151
1258 if (sc->sc_curaid && hw->conf.ht_conf.ht_supported) 2152 sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1259 sc->sc_ah->ah_channels[pos].chanmode = 2153 sc->sc_ah->ah_channels[pos].chanmode =
1260 ath_get_extchanmode(sc, curchan); 2154 (curchan->band == IEEE80211_BAND_2GHZ) ?
2155 CHANNEL_G : CHANNEL_A;
1261 2156
1262 sc->sc_config.txpowlimit = 2 * conf->power_level; 2157 if (conf->ht.enabled) {
2158 if (conf->ht.channel_type == NL80211_CHAN_HT40PLUS ||
2159 conf->ht.channel_type == NL80211_CHAN_HT40MINUS)
2160 sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1263 2161
1264 /* set h/w channel */ 2162 sc->sc_ah->ah_channels[pos].chanmode =
1265 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0) 2163 ath_get_extchanmode(sc, curchan,
1266 DPRINTF(sc, ATH_DBG_FATAL, "%s: Unable to set channel\n", 2164 conf->ht.channel_type);
1267 __func__); 2165 }
2166
2167 if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0) {
2168 DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n");
2169 mutex_unlock(&sc->mutex);
2170 return -EINVAL;
2171 }
2172
2173 ath_update_chainmask(sc, conf->ht.enabled);
2174 }
2175
2176 if (changed & IEEE80211_CONF_CHANGE_POWER)
2177 sc->sc_config.txpowlimit = 2 * conf->power_level;
1268 2178
2179 mutex_unlock(&sc->mutex);
1269 return 0; 2180 return 0;
1270} 2181}
1271 2182
@@ -1275,23 +2186,15 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
1275{ 2186{
1276 struct ath_softc *sc = hw->priv; 2187 struct ath_softc *sc = hw->priv;
1277 struct ath_hal *ah = sc->sc_ah; 2188 struct ath_hal *ah = sc->sc_ah;
1278 struct ath_vap *avp; 2189 struct ath_vap *avp = (void *)vif->drv_priv;
1279 u32 rfilt = 0; 2190 u32 rfilt = 0;
1280 int error, i; 2191 int error, i;
1281 DECLARE_MAC_BUF(mac);
1282
1283 avp = sc->sc_vaps[0];
1284 if (avp == NULL) {
1285 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n",
1286 __func__);
1287 return -EINVAL;
1288 }
1289 2192
1290 /* TODO: Need to decide which hw opmode to use for multi-interface 2193 /* TODO: Need to decide which hw opmode to use for multi-interface
1291 * cases */ 2194 * cases */
1292 if (vif->type == NL80211_IFTYPE_AP && 2195 if (vif->type == NL80211_IFTYPE_AP &&
1293 ah->ah_opmode != ATH9K_M_HOSTAP) { 2196 ah->ah_opmode != NL80211_IFTYPE_AP) {
1294 ah->ah_opmode = ATH9K_M_HOSTAP; 2197 ah->ah_opmode = NL80211_IFTYPE_STATION;
1295 ath9k_hw_setopmode(ah); 2198 ath9k_hw_setopmode(ah);
1296 ath9k_hw_write_associd(ah, sc->sc_myaddr, 0); 2199 ath9k_hw_write_associd(ah, sc->sc_myaddr, 0);
1297 /* Request full reset to get hw opmode changed properly */ 2200 /* Request full reset to get hw opmode changed properly */
@@ -1303,9 +2206,6 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
1303 switch (vif->type) { 2206 switch (vif->type) {
1304 case NL80211_IFTYPE_STATION: 2207 case NL80211_IFTYPE_STATION:
1305 case NL80211_IFTYPE_ADHOC: 2208 case NL80211_IFTYPE_ADHOC:
1306 /* Update ratectrl about the new state */
1307 ath_rate_newstate(sc, avp);
1308
1309 /* Set BSSID */ 2209 /* Set BSSID */
1310 memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN); 2210 memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN);
1311 sc->sc_curaid = 0; 2211 sc->sc_curaid = 0;
@@ -1315,27 +2215,9 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
1315 /* Set aggregation protection mode parameters */ 2215 /* Set aggregation protection mode parameters */
1316 sc->sc_config.ath_aggr_prot = 0; 2216 sc->sc_config.ath_aggr_prot = 0;
1317 2217
1318 /*
1319 * Reset our TSF so that its value is lower than the
1320 * beacon that we are trying to catch.
1321 * Only then hw will update its TSF register with the
1322 * new beacon. Reset the TSF before setting the BSSID
1323 * to avoid allowing in any frames that would update
1324 * our TSF only to have us clear it
1325 * immediately thereafter.
1326 */
1327 ath9k_hw_reset_tsf(sc->sc_ah);
1328
1329 /* Disable BMISS interrupt when we're not associated */
1330 ath9k_hw_set_interrupts(sc->sc_ah,
1331 sc->sc_imask &
1332 ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
1333 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1334
1335 DPRINTF(sc, ATH_DBG_CONFIG, 2218 DPRINTF(sc, ATH_DBG_CONFIG,
1336 "%s: RX filter 0x%x bssid %s aid 0x%x\n", 2219 "RX filter 0x%x bssid %pM aid 0x%x\n",
1337 __func__, rfilt, 2220 rfilt, sc->sc_curbssid, sc->sc_curaid);
1338 print_mac(mac, sc->sc_curbssid), sc->sc_curaid);
1339 2221
1340 /* need to reconfigure the beacon */ 2222 /* need to reconfigure the beacon */
1341 sc->sc_flags &= ~SC_OP_BEACONS ; 2223 sc->sc_flags &= ~SC_OP_BEACONS ;
@@ -1357,7 +2239,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
1357 * causes reconfiguration; we may be called 2239 * causes reconfiguration; we may be called
1358 * with beacon transmission active. 2240 * with beacon transmission active.
1359 */ 2241 */
1360 ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); 2242 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1361 2243
1362 error = ath_beacon_alloc(sc, 0); 2244 error = ath_beacon_alloc(sc, 0);
1363 if (error != 0) 2245 if (error != 0)
@@ -1403,7 +2285,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
1403 changed_flags &= SUPPORTED_FILTERS; 2285 changed_flags &= SUPPORTED_FILTERS;
1404 *total_flags &= SUPPORTED_FILTERS; 2286 *total_flags &= SUPPORTED_FILTERS;
1405 2287
1406 sc->rx_filter = *total_flags; 2288 sc->rx.rxfilter = *total_flags;
1407 rfilt = ath_calcrxfilter(sc); 2289 rfilt = ath_calcrxfilter(sc);
1408 ath9k_hw_setrxfilter(sc->sc_ah, rfilt); 2290 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
1409 2291
@@ -1412,8 +2294,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
1412 ath9k_hw_write_associd(sc->sc_ah, ath_bcast_mac, 0); 2294 ath9k_hw_write_associd(sc->sc_ah, ath_bcast_mac, 0);
1413 } 2295 }
1414 2296
1415 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set HW RX filter: 0x%x\n", 2297 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter);
1416 __func__, sc->rx_filter);
1417} 2298}
1418 2299
1419static void ath9k_sta_notify(struct ieee80211_hw *hw, 2300static void ath9k_sta_notify(struct ieee80211_hw *hw,
@@ -1422,37 +2303,13 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
1422 struct ieee80211_sta *sta) 2303 struct ieee80211_sta *sta)
1423{ 2304{
1424 struct ath_softc *sc = hw->priv; 2305 struct ath_softc *sc = hw->priv;
1425 struct ath_node *an;
1426 unsigned long flags;
1427 DECLARE_MAC_BUF(mac);
1428
1429 spin_lock_irqsave(&sc->node_lock, flags);
1430 an = ath_node_find(sc, sta->addr);
1431 spin_unlock_irqrestore(&sc->node_lock, flags);
1432 2306
1433 switch (cmd) { 2307 switch (cmd) {
1434 case STA_NOTIFY_ADD: 2308 case STA_NOTIFY_ADD:
1435 spin_lock_irqsave(&sc->node_lock, flags); 2309 ath_node_attach(sc, sta);
1436 if (!an) {
1437 ath_node_attach(sc, sta->addr, 0);
1438 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a node: %s\n",
1439 __func__, print_mac(mac, sta->addr));
1440 } else {
1441 ath_node_get(sc, sta->addr);
1442 }
1443 spin_unlock_irqrestore(&sc->node_lock, flags);
1444 break; 2310 break;
1445 case STA_NOTIFY_REMOVE: 2311 case STA_NOTIFY_REMOVE:
1446 if (!an) 2312 ath_node_detach(sc, sta);
1447 DPRINTF(sc, ATH_DBG_FATAL,
1448 "%s: Removal of a non-existent node\n",
1449 __func__);
1450 else {
1451 ath_node_put(sc, an, ATH9K_BH_STATUS_INTACT);
1452 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Put a node: %s\n",
1453 __func__,
1454 print_mac(mac, sta->addr));
1455 }
1456 break; 2313 break;
1457 default: 2314 default:
1458 break; 2315 break;
@@ -1477,20 +2334,14 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw,
1477 qnum = ath_get_hal_qnum(queue, sc); 2334 qnum = ath_get_hal_qnum(queue, sc);
1478 2335
1479 DPRINTF(sc, ATH_DBG_CONFIG, 2336 DPRINTF(sc, ATH_DBG_CONFIG,
1480 "%s: Configure tx [queue/halq] [%d/%d], " 2337 "Configure tx [queue/halq] [%d/%d], "
1481 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", 2338 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
1482 __func__, 2339 queue, qnum, params->aifs, params->cw_min,
1483 queue, 2340 params->cw_max, params->txop);
1484 qnum,
1485 params->aifs,
1486 params->cw_min,
1487 params->cw_max,
1488 params->txop);
1489 2341
1490 ret = ath_txq_update(sc, qnum, &qi); 2342 ret = ath_txq_update(sc, qnum, &qi);
1491 if (ret) 2343 if (ret)
1492 DPRINTF(sc, ATH_DBG_FATAL, 2344 DPRINTF(sc, ATH_DBG_FATAL, "TXQ Update failed\n");
1493 "%s: TXQ Update failed\n", __func__);
1494 2345
1495 return ret; 2346 return ret;
1496} 2347}
@@ -1504,23 +2355,22 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1504 struct ath_softc *sc = hw->priv; 2355 struct ath_softc *sc = hw->priv;
1505 int ret = 0; 2356 int ret = 0;
1506 2357
1507 DPRINTF(sc, ATH_DBG_KEYCACHE, " %s: Set HW Key\n", __func__); 2358 DPRINTF(sc, ATH_DBG_KEYCACHE, "Set HW Key\n");
1508 2359
1509 switch (cmd) { 2360 switch (cmd) {
1510 case SET_KEY: 2361 case SET_KEY:
1511 ret = ath_key_config(sc, addr, key); 2362 ret = ath_key_config(sc, addr, key);
1512 if (!ret) { 2363 if (ret >= 0) {
1513 set_bit(key->keyidx, sc->sc_keymap); 2364 key->hw_key_idx = ret;
1514 key->hw_key_idx = key->keyidx;
1515 /* push IV and Michael MIC generation to stack */ 2365 /* push IV and Michael MIC generation to stack */
1516 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 2366 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1517 if (key->alg == ALG_TKIP) 2367 if (key->alg == ALG_TKIP)
1518 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 2368 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2369 ret = 0;
1519 } 2370 }
1520 break; 2371 break;
1521 case DISABLE_KEY: 2372 case DISABLE_KEY:
1522 ath_key_delete(sc, key); 2373 ath_key_delete(sc, key);
1523 clear_bit(key->keyidx, sc->sc_keymap);
1524 break; 2374 break;
1525 default: 2375 default:
1526 ret = -EINVAL; 2376 ret = -EINVAL;
@@ -1537,8 +2387,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1537 struct ath_softc *sc = hw->priv; 2387 struct ath_softc *sc = hw->priv;
1538 2388
1539 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 2389 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
1540 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed PREAMBLE %d\n", 2390 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
1541 __func__,
1542 bss_conf->use_short_preamble); 2391 bss_conf->use_short_preamble);
1543 if (bss_conf->use_short_preamble) 2392 if (bss_conf->use_short_preamble)
1544 sc->sc_flags |= SC_OP_PREAMBLE_SHORT; 2393 sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
@@ -1547,8 +2396,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1547 } 2396 }
1548 2397
1549 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 2398 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
1550 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed CTS PROT %d\n", 2399 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
1551 __func__,
1552 bss_conf->use_cts_prot); 2400 bss_conf->use_cts_prot);
1553 if (bss_conf->use_cts_prot && 2401 if (bss_conf->use_cts_prot &&
1554 hw->conf.channel->band != IEEE80211_BAND_5GHZ) 2402 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
@@ -1557,18 +2405,10 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1557 sc->sc_flags &= ~SC_OP_PROTECT_ENABLE; 2405 sc->sc_flags &= ~SC_OP_PROTECT_ENABLE;
1558 } 2406 }
1559 2407
1560 if (changed & BSS_CHANGED_HT) {
1561 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed HT %d\n",
1562 __func__,
1563 bss_conf->assoc_ht);
1564 ath9k_ht_conf(sc, bss_conf);
1565 }
1566
1567 if (changed & BSS_CHANGED_ASSOC) { 2408 if (changed & BSS_CHANGED_ASSOC) {
1568 DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed ASSOC %d\n", 2409 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
1569 __func__,
1570 bss_conf->assoc); 2410 bss_conf->assoc);
1571 ath9k_bss_assoc_info(sc, bss_conf); 2411 ath9k_bss_assoc_info(sc, vif, bss_conf);
1572 } 2412 }
1573} 2413}
1574 2414
@@ -1601,50 +2441,37 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
1601 2441
1602 switch (action) { 2442 switch (action) {
1603 case IEEE80211_AMPDU_RX_START: 2443 case IEEE80211_AMPDU_RX_START:
1604 ret = ath_rx_aggr_start(sc, sta->addr, tid, ssn); 2444 if (!(sc->sc_flags & SC_OP_RXAGGR))
1605 if (ret < 0) 2445 ret = -ENOTSUPP;
1606 DPRINTF(sc, ATH_DBG_FATAL,
1607 "%s: Unable to start RX aggregation\n",
1608 __func__);
1609 break; 2446 break;
1610 case IEEE80211_AMPDU_RX_STOP: 2447 case IEEE80211_AMPDU_RX_STOP:
1611 ret = ath_rx_aggr_stop(sc, sta->addr, tid);
1612 if (ret < 0)
1613 DPRINTF(sc, ATH_DBG_FATAL,
1614 "%s: Unable to stop RX aggregation\n",
1615 __func__);
1616 break; 2448 break;
1617 case IEEE80211_AMPDU_TX_START: 2449 case IEEE80211_AMPDU_TX_START:
1618 ret = ath_tx_aggr_start(sc, sta->addr, tid, ssn); 2450 ret = ath_tx_aggr_start(sc, sta, tid, ssn);
1619 if (ret < 0) 2451 if (ret < 0)
1620 DPRINTF(sc, ATH_DBG_FATAL, 2452 DPRINTF(sc, ATH_DBG_FATAL,
1621 "%s: Unable to start TX aggregation\n", 2453 "Unable to start TX aggregation\n");
1622 __func__);
1623 else 2454 else
1624 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid); 2455 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
1625 break; 2456 break;
1626 case IEEE80211_AMPDU_TX_STOP: 2457 case IEEE80211_AMPDU_TX_STOP:
1627 ret = ath_tx_aggr_stop(sc, sta->addr, tid); 2458 ret = ath_tx_aggr_stop(sc, sta, tid);
1628 if (ret < 0) 2459 if (ret < 0)
1629 DPRINTF(sc, ATH_DBG_FATAL, 2460 DPRINTF(sc, ATH_DBG_FATAL,
1630 "%s: Unable to stop TX aggregation\n", 2461 "Unable to stop TX aggregation\n");
1631 __func__);
1632 2462
1633 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid); 2463 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
1634 break; 2464 break;
2465 case IEEE80211_AMPDU_TX_RESUME:
2466 ath_tx_aggr_resume(sc, sta, tid);
2467 break;
1635 default: 2468 default:
1636 DPRINTF(sc, ATH_DBG_FATAL, 2469 DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n");
1637 "%s: Unknown AMPDU action\n", __func__);
1638 } 2470 }
1639 2471
1640 return ret; 2472 return ret;
1641} 2473}
1642 2474
1643static int ath9k_no_fragmentation(struct ieee80211_hw *hw, u32 value)
1644{
1645 return -EOPNOTSUPP;
1646}
1647
1648static struct ieee80211_ops ath9k_ops = { 2475static struct ieee80211_ops ath9k_ops = {
1649 .tx = ath9k_tx, 2476 .tx = ath9k_tx,
1650 .start = ath9k_start, 2477 .start = ath9k_start,
@@ -1654,42 +2481,97 @@ static struct ieee80211_ops ath9k_ops = {
1654 .config = ath9k_config, 2481 .config = ath9k_config,
1655 .config_interface = ath9k_config_interface, 2482 .config_interface = ath9k_config_interface,
1656 .configure_filter = ath9k_configure_filter, 2483 .configure_filter = ath9k_configure_filter,
1657 .get_stats = NULL,
1658 .sta_notify = ath9k_sta_notify, 2484 .sta_notify = ath9k_sta_notify,
1659 .conf_tx = ath9k_conf_tx, 2485 .conf_tx = ath9k_conf_tx,
1660 .get_tx_stats = NULL,
1661 .bss_info_changed = ath9k_bss_info_changed, 2486 .bss_info_changed = ath9k_bss_info_changed,
1662 .set_tim = NULL,
1663 .set_key = ath9k_set_key, 2487 .set_key = ath9k_set_key,
1664 .hw_scan = NULL,
1665 .get_tkip_seq = NULL,
1666 .set_rts_threshold = NULL,
1667 .set_frag_threshold = NULL,
1668 .set_retry_limit = NULL,
1669 .get_tsf = ath9k_get_tsf, 2488 .get_tsf = ath9k_get_tsf,
1670 .reset_tsf = ath9k_reset_tsf, 2489 .reset_tsf = ath9k_reset_tsf,
1671 .tx_last_beacon = NULL,
1672 .ampdu_action = ath9k_ampdu_action, 2490 .ampdu_action = ath9k_ampdu_action,
1673 .set_frag_threshold = ath9k_no_fragmentation,
1674}; 2491};
1675 2492
2493static struct {
2494 u32 version;
2495 const char * name;
2496} ath_mac_bb_names[] = {
2497 { AR_SREV_VERSION_5416_PCI, "5416" },
2498 { AR_SREV_VERSION_5416_PCIE, "5418" },
2499 { AR_SREV_VERSION_9100, "9100" },
2500 { AR_SREV_VERSION_9160, "9160" },
2501 { AR_SREV_VERSION_9280, "9280" },
2502 { AR_SREV_VERSION_9285, "9285" }
2503};
2504
2505static struct {
2506 u16 version;
2507 const char * name;
2508} ath_rf_names[] = {
2509 { 0, "5133" },
2510 { AR_RAD5133_SREV_MAJOR, "5133" },
2511 { AR_RAD5122_SREV_MAJOR, "5122" },
2512 { AR_RAD2133_SREV_MAJOR, "2133" },
2513 { AR_RAD2122_SREV_MAJOR, "2122" }
2514};
2515
2516/*
2517 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
2518 */
2519static const char *
2520ath_mac_bb_name(u32 mac_bb_version)
2521{
2522 int i;
2523
2524 for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
2525 if (ath_mac_bb_names[i].version == mac_bb_version) {
2526 return ath_mac_bb_names[i].name;
2527 }
2528 }
2529
2530 return "????";
2531}
2532
2533/*
2534 * Return the RF name. "????" is returned if the RF is unknown.
2535 */
2536static const char *
2537ath_rf_name(u16 rf_version)
2538{
2539 int i;
2540
2541 for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
2542 if (ath_rf_names[i].version == rf_version) {
2543 return ath_rf_names[i].name;
2544 }
2545 }
2546
2547 return "????";
2548}
2549
1676static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2550static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1677{ 2551{
1678 void __iomem *mem; 2552 void __iomem *mem;
1679 struct ath_softc *sc; 2553 struct ath_softc *sc;
1680 struct ieee80211_hw *hw; 2554 struct ieee80211_hw *hw;
1681 const char *athname;
1682 u8 csz; 2555 u8 csz;
1683 u32 val; 2556 u32 val;
1684 int ret = 0; 2557 int ret = 0;
2558 struct ath_hal *ah;
1685 2559
1686 if (pci_enable_device(pdev)) 2560 if (pci_enable_device(pdev))
1687 return -EIO; 2561 return -EIO;
1688 2562
1689 /* XXX 32-bit addressing only */ 2563 ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1690 if (pci_set_dma_mask(pdev, 0xffffffff)) { 2564
1691 printk(KERN_ERR "ath_pci: 32-bit DMA not available\n"); 2565 if (ret) {
1692 ret = -ENODEV; 2566 printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
2567 goto bad;
2568 }
2569
2570 ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2571
2572 if (ret) {
2573 printk(KERN_ERR "ath9k: 32-bit DMA consistent "
2574 "DMA enable failed\n");
1693 goto bad; 2575 goto bad;
1694 } 2576 }
1695 2577
@@ -1746,16 +2628,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1746 goto bad2; 2628 goto bad2;
1747 } 2629 }
1748 2630
1749 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1750 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1751 IEEE80211_HW_SIGNAL_DBM |
1752 IEEE80211_HW_NOISE_DBM;
1753
1754 hw->wiphy->interface_modes =
1755 BIT(NL80211_IFTYPE_AP) |
1756 BIT(NL80211_IFTYPE_STATION) |
1757 BIT(NL80211_IFTYPE_ADHOC);
1758
1759 SET_IEEE80211_DEV(hw, &pdev->dev); 2631 SET_IEEE80211_DEV(hw, &pdev->dev);
1760 pci_set_drvdata(pdev, hw); 2632 pci_set_drvdata(pdev, hw);
1761 2633
@@ -1778,11 +2650,15 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1778 goto bad4; 2650 goto bad4;
1779 } 2651 }
1780 2652
1781 athname = ath9k_hw_probe(id->vendor, id->device); 2653 ah = sc->sc_ah;
1782 2654 printk(KERN_INFO
1783 printk(KERN_INFO "%s: %s: mem=0x%lx, irq=%d\n", 2655 "%s: Atheros AR%s MAC/BB Rev:%x "
2656 "AR%s RF Rev:%x: mem=0x%lx, irq=%d\n",
1784 wiphy_name(hw->wiphy), 2657 wiphy_name(hw->wiphy),
1785 athname ? athname : "Atheros ???", 2658 ath_mac_bb_name(ah->ah_macVersion),
2659 ah->ah_macRev,
2660 ath_rf_name((ah->ah_analog5GhzRev & AR_RADIO_SREV_MAJOR)),
2661 ah->ah_phyRev,
1786 (unsigned long)mem, pdev->irq); 2662 (unsigned long)mem, pdev->irq);
1787 2663
1788 return 0; 2664 return 0;
@@ -1803,17 +2679,10 @@ static void ath_pci_remove(struct pci_dev *pdev)
1803{ 2679{
1804 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 2680 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1805 struct ath_softc *sc = hw->priv; 2681 struct ath_softc *sc = hw->priv;
1806 enum ath9k_int status;
1807 2682
1808 if (pdev->irq) {
1809 ath9k_hw_set_interrupts(sc->sc_ah, 0);
1810 /* clear the ISR */
1811 ath9k_hw_getisr(sc->sc_ah, &status);
1812 sc->sc_flags |= SC_OP_INVALID;
1813 free_irq(pdev->irq, sc);
1814 }
1815 ath_detach(sc); 2683 ath_detach(sc);
1816 2684 if (pdev->irq)
2685 free_irq(pdev->irq, sc);
1817 pci_iounmap(pdev, sc->mem); 2686 pci_iounmap(pdev, sc->mem);
1818 pci_release_region(pdev, 0); 2687 pci_release_region(pdev, 0);
1819 pci_disable_device(pdev); 2688 pci_disable_device(pdev);
@@ -1829,7 +2698,7 @@ static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1829 2698
1830 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1); 2699 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1831 2700
1832#ifdef CONFIG_RFKILL 2701#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1833 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 2702 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1834 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll); 2703 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
1835#endif 2704#endif
@@ -1866,7 +2735,7 @@ static int ath_pci_resume(struct pci_dev *pdev)
1866 AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 2735 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1867 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1); 2736 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1868 2737
1869#ifdef CONFIG_RFKILL 2738#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1870 /* 2739 /*
1871 * check the h/w rfkill state on resume 2740 * check the h/w rfkill state on resume
1872 * and start the rfkill poll timer 2741 * and start the rfkill poll timer
@@ -1896,11 +2765,24 @@ static struct pci_driver ath_pci_driver = {
1896 2765
1897static int __init init_ath_pci(void) 2766static int __init init_ath_pci(void)
1898{ 2767{
2768 int error;
2769
1899 printk(KERN_INFO "%s: %s\n", dev_info, ATH_PCI_VERSION); 2770 printk(KERN_INFO "%s: %s\n", dev_info, ATH_PCI_VERSION);
1900 2771
2772 /* Register rate control algorithm */
2773 error = ath_rate_control_register();
2774 if (error != 0) {
2775 printk(KERN_ERR
2776 "Unable to register rate control algorithm: %d\n",
2777 error);
2778 ath_rate_control_unregister();
2779 return error;
2780 }
2781
1901 if (pci_register_driver(&ath_pci_driver) < 0) { 2782 if (pci_register_driver(&ath_pci_driver) < 0) {
1902 printk(KERN_ERR 2783 printk(KERN_ERR
1903 "ath_pci: No devices found, driver not installed.\n"); 2784 "ath_pci: No devices found, driver not installed.\n");
2785 ath_rate_control_unregister();
1904 pci_unregister_driver(&ath_pci_driver); 2786 pci_unregister_driver(&ath_pci_driver);
1905 return -ENODEV; 2787 return -ENODEV;
1906 } 2788 }
@@ -1911,7 +2793,8 @@ module_init(init_ath_pci);
1911 2793
1912static void __exit exit_ath_pci(void) 2794static void __exit exit_ath_pci(void)
1913{ 2795{
2796 ath_rate_control_unregister();
1914 pci_unregister_driver(&ath_pci_driver); 2797 pci_unregister_driver(&ath_pci_driver);
1915 printk(KERN_INFO "%s: driver unloaded\n", dev_info); 2798 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
1916} 2799}
1917module_exit(exit_ath_pci); 2800module_exit(exit_ath_pci);
diff --git a/drivers/net/wireless/ath9k/phy.c b/drivers/net/wireless/ath9k/phy.c
index eb9121fdfd38..766982a8196e 100644
--- a/drivers/net/wireless/ath9k/phy.c
+++ b/drivers/net/wireless/ath9k/phy.c
@@ -52,8 +52,7 @@ ath9k_hw_set_channel(struct ath_hal *ah, struct ath9k_channel *chan)
52 bModeSynth = 1; 52 bModeSynth = 1;
53 } else { 53 } else {
54 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, 54 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
55 "%s: invalid channel %u MHz\n", __func__, 55 "Invalid channel %u MHz\n", freq);
56 freq);
57 return false; 56 return false;
58 } 57 }
59 58
@@ -86,7 +85,7 @@ ath9k_hw_set_channel(struct ath_hal *ah, struct ath9k_channel *chan)
86 aModeRefSel = ath9k_hw_reverse_bits(1, 2); 85 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
87 } else { 86 } else {
88 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, 87 DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL,
89 "%s: invalid channel %u MHz\n", __func__, freq); 88 "Invalid channel %u MHz\n", freq);
90 return false; 89 return false;
91 } 90 }
92 91
@@ -215,7 +214,7 @@ ath9k_hw_set_rf_regs(struct ath_hal *ah, struct ath9k_channel *chan,
215 if (AR_SREV_9280_10_OR_LATER(ah)) 214 if (AR_SREV_9280_10_OR_LATER(ah))
216 return true; 215 return true;
217 216
218 eepMinorRev = ath9k_hw_get_eeprom(ahp, EEP_MINOR_REV); 217 eepMinorRev = ath9k_hw_get_eeprom(ah, EEP_MINOR_REV);
219 218
220 RF_BANK_SETUP(ahp->ah_analogBank0Data, &ahp->ah_iniBank0, 1); 219 RF_BANK_SETUP(ahp->ah_analogBank0Data, &ahp->ah_iniBank0, 1);
221 220
@@ -235,15 +234,15 @@ ath9k_hw_set_rf_regs(struct ath_hal *ah, struct ath9k_channel *chan,
235 234
236 if (eepMinorRev >= 2) { 235 if (eepMinorRev >= 2) {
237 if (IS_CHAN_2GHZ(chan)) { 236 if (IS_CHAN_2GHZ(chan)) {
238 ob2GHz = ath9k_hw_get_eeprom(ahp, EEP_OB_2); 237 ob2GHz = ath9k_hw_get_eeprom(ah, EEP_OB_2);
239 db2GHz = ath9k_hw_get_eeprom(ahp, EEP_DB_2); 238 db2GHz = ath9k_hw_get_eeprom(ah, EEP_DB_2);
240 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data, 239 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
241 ob2GHz, 3, 197, 0); 240 ob2GHz, 3, 197, 0);
242 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data, 241 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
243 db2GHz, 3, 194, 0); 242 db2GHz, 3, 194, 0);
244 } else { 243 } else {
245 ob5GHz = ath9k_hw_get_eeprom(ahp, EEP_OB_5); 244 ob5GHz = ath9k_hw_get_eeprom(ah, EEP_OB_5);
246 db5GHz = ath9k_hw_get_eeprom(ahp, EEP_DB_5); 245 db5GHz = ath9k_hw_get_eeprom(ah, EEP_DB_5);
247 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data, 246 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
248 ob5GHz, 3, 203, 0); 247 ob5GHz, 3, 203, 0);
249 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data, 248 ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data,
@@ -348,8 +347,7 @@ bool ath9k_hw_init_rf(struct ath_hal *ah, int *status)
348 || ahp->ah_analogBank6TPCData == NULL 347 || ahp->ah_analogBank6TPCData == NULL
349 || ahp->ah_analogBank7Data == NULL) { 348 || ahp->ah_analogBank7Data == NULL) {
350 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 349 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
351 "%s: cannot allocate RF banks\n", 350 "Cannot allocate RF banks\n");
352 __func__);
353 *status = -ENOMEM; 351 *status = -ENOMEM;
354 return false; 352 return false;
355 } 353 }
@@ -360,8 +358,7 @@ bool ath9k_hw_init_rf(struct ath_hal *ah, int *status)
360 ahp->ah_iniAddac.ia_columns), GFP_KERNEL); 358 ahp->ah_iniAddac.ia_columns), GFP_KERNEL);
361 if (ahp->ah_addac5416_21 == NULL) { 359 if (ahp->ah_addac5416_21 == NULL) {
362 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 360 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
363 "%s: cannot allocate ah_addac5416_21\n", 361 "Cannot allocate ah_addac5416_21\n");
364 __func__);
365 *status = -ENOMEM; 362 *status = -ENOMEM;
366 return false; 363 return false;
367 } 364 }
@@ -371,8 +368,7 @@ bool ath9k_hw_init_rf(struct ath_hal *ah, int *status)
371 ahp->ah_iniBank6.ia_rows), GFP_KERNEL); 368 ahp->ah_iniBank6.ia_rows), GFP_KERNEL);
372 if (ahp->ah_bank6Temp == NULL) { 369 if (ahp->ah_bank6Temp == NULL) {
373 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 370 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
374 "%s: cannot allocate ah_bank6Temp\n", 371 "Cannot allocate ah_bank6Temp\n");
375 __func__);
376 *status = -ENOMEM; 372 *status = -ENOMEM;
377 return false; 373 return false;
378 } 374 }
diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h
index 14702344448b..3a406a5c0593 100644
--- a/drivers/net/wireless/ath9k/phy.h
+++ b/drivers/net/wireless/ath9k/phy.h
@@ -50,6 +50,9 @@ bool ath9k_hw_init_rf(struct ath_hal *ah,
50#define AR_PHY_FC_SHORT_GI_40 0x00000080 50#define AR_PHY_FC_SHORT_GI_40 0x00000080
51#define AR_PHY_FC_WALSH 0x00000100 51#define AR_PHY_FC_WALSH 0x00000100
52#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200 52#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
53#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
54
55#define AR_PHY_TEST2 0x9808
53 56
54#define AR_PHY_TIMING2 0x9810 57#define AR_PHY_TIMING2 0x9810
55#define AR_PHY_TIMING3 0x9814 58#define AR_PHY_TIMING3 0x9814
@@ -100,6 +103,8 @@ bool ath9k_hw_init_rf(struct ath_hal *ah,
100#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF 103#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
101#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0 104#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
102 105
106#define AR_PHY_TSTDAC_CONST 0x983c
107
103#define AR_PHY_SETTLING 0x9844 108#define AR_PHY_SETTLING 0x9844
104#define AR_PHY_SETTLING_SWITCH 0x00003F80 109#define AR_PHY_SETTLING_SWITCH 0x00003F80
105#define AR_PHY_SETTLING_SWITCH_S 7 110#define AR_PHY_SETTLING_SWITCH_S 7
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
index cca2fc5b0765..04ab457a8faa 100644
--- a/drivers/net/wireless/ath9k/rc.c
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -15,143 +15,136 @@
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */ 16 */
17 17
18/*
19 * Atheros rate control algorithm
20 */
21
22#include "core.h" 18#include "core.h"
23/* FIXME: remove this include! */
24#include "../net/mac80211/rate.h"
25
26static u32 tx_triglevel_max;
27 19
28static struct ath_rate_table ar5416_11na_ratetable = { 20static struct ath_rate_table ar5416_11na_ratetable = {
29 42, 21 42,
22 {0},
30 { 23 {
31 { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 6 Mb */ 24 { VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
32 5400, 0x0b, 0x00, 12, 25 5400, 0x0b, 0x00, 12,
33 0, 2, 1, 0, 0, 0, 0, 0 }, 26 0, 2, 1, 0, 0, 0, 0, 0 },
34 { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 9 Mb */ 27 { VALID, VALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
35 7800, 0x0f, 0x00, 18, 28 7800, 0x0f, 0x00, 18,
36 0, 3, 1, 1, 1, 1, 1, 0 }, 29 0, 3, 1, 1, 1, 1, 1, 0 },
37 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */ 30 { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
38 10000, 0x0a, 0x00, 24, 31 10000, 0x0a, 0x00, 24,
39 2, 4, 2, 2, 2, 2, 2, 0 }, 32 2, 4, 2, 2, 2, 2, 2, 0 },
40 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */ 33 { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
41 13900, 0x0e, 0x00, 36, 34 13900, 0x0e, 0x00, 36,
42 2, 6, 2, 3, 3, 3, 3, 0 }, 35 2, 6, 2, 3, 3, 3, 3, 0 },
43 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */ 36 { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
44 17300, 0x09, 0x00, 48, 37 17300, 0x09, 0x00, 48,
45 4, 10, 3, 4, 4, 4, 4, 0 }, 38 4, 10, 3, 4, 4, 4, 4, 0 },
46 { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */ 39 { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
47 23000, 0x0d, 0x00, 72, 40 23000, 0x0d, 0x00, 72,
48 4, 14, 3, 5, 5, 5, 5, 0 }, 41 4, 14, 3, 5, 5, 5, 5, 0 },
49 { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */ 42 { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
50 27400, 0x08, 0x00, 96, 43 27400, 0x08, 0x00, 96,
51 4, 20, 3, 6, 6, 6, 6, 0 }, 44 4, 20, 3, 6, 6, 6, 6, 0 },
52 { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */ 45 { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
53 29300, 0x0c, 0x00, 108, 46 29300, 0x0c, 0x00, 108,
54 4, 23, 3, 7, 7, 7, 7, 0 }, 47 4, 23, 3, 7, 7, 7, 7, 0 },
55 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 6500, /* 6.5 Mb */ 48 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */
56 6400, 0x80, 0x00, 0, 49 6400, 0x80, 0x00, 0,
57 0, 2, 3, 8, 24, 8, 24, 3216 }, 50 0, 2, 3, 8, 24, 8, 24, 3216 },
58 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 13000, /* 13 Mb */ 51 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */
59 12700, 0x81, 0x00, 1, 52 12700, 0x81, 0x00, 1,
60 2, 4, 3, 9, 25, 9, 25, 6434 }, 53 2, 4, 3, 9, 25, 9, 25, 6434 },
61 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 19500, /* 19.5 Mb */ 54 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */
62 18800, 0x82, 0x00, 2, 55 18800, 0x82, 0x00, 2,
63 2, 6, 3, 10, 26, 10, 26, 9650 }, 56 2, 6, 3, 10, 26, 10, 26, 9650 },
64 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 26000, /* 26 Mb */ 57 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */
65 25000, 0x83, 0x00, 3, 58 25000, 0x83, 0x00, 3,
66 4, 10, 3, 11, 27, 11, 27, 12868 }, 59 4, 10, 3, 11, 27, 11, 27, 12868 },
67 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 39000, /* 39 Mb */ 60 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */
68 36700, 0x84, 0x00, 4, 61 36700, 0x84, 0x00, 4,
69 4, 14, 3, 12, 28, 12, 28, 19304 }, 62 4, 14, 3, 12, 28, 12, 28, 19304 },
70 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 52000, /* 52 Mb */ 63 { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */
71 48100, 0x85, 0x00, 5, 64 48100, 0x85, 0x00, 5,
72 4, 20, 3, 13, 29, 13, 29, 25740 }, 65 4, 20, 3, 13, 29, 13, 29, 25740 },
73 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 58500, /* 58.5 Mb */ 66 { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */
74 53500, 0x86, 0x00, 6, 67 53500, 0x86, 0x00, 6,
75 4, 23, 3, 14, 30, 14, 30, 28956 }, 68 4, 23, 3, 14, 30, 14, 30, 28956 },
76 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 65000, /* 65 Mb */ 69 { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */
77 59000, 0x87, 0x00, 7, 70 59000, 0x87, 0x00, 7,
78 4, 25, 3, 15, 31, 15, 32, 32180 }, 71 4, 25, 3, 15, 31, 15, 32, 32180 },
79 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 13000, /* 13 Mb */ 72 { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */
80 12700, 0x88, 0x00, 73 12700, 0x88, 0x00,
81 8, 0, 2, 3, 16, 33, 16, 33, 6430 }, 74 8, 0, 2, 3, 16, 33, 16, 33, 6430 },
82 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 26000, /* 26 Mb */ 75 { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */
83 24800, 0x89, 0x00, 9, 76 24800, 0x89, 0x00, 9,
84 2, 4, 3, 17, 34, 17, 34, 12860 }, 77 2, 4, 3, 17, 34, 17, 34, 12860 },
85 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 39000, /* 39 Mb */ 78 { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */
86 36600, 0x8a, 0x00, 10, 79 36600, 0x8a, 0x00, 10,
87 2, 6, 3, 18, 35, 18, 35, 19300 }, 80 2, 6, 3, 18, 35, 18, 35, 19300 },
88 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 52000, /* 52 Mb */ 81 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */
89 48100, 0x8b, 0x00, 11, 82 48100, 0x8b, 0x00, 11,
90 4, 10, 3, 19, 36, 19, 36, 25736 }, 83 4, 10, 3, 19, 36, 19, 36, 25736 },
91 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 78000, /* 78 Mb */ 84 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */
92 69500, 0x8c, 0x00, 12, 85 69500, 0x8c, 0x00, 12,
93 4, 14, 3, 20, 37, 20, 37, 38600 }, 86 4, 14, 3, 20, 37, 20, 37, 38600 },
94 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 104000, /* 104 Mb */ 87 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */
95 89500, 0x8d, 0x00, 13, 88 89500, 0x8d, 0x00, 13,
96 4, 20, 3, 21, 38, 21, 38, 51472 }, 89 4, 20, 3, 21, 38, 21, 38, 51472 },
97 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 117000, /* 117 Mb */ 90 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */
98 98900, 0x8e, 0x00, 14, 91 98900, 0x8e, 0x00, 14,
99 4, 23, 3, 22, 39, 22, 39, 57890 }, 92 4, 23, 3, 22, 39, 22, 39, 57890 },
100 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 130000, /* 130 Mb */ 93 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */
101 108300, 0x8f, 0x00, 15, 94 108300, 0x8f, 0x00, 15,
102 4, 25, 3, 23, 40, 23, 41, 64320 }, 95 4, 25, 3, 23, 40, 23, 41, 64320 },
103 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 13500, /* 13.5 Mb */ 96 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */
104 13200, 0x80, 0x00, 0, 97 13200, 0x80, 0x00, 0,
105 0, 2, 3, 8, 24, 24, 24, 6684 }, 98 0, 2, 3, 8, 24, 24, 24, 6684 },
106 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 27500, /* 27.0 Mb */ 99 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */
107 25900, 0x81, 0x00, 1, 100 25900, 0x81, 0x00, 1,
108 2, 4, 3, 9, 25, 25, 25, 13368 }, 101 2, 4, 3, 9, 25, 25, 25, 13368 },
109 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 40500, /* 40.5 Mb */ 102 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */
110 38600, 0x82, 0x00, 2, 103 38600, 0x82, 0x00, 2,
111 2, 6, 3, 10, 26, 26, 26, 20052 }, 104 2, 6, 3, 10, 26, 26, 26, 20052 },
112 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 54000, /* 54 Mb */ 105 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */
113 49800, 0x83, 0x00, 3, 106 49800, 0x83, 0x00, 3,
114 4, 10, 3, 11, 27, 27, 27, 26738 }, 107 4, 10, 3, 11, 27, 27, 27, 26738 },
115 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 81500, /* 81 Mb */ 108 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */
116 72200, 0x84, 0x00, 4, 109 72200, 0x84, 0x00, 4,
117 4, 14, 3, 12, 28, 28, 28, 40104 }, 110 4, 14, 3, 12, 28, 28, 28, 40104 },
118 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 108000, /* 108 Mb */ 111 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */
119 92900, 0x85, 0x00, 5, 112 92900, 0x85, 0x00, 5,
120 4, 20, 3, 13, 29, 29, 29, 53476 }, 113 4, 20, 3, 13, 29, 29, 29, 53476 },
121 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 121500, /* 121.5 Mb */ 114 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */
122 102700, 0x86, 0x00, 6, 115 102700, 0x86, 0x00, 6,
123 4, 23, 3, 14, 30, 30, 30, 60156 }, 116 4, 23, 3, 14, 30, 30, 30, 60156 },
124 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 135000, /* 135 Mb */ 117 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */
125 112000, 0x87, 0x00, 7, 118 112000, 0x87, 0x00, 7,
126 4, 25, 3, 15, 31, 32, 32, 66840 }, 119 4, 25, 3, 15, 31, 32, 32, 66840 },
127 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */ 120 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
128 122000, 0x87, 0x00, 7, 121 122000, 0x87, 0x00, 7,
129 4, 25, 3, 15, 31, 32, 32, 74200 }, 122 4, 25, 3, 15, 31, 32, 32, 74200 },
130 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 27000, /* 27 Mb */ 123 { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */
131 25800, 0x88, 0x00, 8, 124 25800, 0x88, 0x00, 8,
132 0, 2, 3, 16, 33, 33, 33, 13360 }, 125 0, 2, 3, 16, 33, 33, 33, 13360 },
133 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 54000, /* 54 Mb */ 126 { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */
134 49800, 0x89, 0x00, 9, 127 49800, 0x89, 0x00, 9,
135 2, 4, 3, 17, 34, 34, 34, 26720 }, 128 2, 4, 3, 17, 34, 34, 34, 26720 },
136 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 81000, /* 81 Mb */ 129 { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */
137 71900, 0x8a, 0x00, 10, 130 71900, 0x8a, 0x00, 10,
138 2, 6, 3, 18, 35, 35, 35, 40080 }, 131 2, 6, 3, 18, 35, 35, 35, 40080 },
139 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 108000, /* 108 Mb */ 132 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */
140 92500, 0x8b, 0x00, 11, 133 92500, 0x8b, 0x00, 11,
141 4, 10, 3, 19, 36, 36, 36, 53440 }, 134 4, 10, 3, 19, 36, 36, 36, 53440 },
142 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 162000, /* 162 Mb */ 135 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */
143 130300, 0x8c, 0x00, 12, 136 130300, 0x8c, 0x00, 12,
144 4, 14, 3, 20, 37, 37, 37, 80160 }, 137 4, 14, 3, 20, 37, 37, 37, 80160 },
145 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 216000, /* 216 Mb */ 138 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */
146 162800, 0x8d, 0x00, 13, 139 162800, 0x8d, 0x00, 13,
147 4, 20, 3, 21, 38, 38, 38, 106880 }, 140 4, 20, 3, 21, 38, 38, 38, 106880 },
148 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 243000, /* 243 Mb */ 141 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */
149 178200, 0x8e, 0x00, 14, 142 178200, 0x8e, 0x00, 14,
150 4, 23, 3, 22, 39, 39, 39, 120240 }, 143 4, 23, 3, 22, 39, 39, 39, 120240 },
151 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 270000, /* 270 Mb */ 144 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */
152 192100, 0x8f, 0x00, 15, 145 192100, 0x8f, 0x00, 15,
153 4, 25, 3, 23, 40, 41, 41, 133600 }, 146 4, 25, 3, 23, 40, 41, 41, 133600 },
154 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */ 147 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
155 207000, 0x8f, 0x00, 15, 148 207000, 0x8f, 0x00, 15,
156 4, 25, 3, 23, 40, 41, 41, 148400 }, 149 4, 25, 3, 23, 40, 41, 41, 148400 },
157 }, 150 },
@@ -160,153 +153,149 @@ static struct ath_rate_table ar5416_11na_ratetable = {
160 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ 153 WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
161}; 154};
162 155
163/* TRUE_ALL - valid for 20/40/Legacy,
164 * TRUE - Legacy only,
165 * TRUE_20 - HT 20 only,
166 * TRUE_40 - HT 40 only */
167
168/* 4ms frame limit not used for NG mode. The values filled 156/* 4ms frame limit not used for NG mode. The values filled
169 * for HT are the 64K max aggregate limit */ 157 * for HT are the 64K max aggregate limit */
170 158
171static struct ath_rate_table ar5416_11ng_ratetable = { 159static struct ath_rate_table ar5416_11ng_ratetable = {
172 46, 160 46,
161 {0},
173 { 162 {
174 { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 1000, /* 1 Mb */ 163 { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
175 900, 0x1b, 0x00, 2, 164 900, 0x1b, 0x00, 2,
176 0, 0, 1, 0, 0, 0, 0, 0 }, 165 0, 0, 1, 0, 0, 0, 0, 0 },
177 { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 2000, /* 2 Mb */ 166 { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
178 1900, 0x1a, 0x04, 4, 167 1900, 0x1a, 0x04, 4,
179 1, 1, 1, 1, 1, 1, 1, 0 }, 168 1, 1, 1, 1, 1, 1, 1, 0 },
180 { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 5500, /* 5.5 Mb */ 169 { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
181 4900, 0x19, 0x04, 11, 170 4900, 0x19, 0x04, 11,
182 2, 2, 2, 2, 2, 2, 2, 0 }, 171 2, 2, 2, 2, 2, 2, 2, 0 },
183 { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 11000, /* 11 Mb */ 172 { VALID_ALL, VALID_ALL, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
184 8100, 0x18, 0x04, 22, 173 8100, 0x18, 0x04, 22,
185 3, 3, 2, 3, 3, 3, 3, 0 }, 174 3, 3, 2, 3, 3, 3, 3, 0 },
186 { FALSE, FALSE, WLAN_PHY_OFDM, 6000, /* 6 Mb */ 175 { INVALID, INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
187 5400, 0x0b, 0x00, 12, 176 5400, 0x0b, 0x00, 12,
188 4, 2, 1, 4, 4, 4, 4, 0 }, 177 4, 2, 1, 4, 4, 4, 4, 0 },
189 { FALSE, FALSE, WLAN_PHY_OFDM, 9000, /* 9 Mb */ 178 { INVALID, INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
190 7800, 0x0f, 0x00, 18, 179 7800, 0x0f, 0x00, 18,
191 4, 3, 1, 5, 5, 5, 5, 0 }, 180 4, 3, 1, 5, 5, 5, 5, 0 },
192 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */ 181 { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
193 10100, 0x0a, 0x00, 24, 182 10100, 0x0a, 0x00, 24,
194 6, 4, 1, 6, 6, 6, 6, 0 }, 183 6, 4, 1, 6, 6, 6, 6, 0 },
195 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */ 184 { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
196 14100, 0x0e, 0x00, 36, 185 14100, 0x0e, 0x00, 36,
197 6, 6, 2, 7, 7, 7, 7, 0 }, 186 6, 6, 2, 7, 7, 7, 7, 0 },
198 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */ 187 { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
199 17700, 0x09, 0x00, 48, 188 17700, 0x09, 0x00, 48,
200 8, 10, 3, 8, 8, 8, 8, 0 }, 189 8, 10, 3, 8, 8, 8, 8, 0 },
201 { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */ 190 { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
202 23700, 0x0d, 0x00, 72, 191 23700, 0x0d, 0x00, 72,
203 8, 14, 3, 9, 9, 9, 9, 0 }, 192 8, 14, 3, 9, 9, 9, 9, 0 },
204 { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */ 193 { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
205 27400, 0x08, 0x00, 96, 194 27400, 0x08, 0x00, 96,
206 8, 20, 3, 10, 10, 10, 10, 0 }, 195 8, 20, 3, 10, 10, 10, 10, 0 },
207 { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */ 196 { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
208 30900, 0x0c, 0x00, 108, 197 30900, 0x0c, 0x00, 108,
209 8, 23, 3, 11, 11, 11, 11, 0 }, 198 8, 23, 3, 11, 11, 11, 11, 0 },
210 { FALSE, FALSE, WLAN_PHY_HT_20_SS, 6500, /* 6.5 Mb */ 199 { INVALID, INVALID, WLAN_RC_PHY_HT_20_SS, 6500, /* 6.5 Mb */
211 6400, 0x80, 0x00, 0, 200 6400, 0x80, 0x00, 0,
212 4, 2, 3, 12, 28, 12, 28, 3216 }, 201 4, 2, 3, 12, 28, 12, 28, 3216 },
213 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 13000, /* 13 Mb */ 202 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 13000, /* 13 Mb */
214 12700, 0x81, 0x00, 1, 203 12700, 0x81, 0x00, 1,
215 6, 4, 3, 13, 29, 13, 29, 6434 }, 204 6, 4, 3, 13, 29, 13, 29, 6434 },
216 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 19500, /* 19.5 Mb */ 205 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 19500, /* 19.5 Mb */
217 18800, 0x82, 0x00, 2, 206 18800, 0x82, 0x00, 2,
218 6, 6, 3, 14, 30, 14, 30, 9650 }, 207 6, 6, 3, 14, 30, 14, 30, 9650 },
219 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 26000, /* 26 Mb */ 208 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 26000, /* 26 Mb */
220 25000, 0x83, 0x00, 3, 209 25000, 0x83, 0x00, 3,
221 8, 10, 3, 15, 31, 15, 31, 12868 }, 210 8, 10, 3, 15, 31, 15, 31, 12868 },
222 { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 39000, /* 39 Mb */ 211 { VALID_20, VALID_20, WLAN_RC_PHY_HT_20_SS, 39000, /* 39 Mb */
223 36700, 0x84, 0x00, 4, 212 36700, 0x84, 0x00, 4,
224 8, 14, 3, 16, 32, 16, 32, 19304 }, 213 8, 14, 3, 16, 32, 16, 32, 19304 },
225 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 52000, /* 52 Mb */ 214 { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 52000, /* 52 Mb */
226 48100, 0x85, 0x00, 5, 215 48100, 0x85, 0x00, 5,
227 8, 20, 3, 17, 33, 17, 33, 25740 }, 216 8, 20, 3, 17, 33, 17, 33, 25740 },
228 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 58500, /* 58.5 Mb */ 217 { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 58500, /* 58.5 Mb */
229 53500, 0x86, 0x00, 6, 218 53500, 0x86, 0x00, 6,
230 8, 23, 3, 18, 34, 18, 34, 28956 }, 219 8, 23, 3, 18, 34, 18, 34, 28956 },
231 { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 65000, /* 65 Mb */ 220 { INVALID, VALID_20, WLAN_RC_PHY_HT_20_SS, 65000, /* 65 Mb */
232 59000, 0x87, 0x00, 7, 221 59000, 0x87, 0x00, 7,
233 8, 25, 3, 19, 35, 19, 36, 32180 }, 222 8, 25, 3, 19, 35, 19, 36, 32180 },
234 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 13000, /* 13 Mb */ 223 { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 13000, /* 13 Mb */
235 12700, 0x88, 0x00, 8, 224 12700, 0x88, 0x00, 8,
236 4, 2, 3, 20, 37, 20, 37, 6430 }, 225 4, 2, 3, 20, 37, 20, 37, 6430 },
237 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 26000, /* 26 Mb */ 226 { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 26000, /* 26 Mb */
238 24800, 0x89, 0x00, 9, 227 24800, 0x89, 0x00, 9,
239 6, 4, 3, 21, 38, 21, 38, 12860 }, 228 6, 4, 3, 21, 38, 21, 38, 12860 },
240 { FALSE, FALSE, WLAN_PHY_HT_20_DS, 39000, /* 39 Mb */ 229 { INVALID, INVALID, WLAN_RC_PHY_HT_20_DS, 39000, /* 39 Mb */
241 36600, 0x8a, 0x00, 10, 230 36600, 0x8a, 0x00, 10,
242 6, 6, 3, 22, 39, 22, 39, 19300 }, 231 6, 6, 3, 22, 39, 22, 39, 19300 },
243 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 52000, /* 52 Mb */ 232 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 52000, /* 52 Mb */
244 48100, 0x8b, 0x00, 11, 233 48100, 0x8b, 0x00, 11,
245 8, 10, 3, 23, 40, 23, 40, 25736 }, 234 8, 10, 3, 23, 40, 23, 40, 25736 },
246 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 78000, /* 78 Mb */ 235 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 78000, /* 78 Mb */
247 69500, 0x8c, 0x00, 12, 236 69500, 0x8c, 0x00, 12,
248 8, 14, 3, 24, 41, 24, 41, 38600 }, 237 8, 14, 3, 24, 41, 24, 41, 38600 },
249 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 104000, /* 104 Mb */ 238 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 104000, /* 104 Mb */
250 89500, 0x8d, 0x00, 13, 239 89500, 0x8d, 0x00, 13,
251 8, 20, 3, 25, 42, 25, 42, 51472 }, 240 8, 20, 3, 25, 42, 25, 42, 51472 },
252 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 117000, /* 117 Mb */ 241 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 117000, /* 117 Mb */
253 98900, 0x8e, 0x00, 14, 242 98900, 0x8e, 0x00, 14,
254 8, 23, 3, 26, 43, 26, 44, 57890 }, 243 8, 23, 3, 26, 43, 26, 44, 57890 },
255 { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 130000, /* 130 Mb */ 244 { VALID_20, INVALID, WLAN_RC_PHY_HT_20_DS, 130000, /* 130 Mb */
256 108300, 0x8f, 0x00, 15, 245 108300, 0x8f, 0x00, 15,
257 8, 25, 3, 27, 44, 27, 45, 64320 }, 246 8, 25, 3, 27, 44, 27, 45, 64320 },
258 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 13500, /* 13.5 Mb */ 247 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 13500, /* 13.5 Mb */
259 13200, 0x80, 0x00, 0, 248 13200, 0x80, 0x00, 0,
260 8, 2, 3, 12, 28, 28, 28, 6684 }, 249 8, 2, 3, 12, 28, 28, 28, 6684 },
261 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 27500, /* 27.0 Mb */ 250 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 27500, /* 27.0 Mb */
262 25900, 0x81, 0x00, 1, 251 25900, 0x81, 0x00, 1,
263 8, 4, 3, 13, 29, 29, 29, 13368 }, 252 8, 4, 3, 13, 29, 29, 29, 13368 },
264 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 40500, /* 40.5 Mb */ 253 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 40500, /* 40.5 Mb */
265 38600, 0x82, 0x00, 2, 254 38600, 0x82, 0x00, 2,
266 8, 6, 3, 14, 30, 30, 30, 20052 }, 255 8, 6, 3, 14, 30, 30, 30, 20052 },
267 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 54000, /* 54 Mb */ 256 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 54000, /* 54 Mb */
268 49800, 0x83, 0x00, 3, 257 49800, 0x83, 0x00, 3,
269 8, 10, 3, 15, 31, 31, 31, 26738 }, 258 8, 10, 3, 15, 31, 31, 31, 26738 },
270 { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 81500, /* 81 Mb */ 259 { VALID_40, VALID_40, WLAN_RC_PHY_HT_40_SS, 81500, /* 81 Mb */
271 72200, 0x84, 0x00, 4, 260 72200, 0x84, 0x00, 4,
272 8, 14, 3, 16, 32, 32, 32, 40104 }, 261 8, 14, 3, 16, 32, 32, 32, 40104 },
273 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 108000, /* 108 Mb */ 262 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 108000, /* 108 Mb */
274 92900, 0x85, 0x00, 5, 263 92900, 0x85, 0x00, 5,
275 8, 20, 3, 17, 33, 33, 33, 53476 }, 264 8, 20, 3, 17, 33, 33, 33, 53476 },
276 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 121500, /* 121.5 Mb */ 265 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 121500, /* 121.5 Mb */
277 102700, 0x86, 0x00, 6, 266 102700, 0x86, 0x00, 6,
278 8, 23, 3, 18, 34, 34, 34, 60156 }, 267 8, 23, 3, 18, 34, 34, 34, 60156 },
279 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 135000, /* 135 Mb */ 268 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS, 135000, /* 135 Mb */
280 112000, 0x87, 0x00, 7, 269 112000, 0x87, 0x00, 7,
281 8, 23, 3, 19, 35, 36, 36, 66840 }, 270 8, 23, 3, 19, 35, 36, 36, 66840 },
282 { FALSE, TRUE_40, WLAN_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */ 271 { INVALID, VALID_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */
283 122000, 0x87, 0x00, 7, 272 122000, 0x87, 0x00, 7,
284 8, 25, 3, 19, 35, 36, 36, 74200 }, 273 8, 25, 3, 19, 35, 36, 36, 74200 },
285 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 27000, /* 27 Mb */ 274 { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 27000, /* 27 Mb */
286 25800, 0x88, 0x00, 8, 275 25800, 0x88, 0x00, 8,
287 8, 2, 3, 20, 37, 37, 37, 13360 }, 276 8, 2, 3, 20, 37, 37, 37, 13360 },
288 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 54000, /* 54 Mb */ 277 { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 54000, /* 54 Mb */
289 49800, 0x89, 0x00, 9, 278 49800, 0x89, 0x00, 9,
290 8, 4, 3, 21, 38, 38, 38, 26720 }, 279 8, 4, 3, 21, 38, 38, 38, 26720 },
291 { FALSE, FALSE, WLAN_PHY_HT_40_DS, 81000, /* 81 Mb */ 280 { INVALID, INVALID, WLAN_RC_PHY_HT_40_DS, 81000, /* 81 Mb */
292 71900, 0x8a, 0x00, 10, 281 71900, 0x8a, 0x00, 10,
293 8, 6, 3, 22, 39, 39, 39, 40080 }, 282 8, 6, 3, 22, 39, 39, 39, 40080 },
294 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 108000, /* 108 Mb */ 283 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 108000, /* 108 Mb */
295 92500, 0x8b, 0x00, 11, 284 92500, 0x8b, 0x00, 11,
296 8, 10, 3, 23, 40, 40, 40, 53440 }, 285 8, 10, 3, 23, 40, 40, 40, 53440 },
297 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 162000, /* 162 Mb */ 286 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 162000, /* 162 Mb */
298 130300, 0x8c, 0x00, 12, 287 130300, 0x8c, 0x00, 12,
299 8, 14, 3, 24, 41, 41, 41, 80160 }, 288 8, 14, 3, 24, 41, 41, 41, 80160 },
300 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 216000, /* 216 Mb */ 289 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 216000, /* 216 Mb */
301 162800, 0x8d, 0x00, 13, 290 162800, 0x8d, 0x00, 13,
302 8, 20, 3, 25, 42, 42, 42, 106880 }, 291 8, 20, 3, 25, 42, 42, 42, 106880 },
303 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 243000, /* 243 Mb */ 292 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 243000, /* 243 Mb */
304 178200, 0x8e, 0x00, 14, 293 178200, 0x8e, 0x00, 14,
305 8, 23, 3, 26, 43, 43, 43, 120240 }, 294 8, 23, 3, 26, 43, 43, 43, 120240 },
306 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 270000, /* 270 Mb */ 295 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS, 270000, /* 270 Mb */
307 192100, 0x8f, 0x00, 15, 296 192100, 0x8f, 0x00, 15,
308 8, 23, 3, 27, 44, 45, 45, 133600 }, 297 8, 23, 3, 27, 44, 45, 45, 133600 },
309 { TRUE_40, FALSE, WLAN_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */ 298 { VALID_40, INVALID, WLAN_RC_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */
310 207000, 0x8f, 0x00, 15, 299 207000, 0x8f, 0x00, 15,
311 8, 25, 3, 27, 44, 45, 45, 148400 }, 300 8, 25, 3, 27, 44, 45, 45, 148400 },
312 }, 301 },
@@ -317,29 +306,30 @@ static struct ath_rate_table ar5416_11ng_ratetable = {
317 306
318static struct ath_rate_table ar5416_11a_ratetable = { 307static struct ath_rate_table ar5416_11a_ratetable = {
319 8, 308 8,
309 {0},
320 { 310 {
321 { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 6 Mb */ 311 { VALID, VALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
322 5400, 0x0b, 0x00, (0x80|12), 312 5400, 0x0b, 0x00, (0x80|12),
323 0, 2, 1, 0, 0 }, 313 0, 2, 1, 0, 0 },
324 { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 9 Mb */ 314 { VALID, VALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
325 7800, 0x0f, 0x00, 18, 315 7800, 0x0f, 0x00, 18,
326 0, 3, 1, 1, 0 }, 316 0, 3, 1, 1, 0 },
327 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */ 317 { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
328 10000, 0x0a, 0x00, (0x80|24), 318 10000, 0x0a, 0x00, (0x80|24),
329 2, 4, 2, 2, 0 }, 319 2, 4, 2, 2, 0 },
330 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */ 320 { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
331 13900, 0x0e, 0x00, 36, 321 13900, 0x0e, 0x00, 36,
332 2, 6, 2, 3, 0 }, 322 2, 6, 2, 3, 0 },
333 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */ 323 { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
334 17300, 0x09, 0x00, (0x80|48), 324 17300, 0x09, 0x00, (0x80|48),
335 4, 10, 3, 4, 0 }, 325 4, 10, 3, 4, 0 },
336 { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */ 326 { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
337 23000, 0x0d, 0x00, 72, 327 23000, 0x0d, 0x00, 72,
338 4, 14, 3, 5, 0 }, 328 4, 14, 3, 5, 0 },
339 { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */ 329 { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
340 27400, 0x08, 0x00, 96, 330 27400, 0x08, 0x00, 96,
341 4, 19, 3, 6, 0 }, 331 4, 19, 3, 6, 0 },
342 { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */ 332 { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
343 29300, 0x0c, 0x00, 108, 333 29300, 0x0c, 0x00, 108,
344 4, 23, 3, 7, 0 }, 334 4, 23, 3, 7, 0 },
345 }, 335 },
@@ -348,109 +338,44 @@ static struct ath_rate_table ar5416_11a_ratetable = {
348 0, /* Phy rates allowed initially */ 338 0, /* Phy rates allowed initially */
349}; 339};
350 340
351static struct ath_rate_table ar5416_11a_ratetable_Half = {
352 8,
353 {
354 { TRUE, TRUE, WLAN_PHY_OFDM, 3000, /* 6 Mb */
355 2700, 0x0b, 0x00, (0x80|6),
356 0, 2, 1, 0, 0},
357 { TRUE, TRUE, WLAN_PHY_OFDM, 4500, /* 9 Mb */
358 3900, 0x0f, 0x00, 9,
359 0, 3, 1, 1, 0 },
360 { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 12 Mb */
361 5000, 0x0a, 0x00, (0x80|12),
362 2, 4, 2, 2, 0 },
363 { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 18 Mb */
364 6950, 0x0e, 0x00, 18,
365 2, 6, 2, 3, 0 },
366 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 24 Mb */
367 8650, 0x09, 0x00, (0x80|24),
368 4, 10, 3, 4, 0 },
369 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 36 Mb */
370 11500, 0x0d, 0x00, 36,
371 4, 14, 3, 5, 0 },
372 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 48 Mb */
373 13700, 0x08, 0x00, 48,
374 4, 19, 3, 6, 0 },
375 { TRUE, TRUE, WLAN_PHY_OFDM, 27000, /* 54 Mb */
376 14650, 0x0c, 0x00, 54,
377 4, 23, 3, 7, 0 },
378 },
379 50, /* probe interval */
380 50, /* rssi reduce interval */
381 0, /* Phy rates allowed initially */
382};
383
384static struct ath_rate_table ar5416_11a_ratetable_Quarter = {
385 8,
386 {
387 { TRUE, TRUE, WLAN_PHY_OFDM, 1500, /* 6 Mb */
388 1350, 0x0b, 0x00, (0x80|3),
389 0, 2, 1, 0, 0 },
390 { TRUE, TRUE, WLAN_PHY_OFDM, 2250, /* 9 Mb */
391 1950, 0x0f, 0x00, 4,
392 0, 3, 1, 1, 0 },
393 { TRUE, TRUE, WLAN_PHY_OFDM, 3000, /* 12 Mb */
394 2500, 0x0a, 0x00, (0x80|6),
395 2, 4, 2, 2, 0 },
396 { TRUE, TRUE, WLAN_PHY_OFDM, 4500, /* 18 Mb */
397 3475, 0x0e, 0x00, 9,
398 2, 6, 2, 3, 0 },
399 { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 25 Mb */
400 4325, 0x09, 0x00, (0x80|12),
401 4, 10, 3, 4, 0 },
402 { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 36 Mb */
403 5750, 0x0d, 0x00, 18,
404 4, 14, 3, 5, 0 },
405 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 48 Mb */
406 6850, 0x08, 0x00, 24,
407 4, 19, 3, 6, 0 },
408 { TRUE, TRUE, WLAN_PHY_OFDM, 13500, /* 54 Mb */
409 7325, 0x0c, 0x00, 27,
410 4, 23, 3, 7, 0 },
411 },
412 50, /* probe interval */
413 50, /* rssi reduce interval */
414 0, /* Phy rates allowed initially */
415};
416
417static struct ath_rate_table ar5416_11g_ratetable = { 341static struct ath_rate_table ar5416_11g_ratetable = {
418 12, 342 12,
343 {0},
419 { 344 {
420 { TRUE, TRUE, WLAN_PHY_CCK, 1000, /* 1 Mb */ 345 { VALID, VALID, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
421 900, 0x1b, 0x00, 2, 346 900, 0x1b, 0x00, 2,
422 0, 0, 1, 0, 0 }, 347 0, 0, 1, 0, 0 },
423 { TRUE, TRUE, WLAN_PHY_CCK, 2000, /* 2 Mb */ 348 { VALID, VALID, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
424 1900, 0x1a, 0x04, 4, 349 1900, 0x1a, 0x04, 4,
425 1, 1, 1, 1, 0 }, 350 1, 1, 1, 1, 0 },
426 { TRUE, TRUE, WLAN_PHY_CCK, 5500, /* 5.5 Mb */ 351 { VALID, VALID, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
427 4900, 0x19, 0x04, 11, 352 4900, 0x19, 0x04, 11,
428 2, 2, 2, 2, 0 }, 353 2, 2, 2, 2, 0 },
429 { TRUE, TRUE, WLAN_PHY_CCK, 11000, /* 11 Mb */ 354 { VALID, VALID, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
430 8100, 0x18, 0x04, 22, 355 8100, 0x18, 0x04, 22,
431 3, 3, 2, 3, 0 }, 356 3, 3, 2, 3, 0 },
432 { FALSE, FALSE, WLAN_PHY_OFDM, 6000, /* 6 Mb */ 357 { INVALID, INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
433 5400, 0x0b, 0x00, 12, 358 5400, 0x0b, 0x00, 12,
434 4, 2, 1, 4, 0 }, 359 4, 2, 1, 4, 0 },
435 { FALSE, FALSE, WLAN_PHY_OFDM, 9000, /* 9 Mb */ 360 { INVALID, INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
436 7800, 0x0f, 0x00, 18, 361 7800, 0x0f, 0x00, 18,
437 4, 3, 1, 5, 0 }, 362 4, 3, 1, 5, 0 },
438 { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */ 363 { VALID, VALID, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
439 10000, 0x0a, 0x00, 24, 364 10000, 0x0a, 0x00, 24,
440 6, 4, 1, 6, 0 }, 365 6, 4, 1, 6, 0 },
441 { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */ 366 { VALID, VALID, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
442 13900, 0x0e, 0x00, 36, 367 13900, 0x0e, 0x00, 36,
443 6, 6, 2, 7, 0 }, 368 6, 6, 2, 7, 0 },
444 { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */ 369 { VALID, VALID, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
445 17300, 0x09, 0x00, 48, 370 17300, 0x09, 0x00, 48,
446 8, 10, 3, 8, 0 }, 371 8, 10, 3, 8, 0 },
447 { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */ 372 { VALID, VALID, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
448 23000, 0x0d, 0x00, 72, 373 23000, 0x0d, 0x00, 72,
449 8, 14, 3, 9, 0 }, 374 8, 14, 3, 9, 0 },
450 { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */ 375 { VALID, VALID, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
451 27400, 0x08, 0x00, 96, 376 27400, 0x08, 0x00, 96,
452 8, 19, 3, 10, 0 }, 377 8, 19, 3, 10, 0 },
453 { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */ 378 { VALID, VALID, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
454 29300, 0x0c, 0x00, 108, 379 29300, 0x0c, 0x00, 108,
455 8, 23, 3, 11, 0 }, 380 8, 23, 3, 11, 0 },
456 }, 381 },
@@ -461,17 +386,18 @@ static struct ath_rate_table ar5416_11g_ratetable = {
461 386
462static struct ath_rate_table ar5416_11b_ratetable = { 387static struct ath_rate_table ar5416_11b_ratetable = {
463 4, 388 4,
389 {0},
464 { 390 {
465 { TRUE, TRUE, WLAN_PHY_CCK, 1000, /* 1 Mb */ 391 { VALID, VALID, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
466 900, 0x1b, 0x00, (0x80|2), 392 900, 0x1b, 0x00, (0x80|2),
467 0, 0, 1, 0, 0 }, 393 0, 0, 1, 0, 0 },
468 { TRUE, TRUE, WLAN_PHY_CCK, 2000, /* 2 Mb */ 394 { VALID, VALID, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
469 1800, 0x1a, 0x04, (0x80|4), 395 1800, 0x1a, 0x04, (0x80|4),
470 1, 1, 1, 1, 0 }, 396 1, 1, 1, 1, 0 },
471 { TRUE, TRUE, WLAN_PHY_CCK, 5500, /* 5.5 Mb */ 397 { VALID, VALID, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
472 4300, 0x19, 0x04, (0x80|11), 398 4300, 0x19, 0x04, (0x80|11),
473 1, 2, 2, 2, 0 }, 399 1, 2, 2, 2, 0 },
474 { TRUE, TRUE, WLAN_PHY_CCK, 11000, /* 11 Mb */ 400 { VALID, VALID, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
475 7100, 0x18, 0x04, (0x80|22), 401 7100, 0x18, 0x04, (0x80|22),
476 1, 4, 100, 3, 0 }, 402 1, 4, 100, 3, 0 },
477 }, 403 },
@@ -480,48 +406,6 @@ static struct ath_rate_table ar5416_11b_ratetable = {
480 0, /* Phy rates allowed initially */ 406 0, /* Phy rates allowed initially */
481}; 407};
482 408
483static void ar5416_attach_ratetables(struct ath_rate_softc *sc)
484{
485 /*
486 * Attach rate tables.
487 */
488 sc->hw_rate_table[ATH9K_MODE_11B] = &ar5416_11b_ratetable;
489 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable;
490 sc->hw_rate_table[ATH9K_MODE_11G] = &ar5416_11g_ratetable;
491
492 sc->hw_rate_table[ATH9K_MODE_11NA_HT20] = &ar5416_11na_ratetable;
493 sc->hw_rate_table[ATH9K_MODE_11NG_HT20] = &ar5416_11ng_ratetable;
494 sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS] =
495 &ar5416_11na_ratetable;
496 sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS] =
497 &ar5416_11na_ratetable;
498 sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS] =
499 &ar5416_11ng_ratetable;
500 sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS] =
501 &ar5416_11ng_ratetable;
502}
503
504static void ar5416_setquarter_ratetable(struct ath_rate_softc *sc)
505{
506 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable_Quarter;
507 return;
508}
509
510static void ar5416_sethalf_ratetable(struct ath_rate_softc *sc)
511{
512 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable_Half;
513 return;
514}
515
516static void ar5416_setfull_ratetable(struct ath_rate_softc *sc)
517{
518 sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable;
519 return;
520}
521
522/*
523 * Return the median of three numbers
524 */
525static inline int8_t median(int8_t a, int8_t b, int8_t c) 409static inline int8_t median(int8_t a, int8_t b, int8_t c)
526{ 410{
527 if (a >= b) { 411 if (a >= b) {
@@ -541,68 +425,65 @@ static inline int8_t median(int8_t a, int8_t b, int8_t c)
541 } 425 }
542} 426}
543 427
544static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table, 428static void ath_rc_sort_validrates(struct ath_rate_table *rate_table,
545 struct ath_tx_ratectrl *rate_ctrl) 429 struct ath_rate_priv *ath_rc_priv)
546{ 430{
547 u8 i, j, idx, idx_next; 431 u8 i, j, idx, idx_next;
548 432
549 for (i = rate_ctrl->max_valid_rate - 1; i > 0; i--) { 433 for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
550 for (j = 0; j <= i-1; j++) { 434 for (j = 0; j <= i-1; j++) {
551 idx = rate_ctrl->valid_rate_index[j]; 435 idx = ath_rc_priv->valid_rate_index[j];
552 idx_next = rate_ctrl->valid_rate_index[j+1]; 436 idx_next = ath_rc_priv->valid_rate_index[j+1];
553 437
554 if (rate_table->info[idx].ratekbps > 438 if (rate_table->info[idx].ratekbps >
555 rate_table->info[idx_next].ratekbps) { 439 rate_table->info[idx_next].ratekbps) {
556 rate_ctrl->valid_rate_index[j] = idx_next; 440 ath_rc_priv->valid_rate_index[j] = idx_next;
557 rate_ctrl->valid_rate_index[j+1] = idx; 441 ath_rc_priv->valid_rate_index[j+1] = idx;
558 } 442 }
559 } 443 }
560 } 444 }
561} 445}
562 446
563/* Access functions for valid_txrate_mask */ 447static void ath_rc_init_valid_txmask(struct ath_rate_priv *ath_rc_priv)
564
565static void ath_rc_init_valid_txmask(struct ath_tx_ratectrl *rate_ctrl)
566{ 448{
567 u8 i; 449 u8 i;
568 450
569 for (i = 0; i < rate_ctrl->rate_table_size; i++) 451 for (i = 0; i < ath_rc_priv->rate_table_size; i++)
570 rate_ctrl->valid_rate_index[i] = FALSE; 452 ath_rc_priv->valid_rate_index[i] = 0;
571} 453}
572 454
573static inline void ath_rc_set_valid_txmask(struct ath_tx_ratectrl *rate_ctrl, 455static inline void ath_rc_set_valid_txmask(struct ath_rate_priv *ath_rc_priv,
574 u8 index, int valid_tx_rate) 456 u8 index, int valid_tx_rate)
575{ 457{
576 ASSERT(index <= rate_ctrl->rate_table_size); 458 ASSERT(index <= ath_rc_priv->rate_table_size);
577 rate_ctrl->valid_rate_index[index] = valid_tx_rate ? TRUE : FALSE; 459 ath_rc_priv->valid_rate_index[index] = valid_tx_rate ? 1 : 0;
578} 460}
579 461
580static inline int ath_rc_isvalid_txmask(struct ath_tx_ratectrl *rate_ctrl, 462static inline int ath_rc_isvalid_txmask(struct ath_rate_priv *ath_rc_priv,
581 u8 index) 463 u8 index)
582{ 464{
583 ASSERT(index <= rate_ctrl->rate_table_size); 465 ASSERT(index <= ath_rc_priv->rate_table_size);
584 return rate_ctrl->valid_rate_index[index]; 466 return ath_rc_priv->valid_rate_index[index];
585} 467}
586 468
587/* Iterators for valid_txrate_mask */ 469static inline int ath_rc_get_nextvalid_txrate(struct ath_rate_table *rate_table,
588static inline int 470 struct ath_rate_priv *ath_rc_priv,
589ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table, 471 u8 cur_valid_txrate,
590 struct ath_tx_ratectrl *rate_ctrl, 472 u8 *next_idx)
591 u8 cur_valid_txrate,
592 u8 *next_idx)
593{ 473{
594 u8 i; 474 u8 i;
595 475
596 for (i = 0; i < rate_ctrl->max_valid_rate - 1; i++) { 476 for (i = 0; i < ath_rc_priv->max_valid_rate - 1; i++) {
597 if (rate_ctrl->valid_rate_index[i] == cur_valid_txrate) { 477 if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
598 *next_idx = rate_ctrl->valid_rate_index[i+1]; 478 *next_idx = ath_rc_priv->valid_rate_index[i+1];
599 return TRUE; 479 return 1;
600 } 480 }
601 } 481 }
602 482
603 /* No more valid rates */ 483 /* No more valid rates */
604 *next_idx = 0; 484 *next_idx = 0;
605 return FALSE; 485
486 return 0;
606} 487}
607 488
608/* Return true only for single stream */ 489/* Return true only for single stream */
@@ -610,83 +491,72 @@ ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
610static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw) 491static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
611{ 492{
612 if (WLAN_RC_PHY_HT(phy) & !(capflag & WLAN_RC_HT_FLAG)) 493 if (WLAN_RC_PHY_HT(phy) & !(capflag & WLAN_RC_HT_FLAG))
613 return FALSE; 494 return 0;
614 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG)) 495 if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
615 return FALSE; 496 return 0;
616 if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG)) 497 if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG))
617 return FALSE; 498 return 0;
618 if (!ignore_cw && WLAN_RC_PHY_HT(phy)) 499 if (!ignore_cw && WLAN_RC_PHY_HT(phy))
619 if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG)) 500 if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG))
620 return FALSE; 501 return 0;
621 if (!WLAN_RC_PHY_40(phy) && (capflag & WLAN_RC_40_FLAG)) 502 if (!WLAN_RC_PHY_40(phy) && (capflag & WLAN_RC_40_FLAG))
622 return FALSE; 503 return 0;
623 return TRUE; 504 return 1;
624} 505}
625 506
626static inline int 507static inline int
627ath_rc_get_nextlowervalid_txrate(const struct ath_rate_table *rate_table, 508ath_rc_get_nextlowervalid_txrate(struct ath_rate_table *rate_table,
628 struct ath_tx_ratectrl *rate_ctrl, 509 struct ath_rate_priv *ath_rc_priv,
629 u8 cur_valid_txrate, u8 *next_idx) 510 u8 cur_valid_txrate, u8 *next_idx)
630{ 511{
631 int8_t i; 512 int8_t i;
632 513
633 for (i = 1; i < rate_ctrl->max_valid_rate ; i++) { 514 for (i = 1; i < ath_rc_priv->max_valid_rate ; i++) {
634 if (rate_ctrl->valid_rate_index[i] == cur_valid_txrate) { 515 if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
635 *next_idx = rate_ctrl->valid_rate_index[i-1]; 516 *next_idx = ath_rc_priv->valid_rate_index[i-1];
636 return TRUE; 517 return 1;
637 } 518 }
638 } 519 }
639 return FALSE; 520
521 return 0;
640} 522}
641 523
642/* 524static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
643 * Initialize the Valid Rate Index from valid entries in Rate Table 525 struct ath_rate_table *rate_table,
644 */ 526 u32 capflag)
645static u8
646ath_rc_sib_init_validrates(struct ath_rate_node *ath_rc_priv,
647 const struct ath_rate_table *rate_table,
648 u32 capflag)
649{ 527{
650 struct ath_tx_ratectrl *rate_ctrl;
651 u8 i, hi = 0; 528 u8 i, hi = 0;
652 u32 valid; 529 u32 valid;
653 530
654 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
655 for (i = 0; i < rate_table->rate_cnt; i++) { 531 for (i = 0; i < rate_table->rate_cnt; i++) {
656 valid = (ath_rc_priv->single_stream ? 532 valid = (ath_rc_priv->single_stream ?
657 rate_table->info[i].valid_single_stream : 533 rate_table->info[i].valid_single_stream :
658 rate_table->info[i].valid); 534 rate_table->info[i].valid);
659 if (valid == TRUE) { 535 if (valid == 1) {
660 u32 phy = rate_table->info[i].phy; 536 u32 phy = rate_table->info[i].phy;
661 u8 valid_rate_count = 0; 537 u8 valid_rate_count = 0;
662 538
663 if (!ath_rc_valid_phyrate(phy, capflag, FALSE)) 539 if (!ath_rc_valid_phyrate(phy, capflag, 0))
664 continue; 540 continue;
665 541
666 valid_rate_count = rate_ctrl->valid_phy_ratecnt[phy]; 542 valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
667 543
668 rate_ctrl->valid_phy_rateidx[phy][valid_rate_count] = i; 544 ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
669 rate_ctrl->valid_phy_ratecnt[phy] += 1; 545 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
670 ath_rc_set_valid_txmask(rate_ctrl, i, TRUE); 546 ath_rc_set_valid_txmask(ath_rc_priv, i, 1);
671 hi = A_MAX(hi, i); 547 hi = A_MAX(hi, i);
672 } 548 }
673 } 549 }
550
674 return hi; 551 return hi;
675} 552}
676 553
677/* 554static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
678 * Initialize the Valid Rate Index from Rate Set 555 struct ath_rate_table *rate_table,
679 */ 556 struct ath_rateset *rateset,
680static u8 557 u32 capflag)
681ath_rc_sib_setvalid_rates(struct ath_rate_node *ath_rc_priv,
682 const struct ath_rate_table *rate_table,
683 struct ath_rateset *rateset,
684 u32 capflag)
685{ 558{
686 /* XXX: Clean me up and make identation friendly */
687 u8 i, j, hi = 0; 559 u8 i, j, hi = 0;
688 struct ath_tx_ratectrl *rate_ctrl =
689 (struct ath_tx_ratectrl *)(ath_rc_priv);
690 560
691 /* Use intersection of working rates and valid rates */ 561 /* Use intersection of working rates and valid rates */
692 for (i = 0; i < rateset->rs_nrates; i++) { 562 for (i = 0; i < rateset->rs_nrates; i++) {
@@ -695,196 +565,89 @@ ath_rc_sib_setvalid_rates(struct ath_rate_node *ath_rc_priv,
695 u32 valid = (ath_rc_priv->single_stream ? 565 u32 valid = (ath_rc_priv->single_stream ?
696 rate_table->info[j].valid_single_stream : 566 rate_table->info[j].valid_single_stream :
697 rate_table->info[j].valid); 567 rate_table->info[j].valid);
568 u8 rate = rateset->rs_rates[i];
569 u8 dot11rate = rate_table->info[j].dot11rate;
698 570
699 /* We allow a rate only if its valid and the 571 /* We allow a rate only if its valid and the
700 * capflag matches one of the validity 572 * capflag matches one of the validity
701 * (TRUE/TRUE_20/TRUE_40) flags */ 573 * (VALID/VALID_20/VALID_40) flags */
702
703 /* XXX: catch the negative of this branch
704 * first and then continue */
705 if (((rateset->rs_rates[i] & 0x7F) ==
706 (rate_table->info[j].dot11rate & 0x7F)) &&
707 ((valid & WLAN_RC_CAP_MODE(capflag)) ==
708 WLAN_RC_CAP_MODE(capflag)) &&
709 !WLAN_RC_PHY_HT(phy)) {
710 574
575 if (((rate & 0x7F) == (dot11rate & 0x7F)) &&
576 ((valid & WLAN_RC_CAP_MODE(capflag)) ==
577 WLAN_RC_CAP_MODE(capflag)) &&
578 !WLAN_RC_PHY_HT(phy)) {
711 u8 valid_rate_count = 0; 579 u8 valid_rate_count = 0;
712 580
713 if (!ath_rc_valid_phyrate(phy, capflag, FALSE)) 581 if (!ath_rc_valid_phyrate(phy, capflag, 0))
714 continue; 582 continue;
715 583
716 valid_rate_count = 584 valid_rate_count =
717 rate_ctrl->valid_phy_ratecnt[phy]; 585 ath_rc_priv->valid_phy_ratecnt[phy];
718 586
719 rate_ctrl->valid_phy_rateidx[phy] 587 ath_rc_priv->valid_phy_rateidx[phy]
720 [valid_rate_count] = j; 588 [valid_rate_count] = j;
721 rate_ctrl->valid_phy_ratecnt[phy] += 1; 589 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
722 ath_rc_set_valid_txmask(rate_ctrl, j, TRUE); 590 ath_rc_set_valid_txmask(ath_rc_priv, j, 1);
723 hi = A_MAX(hi, j); 591 hi = A_MAX(hi, j);
724 } 592 }
725 } 593 }
726 } 594 }
595
727 return hi; 596 return hi;
728} 597}
729 598
730static u8 599static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
731ath_rc_sib_setvalid_htrates(struct ath_rate_node *ath_rc_priv, 600 struct ath_rate_table *rate_table,
732 const struct ath_rate_table *rate_table, 601 u8 *mcs_set, u32 capflag)
733 u8 *mcs_set, u32 capflag)
734{ 602{
603 struct ath_rateset *rateset = (struct ath_rateset *)mcs_set;
604
735 u8 i, j, hi = 0; 605 u8 i, j, hi = 0;
736 struct ath_tx_ratectrl *rate_ctrl =
737 (struct ath_tx_ratectrl *)(ath_rc_priv);
738 606
739 /* Use intersection of working rates and valid rates */ 607 /* Use intersection of working rates and valid rates */
740 for (i = 0; i < ((struct ath_rateset *)mcs_set)->rs_nrates; i++) { 608 for (i = 0; i < rateset->rs_nrates; i++) {
741 for (j = 0; j < rate_table->rate_cnt; j++) { 609 for (j = 0; j < rate_table->rate_cnt; j++) {
742 u32 phy = rate_table->info[j].phy; 610 u32 phy = rate_table->info[j].phy;
743 u32 valid = (ath_rc_priv->single_stream ? 611 u32 valid = (ath_rc_priv->single_stream ?
744 rate_table->info[j].valid_single_stream : 612 rate_table->info[j].valid_single_stream :
745 rate_table->info[j].valid); 613 rate_table->info[j].valid);
614 u8 rate = rateset->rs_rates[i];
615 u8 dot11rate = rate_table->info[j].dot11rate;
746 616
747 if (((((struct ath_rateset *) 617 if (((rate & 0x7F) != (dot11rate & 0x7F)) ||
748 mcs_set)->rs_rates[i] & 0x7F) !=
749 (rate_table->info[j].dot11rate & 0x7F)) ||
750 !WLAN_RC_PHY_HT(phy) || 618 !WLAN_RC_PHY_HT(phy) ||
751 !WLAN_RC_PHY_HT_VALID(valid, capflag)) 619 !WLAN_RC_PHY_HT_VALID(valid, capflag))
752 continue; 620 continue;
753 621
754 if (!ath_rc_valid_phyrate(phy, capflag, FALSE)) 622 if (!ath_rc_valid_phyrate(phy, capflag, 0))
755 continue; 623 continue;
756 624
757 rate_ctrl->valid_phy_rateidx[phy] 625 ath_rc_priv->valid_phy_rateidx[phy]
758 [rate_ctrl->valid_phy_ratecnt[phy]] = j; 626 [ath_rc_priv->valid_phy_ratecnt[phy]] = j;
759 rate_ctrl->valid_phy_ratecnt[phy] += 1; 627 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
760 ath_rc_set_valid_txmask(rate_ctrl, j, TRUE); 628 ath_rc_set_valid_txmask(ath_rc_priv, j, 1);
761 hi = A_MAX(hi, j); 629 hi = A_MAX(hi, j);
762 } 630 }
763 } 631 }
764 return hi;
765}
766
767/*
768 * Attach to a device instance. Setup the public definition
769 * of how much per-node space we need and setup the private
770 * phy tables that have rate control parameters.
771 */
772struct ath_rate_softc *ath_rate_attach(struct ath_hal *ah)
773{
774 struct ath_rate_softc *asc;
775
776 /* we are only in user context so we can sleep for memory */
777 asc = kzalloc(sizeof(struct ath_rate_softc), GFP_KERNEL);
778 if (asc == NULL)
779 return NULL;
780
781 ar5416_attach_ratetables(asc);
782
783 /* Save Maximum TX Trigger Level (used for 11n) */
784 tx_triglevel_max = ah->ah_caps.tx_triglevel_max;
785 /* return alias for ath_rate_softc * */
786 return asc;
787}
788
789static struct ath_rate_node *ath_rate_node_alloc(struct ath_vap *avp,
790 struct ath_rate_softc *rsc,
791 gfp_t gfp)
792{
793 struct ath_rate_node *anode;
794
795 anode = kzalloc(sizeof(struct ath_rate_node), gfp);
796 if (anode == NULL)
797 return NULL;
798
799 anode->avp = avp;
800 anode->asc = rsc;
801 avp->rc_node = anode;
802
803 return anode;
804}
805
806static void ath_rate_node_free(struct ath_rate_node *anode)
807{
808 if (anode != NULL)
809 kfree(anode);
810}
811
812void ath_rate_detach(struct ath_rate_softc *asc)
813{
814 if (asc != NULL)
815 kfree(asc);
816}
817
818u8 ath_rate_findrateix(struct ath_softc *sc,
819 u8 dot11rate)
820{
821 const struct ath_rate_table *ratetable;
822 struct ath_rate_softc *rsc = sc->sc_rc;
823 int i;
824
825 ratetable = rsc->hw_rate_table[sc->sc_curmode];
826
827 if (WARN_ON(!ratetable))
828 return 0;
829
830 for (i = 0; i < ratetable->rate_cnt; i++) {
831 if ((ratetable->info[i].dot11rate & 0x7f) == (dot11rate & 0x7f))
832 return i;
833 }
834 632
835 return 0; 633 return hi;
836}
837
838/*
839 * Update rate-control state on a device state change. When
840 * operating as a station this includes associate/reassociate
841 * with an AP. Otherwise this gets called, for example, when
842 * the we transition to run state when operating as an AP.
843 */
844void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp)
845{
846 struct ath_rate_softc *asc = sc->sc_rc;
847
848 /* For half and quarter rate channles use different
849 * rate tables
850 */
851 if (sc->sc_ah->ah_curchan->channelFlags & CHANNEL_HALF)
852 ar5416_sethalf_ratetable(asc);
853 else if (sc->sc_ah->ah_curchan->channelFlags & CHANNEL_QUARTER)
854 ar5416_setquarter_ratetable(asc);
855 else /* full rate */
856 ar5416_setfull_ratetable(asc);
857
858 if (avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) {
859 asc->fixedrix =
860 sc->sc_rixmap[avp->av_config.av_fixed_rateset & 0xff];
861 /* NB: check the fixed rate exists */
862 if (asc->fixedrix == 0xff)
863 asc->fixedrix = IEEE80211_FIXED_RATE_NONE;
864 } else {
865 asc->fixedrix = IEEE80211_FIXED_RATE_NONE;
866 }
867} 634}
868 635
869static u8 ath_rc_ratefind_ht(struct ath_softc *sc, 636static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
870 struct ath_rate_node *ath_rc_priv, 637 struct ath_rate_priv *ath_rc_priv,
871 const struct ath_rate_table *rate_table, 638 struct ath_rate_table *rate_table,
872 int probe_allowed, int *is_probing, 639 int probe_allowed, int *is_probing,
873 int is_retry) 640 int is_retry)
874{ 641{
875 u32 dt, best_thruput, this_thruput, now_msec; 642 u32 dt, best_thruput, this_thruput, now_msec;
876 u8 rate, next_rate, best_rate, maxindex, minindex; 643 u8 rate, next_rate, best_rate, maxindex, minindex;
877 int8_t rssi_last, rssi_reduce = 0, index = 0; 644 int8_t rssi_last, rssi_reduce = 0, index = 0;
878 struct ath_tx_ratectrl *rate_ctrl = NULL;
879
880 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv ?
881 (ath_rc_priv) : NULL);
882 645
883 *is_probing = FALSE; 646 *is_probing = 0;
884 647
885 rssi_last = median(rate_ctrl->rssi_last, 648 rssi_last = median(ath_rc_priv->rssi_last,
886 rate_ctrl->rssi_last_prev, 649 ath_rc_priv->rssi_last_prev,
887 rate_ctrl->rssi_last_prev2); 650 ath_rc_priv->rssi_last_prev2);
888 651
889 /* 652 /*
890 * Age (reduce) last ack rssi based on how old it is. 653 * Age (reduce) last ack rssi based on how old it is.
@@ -896,7 +659,7 @@ static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
896 */ 659 */
897 660
898 now_msec = jiffies_to_msecs(jiffies); 661 now_msec = jiffies_to_msecs(jiffies);
899 dt = now_msec - rate_ctrl->rssi_time; 662 dt = now_msec - ath_rc_priv->rssi_time;
900 663
901 if (dt >= 185) 664 if (dt >= 185)
902 rssi_reduce = 10; 665 rssi_reduce = 10;
@@ -915,7 +678,7 @@ static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
915 */ 678 */
916 679
917 best_thruput = 0; 680 best_thruput = 0;
918 maxindex = rate_ctrl->max_valid_rate-1; 681 maxindex = ath_rc_priv->max_valid_rate-1;
919 682
920 minindex = 0; 683 minindex = 0;
921 best_rate = minindex; 684 best_rate = minindex;
@@ -927,8 +690,8 @@ static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
927 for (index = maxindex; index >= minindex ; index--) { 690 for (index = maxindex; index >= minindex ; index--) {
928 u8 per_thres; 691 u8 per_thres;
929 692
930 rate = rate_ctrl->valid_rate_index[index]; 693 rate = ath_rc_priv->valid_rate_index[index];
931 if (rate > rate_ctrl->rate_max_phy) 694 if (rate > ath_rc_priv->rate_max_phy)
932 continue; 695 continue;
933 696
934 /* 697 /*
@@ -942,7 +705,7 @@ static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
942 * 10-15 and we would be worse off then staying 705 * 10-15 and we would be worse off then staying
943 * at the current rate. 706 * at the current rate.
944 */ 707 */
945 per_thres = rate_ctrl->state[rate].per; 708 per_thres = ath_rc_priv->state[rate].per;
946 if (per_thres < 12) 709 if (per_thres < 12)
947 per_thres = 12; 710 per_thres = 12;
948 711
@@ -961,41 +724,35 @@ static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
961 * of max retries, use the min rate for the next retry 724 * of max retries, use the min rate for the next retry
962 */ 725 */
963 if (is_retry) 726 if (is_retry)
964 rate = rate_ctrl->valid_rate_index[minindex]; 727 rate = ath_rc_priv->valid_rate_index[minindex];
965 728
966 rate_ctrl->rssi_last_lookup = rssi_last; 729 ath_rc_priv->rssi_last_lookup = rssi_last;
967 730
968 /* 731 /*
969 * Must check the actual rate (ratekbps) to account for 732 * Must check the actual rate (ratekbps) to account for
970 * non-monoticity of 11g's rate table 733 * non-monoticity of 11g's rate table
971 */ 734 */
972 735
973 if (rate >= rate_ctrl->rate_max_phy && probe_allowed) { 736 if (rate >= ath_rc_priv->rate_max_phy && probe_allowed) {
974 rate = rate_ctrl->rate_max_phy; 737 rate = ath_rc_priv->rate_max_phy;
975 738
976 /* Probe the next allowed phy state */ 739 /* Probe the next allowed phy state */
977 /* FIXME:XXXX Check to make sure ratMax is checked properly */ 740 /* FIXME:XXXX Check to make sure ratMax is checked properly */
978 if (ath_rc_get_nextvalid_txrate(rate_table, 741 if (ath_rc_get_nextvalid_txrate(rate_table,
979 rate_ctrl, rate, &next_rate) && 742 ath_rc_priv, rate, &next_rate) &&
980 (now_msec - rate_ctrl->probe_time > 743 (now_msec - ath_rc_priv->probe_time >
981 rate_table->probe_interval) && 744 rate_table->probe_interval) &&
982 (rate_ctrl->hw_maxretry_pktcnt >= 1)) { 745 (ath_rc_priv->hw_maxretry_pktcnt >= 1)) {
983 rate = next_rate; 746 rate = next_rate;
984 rate_ctrl->probe_rate = rate; 747 ath_rc_priv->probe_rate = rate;
985 rate_ctrl->probe_time = now_msec; 748 ath_rc_priv->probe_time = now_msec;
986 rate_ctrl->hw_maxretry_pktcnt = 0; 749 ath_rc_priv->hw_maxretry_pktcnt = 0;
987 *is_probing = TRUE; 750 *is_probing = 1;
988 } 751 }
989 } 752 }
990 753
991 /* 754 if (rate > (ath_rc_priv->rate_table_size - 1))
992 * Make sure rate is not higher than the allowed maximum. 755 rate = ath_rc_priv->rate_table_size - 1;
993 * We should also enforce the min, but I suspect the min is
994 * normally 1 rather than 0 because of the rate 9 vs 6 issue
995 * in the old code.
996 */
997 if (rate > (rate_ctrl->rate_table_size - 1))
998 rate = rate_ctrl->rate_table_size - 1;
999 756
1000 ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) || 757 ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) ||
1001 (rate_table->info[rate].valid_single_stream && 758 (rate_table->info[rate].valid_single_stream &&
@@ -1004,40 +761,36 @@ static u8 ath_rc_ratefind_ht(struct ath_softc *sc,
1004 return rate; 761 return rate;
1005} 762}
1006 763
1007static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table , 764static void ath_rc_rate_set_series(struct ath_rate_table *rate_table ,
1008 struct ath_rc_series *series, 765 struct ieee80211_tx_rate *rate,
1009 u8 tries, 766 u8 tries, u8 rix, int rtsctsenable)
1010 u8 rix,
1011 int rtsctsenable)
1012{ 767{
1013 series->tries = tries; 768 rate->count = tries;
1014 series->flags = (rtsctsenable ? ATH_RC_RTSCTS_FLAG : 0) | 769 rate->idx = rix;
1015 (WLAN_RC_PHY_DS(rate_table->info[rix].phy) ? 770
1016 ATH_RC_DS_FLAG : 0) | 771 if (rtsctsenable)
1017 (WLAN_RC_PHY_40(rate_table->info[rix].phy) ? 772 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
1018 ATH_RC_CW40_FLAG : 0) | 773 if (WLAN_RC_PHY_40(rate_table->info[rix].phy))
1019 (WLAN_RC_PHY_SGI(rate_table->info[rix].phy) ? 774 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1020 ATH_RC_SGI_FLAG : 0); 775 if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
1021 776 rate->flags |= IEEE80211_TX_RC_SHORT_GI;
1022 series->rix = rate_table->info[rix].base_index; 777 if (WLAN_RC_PHY_HT(rate_table->info[rix].phy))
1023 series->max_4ms_framelen = rate_table->info[rix].max_4ms_framelen; 778 rate->flags |= IEEE80211_TX_RC_MCS;
1024} 779}
1025 780
1026static u8 ath_rc_rate_getidx(struct ath_softc *sc, 781static u8 ath_rc_rate_getidx(struct ath_softc *sc,
1027 struct ath_rate_node *ath_rc_priv, 782 struct ath_rate_priv *ath_rc_priv,
1028 const struct ath_rate_table *rate_table, 783 struct ath_rate_table *rate_table,
1029 u8 rix, u16 stepdown, 784 u8 rix, u16 stepdown,
1030 u16 min_rate) 785 u16 min_rate)
1031{ 786{
1032 u32 j; 787 u32 j;
1033 u8 nextindex; 788 u8 nextindex;
1034 struct ath_tx_ratectrl *rate_ctrl =
1035 (struct ath_tx_ratectrl *)(ath_rc_priv);
1036 789
1037 if (min_rate) { 790 if (min_rate) {
1038 for (j = RATE_TABLE_SIZE; j > 0; j--) { 791 for (j = RATE_TABLE_SIZE; j > 0; j--) {
1039 if (ath_rc_get_nextlowervalid_txrate(rate_table, 792 if (ath_rc_get_nextlowervalid_txrate(rate_table,
1040 rate_ctrl, rix, &nextindex)) 793 ath_rc_priv, rix, &nextindex))
1041 rix = nextindex; 794 rix = nextindex;
1042 else 795 else
1043 break; 796 break;
@@ -1045,7 +798,7 @@ static u8 ath_rc_rate_getidx(struct ath_softc *sc,
1045 } else { 798 } else {
1046 for (j = stepdown; j > 0; j--) { 799 for (j = stepdown; j > 0; j--) {
1047 if (ath_rc_get_nextlowervalid_txrate(rate_table, 800 if (ath_rc_get_nextlowervalid_txrate(rate_table,
1048 rate_ctrl, rix, &nextindex)) 801 ath_rc_priv, rix, &nextindex))
1049 rix = nextindex; 802 rix = nextindex;
1050 else 803 else
1051 break; 804 break;
@@ -1055,41 +808,39 @@ static u8 ath_rc_rate_getidx(struct ath_softc *sc,
1055} 808}
1056 809
1057static void ath_rc_ratefind(struct ath_softc *sc, 810static void ath_rc_ratefind(struct ath_softc *sc,
1058 struct ath_rate_node *ath_rc_priv, 811 struct ath_rate_priv *ath_rc_priv,
1059 int num_tries, int num_rates, unsigned int rcflag, 812 int num_tries, int num_rates,
1060 struct ath_rc_series series[], int *is_probe, 813 struct ieee80211_tx_info *tx_info, int *is_probe,
1061 int is_retry) 814 int is_retry)
1062{ 815{
1063 u8 try_per_rate = 0, i = 0, rix, nrix; 816 u8 try_per_rate = 0, i = 0, rix, nrix;
1064 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1065 struct ath_rate_table *rate_table; 817 struct ath_rate_table *rate_table;
818 struct ieee80211_tx_rate *rates = tx_info->control.rates;
1066 819
1067 rate_table = 820 rate_table = sc->cur_rate_table;
1068 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode]; 821 rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table, 1,
1069 rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table,
1070 (rcflag & ATH_RC_PROBE_ALLOWED) ? 1 : 0,
1071 is_probe, is_retry); 822 is_probe, is_retry);
1072 nrix = rix; 823 nrix = rix;
1073 824
1074 if ((rcflag & ATH_RC_PROBE_ALLOWED) && (*is_probe)) { 825 if (*is_probe) {
1075 /* set one try for probe rates. For the 826 /* set one try for probe rates. For the
1076 * probes don't enable rts */ 827 * probes don't enable rts */
1077 ath_rc_rate_set_series(rate_table, 828 ath_rc_rate_set_series(rate_table,
1078 &series[i++], 1, nrix, FALSE); 829 &rates[i++], 1, nrix, 0);
1079 830
1080 try_per_rate = (num_tries/num_rates); 831 try_per_rate = (num_tries/num_rates);
1081 /* Get the next tried/allowed rate. No RTS for the next series 832 /* Get the next tried/allowed rate. No RTS for the next series
1082 * after the probe rate 833 * after the probe rate
1083 */ 834 */
1084 nrix = ath_rc_rate_getidx(sc, 835 nrix = ath_rc_rate_getidx(sc,
1085 ath_rc_priv, rate_table, nrix, 1, FALSE); 836 ath_rc_priv, rate_table, nrix, 1, 0);
1086 ath_rc_rate_set_series(rate_table, 837 ath_rc_rate_set_series(rate_table,
1087 &series[i++], try_per_rate, nrix, 0); 838 &rates[i++], try_per_rate, nrix, 0);
1088 } else { 839 } else {
1089 try_per_rate = (num_tries/num_rates); 840 try_per_rate = (num_tries/num_rates);
1090 /* Set the choosen rate. No RTS for first series entry. */ 841 /* Set the choosen rate. No RTS for first series entry. */
1091 ath_rc_rate_set_series(rate_table, 842 ath_rc_rate_set_series(rate_table,
1092 &series[i++], try_per_rate, nrix, FALSE); 843 &rates[i++], try_per_rate, nrix, 0);
1093 } 844 }
1094 845
1095 /* Fill in the other rates for multirate retry */ 846 /* Fill in the other rates for multirate retry */
@@ -1099,14 +850,13 @@ static void ath_rc_ratefind(struct ath_softc *sc,
1099 850
1100 try_num = ((i + 1) == num_rates) ? 851 try_num = ((i + 1) == num_rates) ?
1101 num_tries - (try_per_rate * i) : try_per_rate ; 852 num_tries - (try_per_rate * i) : try_per_rate ;
1102 min_rate = (((i + 1) == num_rates) && 853 min_rate = (((i + 1) == num_rates) && 0);
1103 (rcflag & ATH_RC_MINRATE_LASTRATE)) ? 1 : 0;
1104 854
1105 nrix = ath_rc_rate_getidx(sc, ath_rc_priv, 855 nrix = ath_rc_rate_getidx(sc, ath_rc_priv,
1106 rate_table, nrix, 1, min_rate); 856 rate_table, nrix, 1, min_rate);
1107 /* All other rates in the series have RTS enabled */ 857 /* All other rates in the series have RTS enabled */
1108 ath_rc_rate_set_series(rate_table, 858 ath_rc_rate_set_series(rate_table,
1109 &series[i], try_num, nrix, TRUE); 859 &rates[i], try_num, nrix, 1);
1110 } 860 }
1111 861
1112 /* 862 /*
@@ -1124,115 +874,29 @@ static void ath_rc_ratefind(struct ath_softc *sc,
1124 * So, set fourth rate in series to be same as third one for 874 * So, set fourth rate in series to be same as third one for
1125 * above conditions. 875 * above conditions.
1126 */ 876 */
1127 if ((sc->sc_curmode == ATH9K_MODE_11NG_HT20) || 877 if ((sc->hw->conf.channel->band == IEEE80211_BAND_2GHZ) &&
1128 (sc->sc_curmode == ATH9K_MODE_11NG_HT40PLUS) || 878 (sc->hw->conf.ht.enabled)) {
1129 (sc->sc_curmode == ATH9K_MODE_11NG_HT40MINUS)) { 879 u8 dot11rate = rate_table->info[rix].dot11rate;
1130 u8 dot11rate = rate_table->info[rix].dot11rate;
1131 u8 phy = rate_table->info[rix].phy; 880 u8 phy = rate_table->info[rix].phy;
1132 if (i == 4 && 881 if (i == 4 &&
1133 ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) || 882 ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
1134 (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) { 883 (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
1135 series[3].rix = series[2].rix; 884 rates[3].idx = rates[2].idx;
1136 series[3].flags = series[2].flags; 885 rates[3].flags = rates[2].flags;
1137 series[3].max_4ms_framelen = series[2].max_4ms_framelen;
1138 }
1139 }
1140}
1141
1142/*
1143 * Return the Tx rate series.
1144 */
1145static void ath_rate_findrate(struct ath_softc *sc,
1146 struct ath_rate_node *ath_rc_priv,
1147 int num_tries,
1148 int num_rates,
1149 unsigned int rcflag,
1150 struct ath_rc_series series[],
1151 int *is_probe,
1152 int is_retry)
1153{
1154 struct ath_vap *avp = ath_rc_priv->avp;
1155
1156 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
1157
1158 if (!num_rates || !num_tries)
1159 return;
1160
1161 if (avp->av_config.av_fixed_rateset == IEEE80211_FIXED_RATE_NONE) {
1162 ath_rc_ratefind(sc, ath_rc_priv, num_tries, num_rates,
1163 rcflag, series, is_probe, is_retry);
1164 } else {
1165 /* Fixed rate */
1166 int idx;
1167 u8 flags;
1168 u32 rix;
1169 struct ath_rate_softc *asc = ath_rc_priv->asc;
1170 struct ath_rate_table *rate_table;
1171
1172 rate_table = (struct ath_rate_table *)
1173 asc->hw_rate_table[sc->sc_curmode];
1174
1175 for (idx = 0; idx < 4; idx++) {
1176 unsigned int mcs;
1177 u8 series_rix = 0;
1178
1179 series[idx].tries = IEEE80211_RATE_IDX_ENTRY(
1180 avp->av_config.av_fixed_retryset, idx);
1181
1182 mcs = IEEE80211_RATE_IDX_ENTRY(
1183 avp->av_config.av_fixed_rateset, idx);
1184
1185 if (idx == 3 && (mcs & 0xf0) == 0x70)
1186 mcs = (mcs & ~0xf0)|0x80;
1187
1188 if (!(mcs & 0x80))
1189 flags = 0;
1190 else
1191 flags = ((ath_rc_priv->ht_cap &
1192 WLAN_RC_DS_FLAG) ?
1193 ATH_RC_DS_FLAG : 0) |
1194 ((ath_rc_priv->ht_cap &
1195 WLAN_RC_40_FLAG) ?
1196 ATH_RC_CW40_FLAG : 0) |
1197 ((ath_rc_priv->ht_cap &
1198 WLAN_RC_SGI_FLAG) ?
1199 ((ath_rc_priv->ht_cap &
1200 WLAN_RC_40_FLAG) ?
1201 ATH_RC_SGI_FLAG : 0) : 0);
1202
1203 series[idx].rix = sc->sc_rixmap[mcs];
1204 series_rix = series[idx].rix;
1205
1206 /* XXX: Give me some cleanup love */
1207 if ((flags & ATH_RC_CW40_FLAG) &&
1208 (flags & ATH_RC_SGI_FLAG))
1209 rix = rate_table->info[series_rix].ht_index;
1210 else if (flags & ATH_RC_SGI_FLAG)
1211 rix = rate_table->info[series_rix].sgi_index;
1212 else if (flags & ATH_RC_CW40_FLAG)
1213 rix = rate_table->info[series_rix].cw40index;
1214 else
1215 rix = rate_table->info[series_rix].base_index;
1216 series[idx].max_4ms_framelen =
1217 rate_table->info[rix].max_4ms_framelen;
1218 series[idx].flags = flags;
1219 } 886 }
1220 } 887 }
1221} 888}
1222 889
1223static void ath_rc_update_ht(struct ath_softc *sc, 890static bool ath_rc_update_per(struct ath_softc *sc,
1224 struct ath_rate_node *ath_rc_priv, 891 struct ath_rate_table *rate_table,
1225 struct ath_tx_info_priv *info_priv, 892 struct ath_rate_priv *ath_rc_priv,
1226 int tx_rate, int xretries, int retries) 893 struct ath_tx_info_priv *tx_info_priv,
894 int tx_rate, int xretries, int retries,
895 u32 now_msec)
1227{ 896{
1228 struct ath_tx_ratectrl *rate_ctrl; 897 bool state_change = false;
1229 u32 now_msec = jiffies_to_msecs(jiffies); 898 int count;
1230 int state_change = FALSE, rate, count;
1231 u8 last_per; 899 u8 last_per;
1232 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc;
1233 struct ath_rate_table *rate_table =
1234 (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode];
1235
1236 static u32 nretry_to_per_lookup[10] = { 900 static u32 nretry_to_per_lookup[10] = {
1237 100 * 0 / 1, 901 100 * 0 / 1,
1238 100 * 1 / 4, 902 100 * 1 / 4,
@@ -1246,56 +910,35 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1246 100 * 9 / 10 910 100 * 9 / 10
1247 }; 911 };
1248 912
1249 if (!ath_rc_priv) 913 last_per = ath_rc_priv->state[tx_rate].per;
1250 return;
1251
1252 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
1253
1254 ASSERT(tx_rate >= 0);
1255 if (tx_rate < 0)
1256 return;
1257
1258 /* To compensate for some imbalance between ctrl and ext. channel */
1259
1260 if (WLAN_RC_PHY_40(rate_table->info[tx_rate].phy))
1261 info_priv->tx.ts_rssi =
1262 info_priv->tx.ts_rssi < 3 ? 0 :
1263 info_priv->tx.ts_rssi - 3;
1264
1265 last_per = rate_ctrl->state[tx_rate].per;
1266 914
1267 if (xretries) { 915 if (xretries) {
1268 /* Update the PER. */
1269 if (xretries == 1) { 916 if (xretries == 1) {
1270 rate_ctrl->state[tx_rate].per += 30; 917 ath_rc_priv->state[tx_rate].per += 30;
1271 if (rate_ctrl->state[tx_rate].per > 100) 918 if (ath_rc_priv->state[tx_rate].per > 100)
1272 rate_ctrl->state[tx_rate].per = 100; 919 ath_rc_priv->state[tx_rate].per = 100;
1273 } else { 920 } else {
1274 /* xretries == 2 */ 921 /* xretries == 2 */
1275 count = sizeof(nretry_to_per_lookup) / 922 count = ARRAY_SIZE(nretry_to_per_lookup);
1276 sizeof(nretry_to_per_lookup[0]);
1277 if (retries >= count) 923 if (retries >= count)
1278 retries = count - 1; 924 retries = count - 1;
925
1279 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */ 926 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
1280 rate_ctrl->state[tx_rate].per = 927 ath_rc_priv->state[tx_rate].per =
1281 (u8)(rate_ctrl->state[tx_rate].per - 928 (u8)(last_per - (last_per >> 3) + (100 >> 3));
1282 (rate_ctrl->state[tx_rate].per >> 3) +
1283 ((100) >> 3));
1284 } 929 }
1285 930
1286 /* xretries == 1 or 2 */ 931 /* xretries == 1 or 2 */
1287 932
1288 if (rate_ctrl->probe_rate == tx_rate) 933 if (ath_rc_priv->probe_rate == tx_rate)
1289 rate_ctrl->probe_rate = 0; 934 ath_rc_priv->probe_rate = 0;
1290 935
1291 } else { /* xretries == 0 */ 936 } else { /* xretries == 0 */
1292 /* Update the PER. */ 937 count = ARRAY_SIZE(nretry_to_per_lookup);
1293 /* Make sure it doesn't index out of array's bounds. */
1294 count = sizeof(nretry_to_per_lookup) /
1295 sizeof(nretry_to_per_lookup[0]);
1296 if (retries >= count) 938 if (retries >= count)
1297 retries = count - 1; 939 retries = count - 1;
1298 if (info_priv->n_bad_frames) { 940
941 if (tx_info_priv->n_bad_frames) {
1299 /* new_PER = 7/8*old_PER + 1/8*(currentPER) 942 /* new_PER = 7/8*old_PER + 1/8*(currentPER)
1300 * Assuming that n_frames is not 0. The current PER 943 * Assuming that n_frames is not 0. The current PER
1301 * from the retries is 100 * retries / (retries+1), 944 * from the retries is 100 * retries / (retries+1),
@@ -1308,37 +951,35 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1308 * the above PER. The expression below is a 951 * the above PER. The expression below is a
1309 * simplified version of the sum of these two terms. 952 * simplified version of the sum of these two terms.
1310 */ 953 */
1311 if (info_priv->n_frames > 0) 954 if (tx_info_priv->n_frames > 0) {
1312 rate_ctrl->state[tx_rate].per 955 int n_frames, n_bad_frames;
1313 = (u8) 956 u8 cur_per, new_per;
1314 (rate_ctrl->state[tx_rate].per - 957
1315 (rate_ctrl->state[tx_rate].per >> 3) + 958 n_bad_frames = retries * tx_info_priv->n_frames +
1316 ((100*(retries*info_priv->n_frames + 959 tx_info_priv->n_bad_frames;
1317 info_priv->n_bad_frames) / 960 n_frames = tx_info_priv->n_frames * (retries + 1);
1318 (info_priv->n_frames * 961 cur_per = (100 * n_bad_frames / n_frames) >> 3;
1319 (retries+1))) >> 3)); 962 new_per = (u8)(last_per - (last_per >> 3) + cur_per);
963 ath_rc_priv->state[tx_rate].per = new_per;
964 }
1320 } else { 965 } else {
1321 /* new_PER = 7/8*old_PER + 1/8*(currentPER) */ 966 ath_rc_priv->state[tx_rate].per =
1322 967 (u8)(last_per - (last_per >> 3) +
1323 rate_ctrl->state[tx_rate].per = (u8) 968 (nretry_to_per_lookup[retries] >> 3));
1324 (rate_ctrl->state[tx_rate].per -
1325 (rate_ctrl->state[tx_rate].per >> 3) +
1326 (nretry_to_per_lookup[retries] >> 3));
1327 } 969 }
1328 970
1329 rate_ctrl->rssi_last_prev2 = rate_ctrl->rssi_last_prev; 971 ath_rc_priv->rssi_last_prev2 = ath_rc_priv->rssi_last_prev;
1330 rate_ctrl->rssi_last_prev = rate_ctrl->rssi_last; 972 ath_rc_priv->rssi_last_prev = ath_rc_priv->rssi_last;
1331 rate_ctrl->rssi_last = info_priv->tx.ts_rssi; 973 ath_rc_priv->rssi_last = tx_info_priv->tx.ts_rssi;
1332 rate_ctrl->rssi_time = now_msec; 974 ath_rc_priv->rssi_time = now_msec;
1333 975
1334 /* 976 /*
1335 * If we got at most one retry then increase the max rate if 977 * If we got at most one retry then increase the max rate if
1336 * this was a probe. Otherwise, ignore the probe. 978 * this was a probe. Otherwise, ignore the probe.
1337 */ 979 */
1338 980 if (ath_rc_priv->probe_rate && ath_rc_priv->probe_rate == tx_rate) {
1339 if (rate_ctrl->probe_rate && rate_ctrl->probe_rate == tx_rate) { 981 if (retries > 0 || 2 * tx_info_priv->n_bad_frames >
1340 if (retries > 0 || 2 * info_priv->n_bad_frames > 982 tx_info_priv->n_frames) {
1341 info_priv->n_frames) {
1342 /* 983 /*
1343 * Since we probed with just a single attempt, 984 * Since we probed with just a single attempt,
1344 * any retries means the probe failed. Also, 985 * any retries means the probe failed. Also,
@@ -1346,17 +987,18 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1346 * the subframes were bad then also consider 987 * the subframes were bad then also consider
1347 * the probe a failure. 988 * the probe a failure.
1348 */ 989 */
1349 rate_ctrl->probe_rate = 0; 990 ath_rc_priv->probe_rate = 0;
1350 } else { 991 } else {
1351 u8 probe_rate = 0; 992 u8 probe_rate = 0;
1352 993
1353 rate_ctrl->rate_max_phy = rate_ctrl->probe_rate; 994 ath_rc_priv->rate_max_phy =
1354 probe_rate = rate_ctrl->probe_rate; 995 ath_rc_priv->probe_rate;
996 probe_rate = ath_rc_priv->probe_rate;
1355 997
1356 if (rate_ctrl->state[probe_rate].per > 30) 998 if (ath_rc_priv->state[probe_rate].per > 30)
1357 rate_ctrl->state[probe_rate].per = 20; 999 ath_rc_priv->state[probe_rate].per = 20;
1358 1000
1359 rate_ctrl->probe_rate = 0; 1001 ath_rc_priv->probe_rate = 0;
1360 1002
1361 /* 1003 /*
1362 * Since this probe succeeded, we allow the next 1004 * Since this probe succeeded, we allow the next
@@ -1364,8 +1006,8 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1364 * to move up faster if the probes are 1006 * to move up faster if the probes are
1365 * succesful. 1007 * succesful.
1366 */ 1008 */
1367 rate_ctrl->probe_time = now_msec - 1009 ath_rc_priv->probe_time =
1368 rate_table->probe_interval / 2; 1010 now_msec - rate_table->probe_interval / 2;
1369 } 1011 }
1370 } 1012 }
1371 1013
@@ -1375,74 +1017,114 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1375 * this was because of collisions or poor signal. 1017 * this was because of collisions or poor signal.
1376 * 1018 *
1377 * Later: if rssi_ack is close to 1019 * Later: if rssi_ack is close to
1378 * rate_ctrl->state[txRate].rssi_thres and we see lots 1020 * ath_rc_priv->state[txRate].rssi_thres and we see lots
1379 * of retries, then we could increase 1021 * of retries, then we could increase
1380 * rate_ctrl->state[txRate].rssi_thres. 1022 * ath_rc_priv->state[txRate].rssi_thres.
1381 */ 1023 */
1382 rate_ctrl->hw_maxretry_pktcnt = 0; 1024 ath_rc_priv->hw_maxretry_pktcnt = 0;
1383 } else { 1025 } else {
1026 int32_t rssi_ackAvg;
1027 int8_t rssi_thres;
1028 int8_t rssi_ack_vmin;
1029
1384 /* 1030 /*
1385 * It worked with no retries. First ignore bogus (small) 1031 * It worked with no retries. First ignore bogus (small)
1386 * rssi_ack values. 1032 * rssi_ack values.
1387 */ 1033 */
1388 if (tx_rate == rate_ctrl->rate_max_phy && 1034 if (tx_rate == ath_rc_priv->rate_max_phy &&
1389 rate_ctrl->hw_maxretry_pktcnt < 255) { 1035 ath_rc_priv->hw_maxretry_pktcnt < 255) {
1390 rate_ctrl->hw_maxretry_pktcnt++; 1036 ath_rc_priv->hw_maxretry_pktcnt++;
1391 } 1037 }
1392 1038
1393 if (info_priv->tx.ts_rssi >= 1039 if (tx_info_priv->tx.ts_rssi <
1394 rate_table->info[tx_rate].rssi_ack_validmin) { 1040 rate_table->info[tx_rate].rssi_ack_validmin)
1395 /* Average the rssi */ 1041 goto exit;
1396 if (tx_rate != rate_ctrl->rssi_sum_rate) {
1397 rate_ctrl->rssi_sum_rate = tx_rate;
1398 rate_ctrl->rssi_sum =
1399 rate_ctrl->rssi_sum_cnt = 0;
1400 }
1401 1042
1402 rate_ctrl->rssi_sum += info_priv->tx.ts_rssi; 1043 /* Average the rssi */
1403 rate_ctrl->rssi_sum_cnt++; 1044 if (tx_rate != ath_rc_priv->rssi_sum_rate) {
1404 1045 ath_rc_priv->rssi_sum_rate = tx_rate;
1405 if (rate_ctrl->rssi_sum_cnt > 4) { 1046 ath_rc_priv->rssi_sum =
1406 int32_t rssi_ackAvg = 1047 ath_rc_priv->rssi_sum_cnt = 0;
1407 (rate_ctrl->rssi_sum + 2) / 4;
1408 int8_t rssi_thres =
1409 rate_ctrl->state[tx_rate].
1410 rssi_thres;
1411 int8_t rssi_ack_vmin =
1412 rate_table->info[tx_rate].
1413 rssi_ack_validmin;
1414
1415 rate_ctrl->rssi_sum =
1416 rate_ctrl->rssi_sum_cnt = 0;
1417
1418 /* Now reduce the current
1419 * rssi threshold. */
1420 if ((rssi_ackAvg < rssi_thres + 2) &&
1421 (rssi_thres > rssi_ack_vmin)) {
1422 rate_ctrl->state[tx_rate].
1423 rssi_thres--;
1424 }
1425
1426 state_change = TRUE;
1427 }
1428 } 1048 }
1049
1050 ath_rc_priv->rssi_sum += tx_info_priv->tx.ts_rssi;
1051 ath_rc_priv->rssi_sum_cnt++;
1052
1053 if (ath_rc_priv->rssi_sum_cnt < 4)
1054 goto exit;
1055
1056 rssi_ackAvg =
1057 (ath_rc_priv->rssi_sum + 2) / 4;
1058 rssi_thres =
1059 ath_rc_priv->state[tx_rate].rssi_thres;
1060 rssi_ack_vmin =
1061 rate_table->info[tx_rate].rssi_ack_validmin;
1062
1063 ath_rc_priv->rssi_sum =
1064 ath_rc_priv->rssi_sum_cnt = 0;
1065
1066 /* Now reduce the current rssi threshold */
1067 if ((rssi_ackAvg < rssi_thres + 2) &&
1068 (rssi_thres > rssi_ack_vmin)) {
1069 ath_rc_priv->state[tx_rate].rssi_thres--;
1070 }
1071
1072 state_change = true;
1429 } 1073 }
1430 } 1074 }
1075exit:
1076 return state_change;
1077}
1078
1079/* Update PER, RSSI and whatever else that the code thinks it is doing.
1080 If you can make sense of all this, you really need to go out more. */
1081
1082static void ath_rc_update_ht(struct ath_softc *sc,
1083 struct ath_rate_priv *ath_rc_priv,
1084 struct ath_tx_info_priv *tx_info_priv,
1085 int tx_rate, int xretries, int retries)
1086{
1087#define CHK_RSSI(rate) \
1088 ((ath_rc_priv->state[(rate)].rssi_thres + \
1089 rate_table->info[(rate)].rssi_ack_deltamin) > \
1090 ath_rc_priv->state[(rate)+1].rssi_thres)
1431 1091
1432 /* For all cases */ 1092 u32 now_msec = jiffies_to_msecs(jiffies);
1093 int rate;
1094 u8 last_per;
1095 bool state_change = false;
1096 struct ath_rate_table *rate_table = sc->cur_rate_table;
1097 int size = ath_rc_priv->rate_table_size;
1098
1099 if ((tx_rate < 0) || (tx_rate > rate_table->rate_cnt))
1100 return;
1101
1102 /* To compensate for some imbalance between ctrl and ext. channel */
1103
1104 if (WLAN_RC_PHY_40(rate_table->info[tx_rate].phy))
1105 tx_info_priv->tx.ts_rssi =
1106 tx_info_priv->tx.ts_rssi < 3 ? 0 :
1107 tx_info_priv->tx.ts_rssi - 3;
1108
1109 last_per = ath_rc_priv->state[tx_rate].per;
1110
1111 /* Update PER first */
1112 state_change = ath_rc_update_per(sc, rate_table, ath_rc_priv,
1113 tx_info_priv, tx_rate, xretries,
1114 retries, now_msec);
1433 1115
1434 /* 1116 /*
1435 * If this rate looks bad (high PER) then stop using it for 1117 * If this rate looks bad (high PER) then stop using it for
1436 * a while (except if we are probing). 1118 * a while (except if we are probing).
1437 */ 1119 */
1438 if (rate_ctrl->state[tx_rate].per >= 55 && tx_rate > 0 && 1120 if (ath_rc_priv->state[tx_rate].per >= 55 && tx_rate > 0 &&
1439 rate_table->info[tx_rate].ratekbps <= 1121 rate_table->info[tx_rate].ratekbps <=
1440 rate_table->info[rate_ctrl->rate_max_phy].ratekbps) { 1122 rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
1441 ath_rc_get_nextlowervalid_txrate(rate_table, rate_ctrl, 1123 ath_rc_get_nextlowervalid_txrate(rate_table, ath_rc_priv,
1442 (u8) tx_rate, &rate_ctrl->rate_max_phy); 1124 (u8)tx_rate, &ath_rc_priv->rate_max_phy);
1443 1125
1444 /* Don't probe for a little while. */ 1126 /* Don't probe for a little while. */
1445 rate_ctrl->probe_time = now_msec; 1127 ath_rc_priv->probe_time = now_msec;
1446 } 1128 }
1447 1129
1448 if (state_change) { 1130 if (state_change) {
@@ -1453,20 +1135,15 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1453 * made to keep the rssi thresholds monotonically 1135 * made to keep the rssi thresholds monotonically
1454 * increasing between the CCK and OFDM rates.) 1136 * increasing between the CCK and OFDM rates.)
1455 */ 1137 */
1456 for (rate = tx_rate; rate < 1138 for (rate = tx_rate; rate < size - 1; rate++) {
1457 rate_ctrl->rate_table_size - 1; rate++) {
1458 if (rate_table->info[rate+1].phy != 1139 if (rate_table->info[rate+1].phy !=
1459 rate_table->info[tx_rate].phy) 1140 rate_table->info[tx_rate].phy)
1460 break; 1141 break;
1461 1142
1462 if (rate_ctrl->state[rate].rssi_thres + 1143 if (CHK_RSSI(rate)) {
1463 rate_table->info[rate].rssi_ack_deltamin > 1144 ath_rc_priv->state[rate+1].rssi_thres =
1464 rate_ctrl->state[rate+1].rssi_thres) { 1145 ath_rc_priv->state[rate].rssi_thres +
1465 rate_ctrl->state[rate+1].rssi_thres = 1146 rate_table->info[rate].rssi_ack_deltamin;
1466 rate_ctrl->state[rate].
1467 rssi_thres +
1468 rate_table->info[rate].
1469 rssi_ack_deltamin;
1470 } 1147 }
1471 } 1148 }
1472 1149
@@ -1476,27 +1153,20 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1476 rate_table->info[tx_rate].phy) 1153 rate_table->info[tx_rate].phy)
1477 break; 1154 break;
1478 1155
1479 if (rate_ctrl->state[rate].rssi_thres + 1156 if (CHK_RSSI(rate)) {
1480 rate_table->info[rate].rssi_ack_deltamin > 1157 if (ath_rc_priv->state[rate+1].rssi_thres <
1481 rate_ctrl->state[rate+1].rssi_thres) { 1158 rate_table->info[rate].rssi_ack_deltamin)
1482 if (rate_ctrl->state[rate+1].rssi_thres < 1159 ath_rc_priv->state[rate].rssi_thres = 0;
1483 rate_table->info[rate].
1484 rssi_ack_deltamin)
1485 rate_ctrl->state[rate].rssi_thres = 0;
1486 else { 1160 else {
1487 rate_ctrl->state[rate].rssi_thres = 1161 ath_rc_priv->state[rate].rssi_thres =
1488 rate_ctrl->state[rate+1]. 1162 ath_rc_priv->state[rate+1].rssi_thres -
1489 rssi_thres - 1163 rate_table->info[rate].rssi_ack_deltamin;
1490 rate_table->info[rate].
1491 rssi_ack_deltamin;
1492 } 1164 }
1493 1165
1494 if (rate_ctrl->state[rate].rssi_thres < 1166 if (ath_rc_priv->state[rate].rssi_thres <
1495 rate_table->info[rate]. 1167 rate_table->info[rate].rssi_ack_validmin) {
1496 rssi_ack_validmin) { 1168 ath_rc_priv->state[rate].rssi_thres =
1497 rate_ctrl->state[rate].rssi_thres = 1169 rate_table->info[rate].rssi_ack_validmin;
1498 rate_table->info[rate].
1499 rssi_ack_validmin;
1500 } 1170 }
1501 } 1171 }
1502 } 1172 }
@@ -1504,74 +1174,86 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1504 1174
1505 /* Make sure the rates below this have lower PER */ 1175 /* Make sure the rates below this have lower PER */
1506 /* Monotonicity is kept only for rates below the current rate. */ 1176 /* Monotonicity is kept only for rates below the current rate. */
1507 if (rate_ctrl->state[tx_rate].per < last_per) { 1177 if (ath_rc_priv->state[tx_rate].per < last_per) {
1508 for (rate = tx_rate - 1; rate >= 0; rate--) { 1178 for (rate = tx_rate - 1; rate >= 0; rate--) {
1509 if (rate_table->info[rate].phy != 1179 if (rate_table->info[rate].phy !=
1510 rate_table->info[tx_rate].phy) 1180 rate_table->info[tx_rate].phy)
1511 break; 1181 break;
1512 1182
1513 if (rate_ctrl->state[rate].per > 1183 if (ath_rc_priv->state[rate].per >
1514 rate_ctrl->state[rate+1].per) { 1184 ath_rc_priv->state[rate+1].per) {
1515 rate_ctrl->state[rate].per = 1185 ath_rc_priv->state[rate].per =
1516 rate_ctrl->state[rate+1].per; 1186 ath_rc_priv->state[rate+1].per;
1517 } 1187 }
1518 } 1188 }
1519 } 1189 }
1520 1190
1521 /* Maintain monotonicity for rates above the current rate */ 1191 /* Maintain monotonicity for rates above the current rate */
1522 for (rate = tx_rate; rate < rate_ctrl->rate_table_size - 1; rate++) { 1192 for (rate = tx_rate; rate < size - 1; rate++) {
1523 if (rate_ctrl->state[rate+1].per < rate_ctrl->state[rate].per) 1193 if (ath_rc_priv->state[rate+1].per <
1524 rate_ctrl->state[rate+1].per = 1194 ath_rc_priv->state[rate].per)
1525 rate_ctrl->state[rate].per; 1195 ath_rc_priv->state[rate+1].per =
1196 ath_rc_priv->state[rate].per;
1526 } 1197 }
1527 1198
1528 /* Every so often, we reduce the thresholds and 1199 /* Every so often, we reduce the thresholds and
1529 * PER (different for CCK and OFDM). */ 1200 * PER (different for CCK and OFDM). */
1530 if (now_msec - rate_ctrl->rssi_down_time >= 1201 if (now_msec - ath_rc_priv->rssi_down_time >=
1531 rate_table->rssi_reduce_interval) { 1202 rate_table->rssi_reduce_interval) {
1532 1203
1533 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) { 1204 for (rate = 0; rate < size; rate++) {
1534 if (rate_ctrl->state[rate].rssi_thres > 1205 if (ath_rc_priv->state[rate].rssi_thres >
1535 rate_table->info[rate].rssi_ack_validmin) 1206 rate_table->info[rate].rssi_ack_validmin)
1536 rate_ctrl->state[rate].rssi_thres -= 1; 1207 ath_rc_priv->state[rate].rssi_thres -= 1;
1537 } 1208 }
1538 rate_ctrl->rssi_down_time = now_msec; 1209 ath_rc_priv->rssi_down_time = now_msec;
1539 } 1210 }
1540 1211
1541 /* Every so often, we reduce the thresholds 1212 /* Every so often, we reduce the thresholds
1542 * and PER (different for CCK and OFDM). */ 1213 * and PER (different for CCK and OFDM). */
1543 if (now_msec - rate_ctrl->per_down_time >= 1214 if (now_msec - ath_rc_priv->per_down_time >=
1544 rate_table->rssi_reduce_interval) { 1215 rate_table->rssi_reduce_interval) {
1545 for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) { 1216 for (rate = 0; rate < size; rate++) {
1546 rate_ctrl->state[rate].per = 1217 ath_rc_priv->state[rate].per =
1547 7 * rate_ctrl->state[rate].per / 8; 1218 7 * ath_rc_priv->state[rate].per / 8;
1548 } 1219 }
1549 1220
1550 rate_ctrl->per_down_time = now_msec; 1221 ath_rc_priv->per_down_time = now_msec;
1551 } 1222 }
1223
1224#undef CHK_RSSI
1552} 1225}
1553 1226
1554/* 1227static int ath_rc_get_rateindex(struct ath_rate_table *rate_table,
1555 * This routine is called in rate control callback tx_status() to give 1228 struct ieee80211_tx_rate *rate)
1556 * the status of previous frames.
1557 */
1558static void ath_rc_update(struct ath_softc *sc,
1559 struct ath_rate_node *ath_rc_priv,
1560 struct ath_tx_info_priv *info_priv, int final_ts_idx,
1561 int xretries, int long_retry)
1562{ 1229{
1563 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc; 1230 int rix;
1231
1232 if ((rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
1233 (rate->flags & IEEE80211_TX_RC_SHORT_GI))
1234 rix = rate_table->info[rate->idx].ht_index;
1235 else if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
1236 rix = rate_table->info[rate->idx].sgi_index;
1237 else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
1238 rix = rate_table->info[rate->idx].cw40index;
1239 else
1240 rix = rate_table->info[rate->idx].base_index;
1241
1242 return rix;
1243}
1244
1245static void ath_rc_tx_status(struct ath_softc *sc,
1246 struct ath_rate_priv *ath_rc_priv,
1247 struct ieee80211_tx_info *tx_info,
1248 int final_ts_idx, int xretries, int long_retry)
1249{
1250 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
1564 struct ath_rate_table *rate_table; 1251 struct ath_rate_table *rate_table;
1565 struct ath_tx_ratectrl *rate_ctrl; 1252 struct ieee80211_tx_rate *rates = tx_info->status.rates;
1566 struct ath_rc_series rcs[4];
1567 u8 flags; 1253 u8 flags;
1568 u32 series = 0, rix; 1254 u32 i = 0, rix;
1569 1255
1570 memcpy(rcs, info_priv->rcs, 4 * sizeof(rcs[0])); 1256 rate_table = sc->cur_rate_table;
1571 rate_table = (struct ath_rate_table *)
1572 asc->hw_rate_table[sc->sc_curmode];
1573 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
1574 ASSERT(rcs[0].tries != 0);
1575 1257
1576 /* 1258 /*
1577 * If the first rate is not the final index, there 1259 * If the first rate is not the final index, there
@@ -1579,32 +1261,22 @@ static void ath_rc_update(struct ath_softc *sc,
1579 */ 1261 */
1580 if (final_ts_idx != 0) { 1262 if (final_ts_idx != 0) {
1581 /* Process intermediate rates that failed.*/ 1263 /* Process intermediate rates that failed.*/
1582 for (series = 0; series < final_ts_idx ; series++) { 1264 for (i = 0; i < final_ts_idx ; i++) {
1583 if (rcs[series].tries != 0) { 1265 if (rates[i].count != 0 && (rates[i].idx >= 0)) {
1584 flags = rcs[series].flags; 1266 flags = rates[i].flags;
1267
1585 /* If HT40 and we have switched mode from 1268 /* If HT40 and we have switched mode from
1586 * 40 to 20 => don't update */ 1269 * 40 to 20 => don't update */
1587 if ((flags & ATH_RC_CW40_FLAG) && 1270
1588 (rate_ctrl->rc_phy_mode != 1271 if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
1589 (flags & ATH_RC_CW40_FLAG))) 1272 (ath_rc_priv->rc_phy_mode != WLAN_RC_40_FLAG))
1590 return; 1273 return;
1591 if ((flags & ATH_RC_CW40_FLAG) && 1274
1592 (flags & ATH_RC_SGI_FLAG)) 1275 rix = ath_rc_get_rateindex(rate_table, &rates[i]);
1593 rix = rate_table->info[
1594 rcs[series].rix].ht_index;
1595 else if (flags & ATH_RC_SGI_FLAG)
1596 rix = rate_table->info[
1597 rcs[series].rix].sgi_index;
1598 else if (flags & ATH_RC_CW40_FLAG)
1599 rix = rate_table->info[
1600 rcs[series].rix].cw40index;
1601 else
1602 rix = rate_table->info[
1603 rcs[series].rix].base_index;
1604 ath_rc_update_ht(sc, ath_rc_priv, 1276 ath_rc_update_ht(sc, ath_rc_priv,
1605 info_priv, rix, 1277 tx_info_priv, rix,
1606 xretries ? 1 : 2, 1278 xretries ? 1 : 2,
1607 rcs[series].tries); 1279 rates[i].count);
1608 } 1280 }
1609 } 1281 }
1610 } else { 1282 } else {
@@ -1614,240 +1286,152 @@ static void ath_rc_update(struct ath_softc *sc,
1614 * Treating it as an excessive retry penalizes the rate 1286 * Treating it as an excessive retry penalizes the rate
1615 * inordinately. 1287 * inordinately.
1616 */ 1288 */
1617 if (rcs[0].tries == 1 && xretries == 1) 1289 if (rates[0].count == 1 && xretries == 1)
1618 xretries = 2; 1290 xretries = 2;
1619 } 1291 }
1620 1292
1621 flags = rcs[series].flags; 1293 flags = rates[i].flags;
1294
1622 /* If HT40 and we have switched mode from 40 to 20 => don't update */ 1295 /* If HT40 and we have switched mode from 40 to 20 => don't update */
1623 if ((flags & ATH_RC_CW40_FLAG) && 1296 if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
1624 (rate_ctrl->rc_phy_mode != (flags & ATH_RC_CW40_FLAG))) 1297 (ath_rc_priv->rc_phy_mode != WLAN_RC_40_FLAG)) {
1625 return; 1298 return;
1299 }
1626 1300
1627 if ((flags & ATH_RC_CW40_FLAG) && (flags & ATH_RC_SGI_FLAG)) 1301 rix = ath_rc_get_rateindex(rate_table, &rates[i]);
1628 rix = rate_table->info[rcs[series].rix].ht_index; 1302 ath_rc_update_ht(sc, ath_rc_priv, tx_info_priv, rix,
1629 else if (flags & ATH_RC_SGI_FLAG) 1303 xretries, long_retry);
1630 rix = rate_table->info[rcs[series].rix].sgi_index;
1631 else if (flags & ATH_RC_CW40_FLAG)
1632 rix = rate_table->info[rcs[series].rix].cw40index;
1633 else
1634 rix = rate_table->info[rcs[series].rix].base_index;
1635
1636 ath_rc_update_ht(sc, ath_rc_priv, info_priv, rix,
1637 xretries, long_retry);
1638} 1304}
1639 1305
1640/* 1306static struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
1641 * Process a tx descriptor for a completed transmit (success or failure). 1307 enum ieee80211_band band,
1642 */ 1308 bool is_ht, bool is_cw_40)
1643static void ath_rate_tx_complete(struct ath_softc *sc,
1644 struct ath_node *an,
1645 struct ath_rate_node *rc_priv,
1646 struct ath_tx_info_priv *info_priv)
1647{ 1309{
1648 int final_ts_idx = info_priv->tx.ts_rateindex; 1310 int mode = 0;
1649 int tx_status = 0, is_underrun = 0; 1311
1650 struct ath_vap *avp; 1312 switch(band) {
1651 1313 case IEEE80211_BAND_2GHZ:
1652 avp = rc_priv->avp; 1314 mode = ATH9K_MODE_11G;
1653 if ((avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) || 1315 if (is_ht)
1654 (info_priv->tx.ts_status & ATH9K_TXERR_FILT)) 1316 mode = ATH9K_MODE_11NG_HT20;
1655 return; 1317 if (is_cw_40)
1656 1318 mode = ATH9K_MODE_11NG_HT40PLUS;
1657 if (info_priv->tx.ts_rssi > 0) { 1319 break;
1658 ATH_RSSI_LPF(an->an_chainmask_sel.tx_avgrssi, 1320 case IEEE80211_BAND_5GHZ:
1659 info_priv->tx.ts_rssi); 1321 mode = ATH9K_MODE_11A;
1660 } 1322 if (is_ht)
1661 1323 mode = ATH9K_MODE_11NA_HT20;
1662 /* 1324 if (is_cw_40)
1663 * If underrun error is seen assume it as an excessive retry only 1325 mode = ATH9K_MODE_11NA_HT40PLUS;
1664 * if prefetch trigger level have reached the max (0x3f for 5416) 1326 break;
1665 * Adjust the long retry as if the frame was tried ATH_11N_TXMAXTRY 1327 default:
1666 * times. This affects how ratectrl updates PER for the failed rate. 1328 DPRINTF(sc, ATH_DBG_CONFIG, "Invalid band\n");
1667 */ 1329 return NULL;
1668 if (info_priv->tx.ts_flags &
1669 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN) &&
1670 ((sc->sc_ah->ah_txTrigLevel) >= tx_triglevel_max)) {
1671 tx_status = 1;
1672 is_underrun = 1;
1673 } 1330 }
1674 1331
1675 if ((info_priv->tx.ts_status & ATH9K_TXERR_XRETRY) || 1332 BUG_ON(mode >= ATH9K_MODE_MAX);
1676 (info_priv->tx.ts_status & ATH9K_TXERR_FIFO))
1677 tx_status = 1;
1678 1333
1679 ath_rc_update(sc, rc_priv, info_priv, final_ts_idx, tx_status, 1334 DPRINTF(sc, ATH_DBG_CONFIG, "Choosing rate table for mode: %d\n", mode);
1680 (is_underrun) ? ATH_11N_TXMAXTRY : 1335 return sc->hw_rate_table[mode];
1681 info_priv->tx.ts_longretry);
1682} 1336}
1683 1337
1684/* 1338static void ath_rc_init(struct ath_softc *sc,
1685 * Update the SIB's rate control information 1339 struct ath_rate_priv *ath_rc_priv,
1686 * 1340 struct ieee80211_supported_band *sband,
1687 * This should be called when the supported rates change 1341 struct ieee80211_sta *sta)
1688 * (e.g. SME operation, wireless mode change)
1689 *
1690 * It will determine which rates are valid for use.
1691 */
1692static void ath_rc_sib_update(struct ath_softc *sc,
1693 struct ath_rate_node *ath_rc_priv,
1694 u32 capflag, int keep_state,
1695 struct ath_rateset *negotiated_rates,
1696 struct ath_rateset *negotiated_htrates)
1697{ 1342{
1698 struct ath_rate_table *rate_table = NULL; 1343 struct ath_rate_table *rate_table = NULL;
1699 struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc; 1344 struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
1700 struct ath_rateset *rateset = negotiated_rates; 1345 u8 *ht_mcs = (u8 *)&ath_rc_priv->neg_ht_rates;
1701 u8 *ht_mcs = (u8 *)negotiated_htrates;
1702 struct ath_tx_ratectrl *rate_ctrl =
1703 (struct ath_tx_ratectrl *)ath_rc_priv;
1704 u8 i, j, k, hi = 0, hthi = 0; 1346 u8 i, j, k, hi = 0, hthi = 0;
1705 1347
1706 rate_table = (struct ath_rate_table *) 1348 /* FIXME: Adhoc */
1707 asc->hw_rate_table[sc->sc_curmode]; 1349 if ((sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION) ||
1350 (sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC)) {
1351 bool is_cw_40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1352 rate_table = ath_choose_rate_table(sc, sband->band,
1353 sta->ht_cap.ht_supported,
1354 is_cw_40);
1355 } else if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP) {
1356 /* cur_rate_table would be set on init through config() */
1357 rate_table = sc->cur_rate_table;
1358 }
1359
1360 if (!rate_table) {
1361 DPRINTF(sc, ATH_DBG_FATAL, "Rate table not initialized\n");
1362 return;
1363 }
1364
1365 if (sta->ht_cap.ht_supported) {
1366 ath_rc_priv->ht_cap = (WLAN_RC_HT_FLAG | WLAN_RC_DS_FLAG);
1367 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
1368 ath_rc_priv->ht_cap |= WLAN_RC_40_FLAG;
1369 }
1708 1370
1709 /* Initial rate table size. Will change depending 1371 /* Initial rate table size. Will change depending
1710 * on the working rate set */ 1372 * on the working rate set */
1711 rate_ctrl->rate_table_size = MAX_TX_RATE_TBL; 1373 ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
1712 1374
1713 /* Initialize thresholds according to the global rate table */ 1375 /* Initialize thresholds according to the global rate table */
1714 for (i = 0 ; (i < rate_ctrl->rate_table_size) && (!keep_state); i++) { 1376 for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
1715 rate_ctrl->state[i].rssi_thres = 1377 ath_rc_priv->state[i].rssi_thres =
1716 rate_table->info[i].rssi_ack_validmin; 1378 rate_table->info[i].rssi_ack_validmin;
1717 rate_ctrl->state[i].per = 0; 1379 ath_rc_priv->state[i].per = 0;
1718 } 1380 }
1719 1381
1720 /* Determine the valid rates */ 1382 /* Determine the valid rates */
1721 ath_rc_init_valid_txmask(rate_ctrl); 1383 ath_rc_init_valid_txmask(ath_rc_priv);
1722 1384
1723 for (i = 0; i < WLAN_RC_PHY_MAX; i++) { 1385 for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
1724 for (j = 0; j < MAX_TX_RATE_PHY; j++) 1386 for (j = 0; j < MAX_TX_RATE_PHY; j++)
1725 rate_ctrl->valid_phy_rateidx[i][j] = 0; 1387 ath_rc_priv->valid_phy_rateidx[i][j] = 0;
1726 rate_ctrl->valid_phy_ratecnt[i] = 0; 1388 ath_rc_priv->valid_phy_ratecnt[i] = 0;
1727 } 1389 }
1728 rate_ctrl->rc_phy_mode = (capflag & WLAN_RC_40_FLAG); 1390 ath_rc_priv->rc_phy_mode = (ath_rc_priv->ht_cap & WLAN_RC_40_FLAG);
1729 1391
1730 /* Set stream capability */ 1392 /* Set stream capability */
1731 ath_rc_priv->single_stream = (capflag & WLAN_RC_DS_FLAG) ? 0 : 1; 1393 ath_rc_priv->single_stream = (ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG) ? 0 : 1;
1732 1394
1733 if (!rateset->rs_nrates) { 1395 if (!rateset->rs_nrates) {
1734 /* No working rate, just initialize valid rates */ 1396 /* No working rate, just initialize valid rates */
1735 hi = ath_rc_sib_init_validrates(ath_rc_priv, rate_table, 1397 hi = ath_rc_init_validrates(ath_rc_priv, rate_table,
1736 capflag); 1398 ath_rc_priv->ht_cap);
1737 } else { 1399 } else {
1738 /* Use intersection of working rates and valid rates */ 1400 /* Use intersection of working rates and valid rates */
1739 hi = ath_rc_sib_setvalid_rates(ath_rc_priv, rate_table, 1401 hi = ath_rc_setvalid_rates(ath_rc_priv, rate_table,
1740 rateset, capflag); 1402 rateset, ath_rc_priv->ht_cap);
1741 if (capflag & WLAN_RC_HT_FLAG) { 1403 if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG) {
1742 hthi = ath_rc_sib_setvalid_htrates(ath_rc_priv, 1404 hthi = ath_rc_setvalid_htrates(ath_rc_priv,
1743 rate_table, 1405 rate_table,
1744 ht_mcs, 1406 ht_mcs,
1745 capflag); 1407 ath_rc_priv->ht_cap);
1746 } 1408 }
1747 hi = A_MAX(hi, hthi); 1409 hi = A_MAX(hi, hthi);
1748 } 1410 }
1749 1411
1750 rate_ctrl->rate_table_size = hi + 1; 1412 ath_rc_priv->rate_table_size = hi + 1;
1751 rate_ctrl->rate_max_phy = 0; 1413 ath_rc_priv->rate_max_phy = 0;
1752 ASSERT(rate_ctrl->rate_table_size <= MAX_TX_RATE_TBL); 1414 ASSERT(ath_rc_priv->rate_table_size <= RATE_TABLE_SIZE);
1753 1415
1754 for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) { 1416 for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
1755 for (j = 0; j < rate_ctrl->valid_phy_ratecnt[i]; j++) { 1417 for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
1756 rate_ctrl->valid_rate_index[k++] = 1418 ath_rc_priv->valid_rate_index[k++] =
1757 rate_ctrl->valid_phy_rateidx[i][j]; 1419 ath_rc_priv->valid_phy_rateidx[i][j];
1758 } 1420 }
1759 1421
1760 if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, TRUE) 1422 if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1)
1761 || !rate_ctrl->valid_phy_ratecnt[i]) 1423 || !ath_rc_priv->valid_phy_ratecnt[i])
1762 continue; 1424 continue;
1763 1425
1764 rate_ctrl->rate_max_phy = rate_ctrl->valid_phy_rateidx[i][j-1]; 1426 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
1765 } 1427 }
1766 ASSERT(rate_ctrl->rate_table_size <= MAX_TX_RATE_TBL); 1428 ASSERT(ath_rc_priv->rate_table_size <= RATE_TABLE_SIZE);
1767 ASSERT(k <= MAX_TX_RATE_TBL); 1429 ASSERT(k <= RATE_TABLE_SIZE);
1768
1769 rate_ctrl->max_valid_rate = k;
1770 /*
1771 * Some third party vendors don't send the supported rate series in
1772 * order. So sorting to make sure its in order, otherwise our RateFind
1773 * Algo will select wrong rates
1774 */
1775 ath_rc_sort_validrates(rate_table, rate_ctrl);
1776 rate_ctrl->rate_max_phy = rate_ctrl->valid_rate_index[k-4];
1777}
1778
1779/*
1780 * Update rate-control state on station associate/reassociate.
1781 */
1782static int ath_rate_newassoc(struct ath_softc *sc,
1783 struct ath_rate_node *ath_rc_priv,
1784 unsigned int capflag,
1785 struct ath_rateset *negotiated_rates,
1786 struct ath_rateset *negotiated_htrates)
1787{
1788
1789
1790 ath_rc_priv->ht_cap =
1791 ((capflag & ATH_RC_DS_FLAG) ? WLAN_RC_DS_FLAG : 0) |
1792 ((capflag & ATH_RC_SGI_FLAG) ? WLAN_RC_SGI_FLAG : 0) |
1793 ((capflag & ATH_RC_HT_FLAG) ? WLAN_RC_HT_FLAG : 0) |
1794 ((capflag & ATH_RC_CW40_FLAG) ? WLAN_RC_40_FLAG : 0);
1795
1796 ath_rc_sib_update(sc, ath_rc_priv, ath_rc_priv->ht_cap, 0,
1797 negotiated_rates, negotiated_htrates);
1798
1799 return 0;
1800}
1801
1802/*
1803 * This routine is called to initialize the rate control parameters
1804 * in the SIB. It is called initially during system initialization
1805 * or when a station is associated with the AP.
1806 */
1807static void ath_rc_sib_init(struct ath_rate_node *ath_rc_priv)
1808{
1809 struct ath_tx_ratectrl *rate_ctrl;
1810
1811 rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv);
1812 rate_ctrl->rssi_down_time = jiffies_to_msecs(jiffies);
1813}
1814
1815
1816static void ath_setup_rates(struct ath_softc *sc,
1817 struct ieee80211_supported_band *sband,
1818 struct ieee80211_sta *sta,
1819 struct ath_rate_node *rc_priv)
1820
1821{
1822 int i, j = 0;
1823
1824 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
1825
1826 for (i = 0; i < sband->n_bitrates; i++) {
1827 if (sta->supp_rates[sband->band] & BIT(i)) {
1828 rc_priv->neg_rates.rs_rates[j]
1829 = (sband->bitrates[i].bitrate * 2) / 10;
1830 j++;
1831 }
1832 }
1833 rc_priv->neg_rates.rs_nrates = j;
1834}
1835
1836void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv)
1837{
1838 struct ath_softc *sc = hw->priv;
1839 u32 capflag = 0;
1840
1841 if (hw->conf.ht_conf.ht_supported) {
1842 capflag |= ATH_RC_HT_FLAG | ATH_RC_DS_FLAG;
1843 if (sc->sc_ht_info.tx_chan_width == ATH9K_HT_MACMODE_2040)
1844 capflag |= ATH_RC_CW40_FLAG;
1845 }
1846
1847 ath_rate_newassoc(sc, rc_priv, capflag,
1848 &rc_priv->neg_rates,
1849 &rc_priv->neg_ht_rates);
1850 1430
1431 ath_rc_priv->max_valid_rate = k;
1432 ath_rc_sort_validrates(rate_table, ath_rc_priv);
1433 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
1434 sc->cur_rate_table = rate_table;
1851} 1435}
1852 1436
1853/* Rate Control callbacks */ 1437/* Rate Control callbacks */
@@ -1856,163 +1440,88 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1856 struct sk_buff *skb) 1440 struct sk_buff *skb)
1857{ 1441{
1858 struct ath_softc *sc = priv; 1442 struct ath_softc *sc = priv;
1859 struct ath_tx_info_priv *tx_info_priv; 1443 struct ath_rate_priv *ath_rc_priv = priv_sta;
1860 struct ath_node *an; 1444 struct ath_tx_info_priv *tx_info_priv = NULL;
1861 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1445 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1862 struct ieee80211_hdr *hdr; 1446 struct ieee80211_hdr *hdr;
1447 int final_ts_idx, tx_status = 0, is_underrun = 0;
1863 __le16 fc; 1448 __le16 fc;
1864 1449
1865 hdr = (struct ieee80211_hdr *)skb->data; 1450 hdr = (struct ieee80211_hdr *)skb->data;
1866 fc = hdr->frame_control; 1451 fc = hdr->frame_control;
1867 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; 1452 tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
1453 final_ts_idx = tx_info_priv->tx.ts_rateindex;
1868 1454
1869 spin_lock_bh(&sc->node_lock); 1455 if (!priv_sta || !ieee80211_is_data(fc) ||
1870 an = ath_node_find(sc, hdr->addr1); 1456 !tx_info_priv->update_rc)
1871 spin_unlock_bh(&sc->node_lock); 1457 goto exit;
1872 1458
1873 if (!an || !priv_sta || !ieee80211_is_data(fc)) { 1459 if (tx_info_priv->tx.ts_status & ATH9K_TXERR_FILT)
1874 if (tx_info->driver_data[0] != NULL) { 1460 goto exit;
1875 kfree(tx_info->driver_data[0]);
1876 tx_info->driver_data[0] = NULL;
1877 }
1878 return;
1879 }
1880 if (tx_info->driver_data[0] != NULL) {
1881 ath_rate_tx_complete(sc, an, priv_sta, tx_info_priv);
1882 kfree(tx_info->driver_data[0]);
1883 tx_info->driver_data[0] = NULL;
1884 }
1885}
1886
1887static void ath_tx_aggr_resp(struct ath_softc *sc,
1888 struct ieee80211_supported_band *sband,
1889 struct ieee80211_sta *sta,
1890 struct ath_node *an,
1891 u8 tidno)
1892{
1893 struct ath_atx_tid *txtid;
1894 u16 buffersize = 0;
1895 int state;
1896 struct sta_info *si;
1897
1898 if (!(sc->sc_flags & SC_OP_TXAGGR))
1899 return;
1900
1901 txtid = ATH_AN_2_TID(an, tidno);
1902 if (!txtid->paused)
1903 return;
1904 1461
1905 /* 1462 /*
1906 * XXX: This is entirely busted, we aren't supposed to 1463 * If underrun error is seen assume it as an excessive retry only
1907 * access the sta from here because it's internal 1464 * if prefetch trigger level have reached the max (0x3f for 5416)
1908 * to mac80211, and looking at the state without 1465 * Adjust the long retry as if the frame was tried ATH_11N_TXMAXTRY
1909 * locking is wrong too. 1466 * times. This affects how ratectrl updates PER for the failed rate.
1910 */ 1467 */
1911 si = container_of(sta, struct sta_info, sta); 1468 if (tx_info_priv->tx.ts_flags &
1912 buffersize = IEEE80211_MIN_AMPDU_BUF << 1469 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN) &&
1913 sband->ht_info.ampdu_factor; /* FIXME */ 1470 ((sc->sc_ah->ah_txTrigLevel) >= ath_rc_priv->tx_triglevel_max)) {
1914 state = si->ampdu_mlme.tid_state_tx[tidno]; 1471 tx_status = 1;
1915 1472 is_underrun = 1;
1916 if (state & HT_ADDBA_RECEIVED_MSK) {
1917 txtid->addba_exchangecomplete = 1;
1918 txtid->addba_exchangeinprogress = 0;
1919 txtid->baw_size = buffersize;
1920
1921 DPRINTF(sc, ATH_DBG_AGGR,
1922 "%s: Resuming tid, buffersize: %d\n",
1923 __func__,
1924 buffersize);
1925
1926 ath_tx_resume_tid(sc, txtid);
1927 } 1473 }
1474
1475 if ((tx_info_priv->tx.ts_status & ATH9K_TXERR_XRETRY) ||
1476 (tx_info_priv->tx.ts_status & ATH9K_TXERR_FIFO))
1477 tx_status = 1;
1478
1479 ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
1480 (is_underrun) ? ATH_11N_TXMAXTRY :
1481 tx_info_priv->tx.ts_longretry);
1482
1483exit:
1484 kfree(tx_info_priv);
1928} 1485}
1929 1486
1930static void ath_get_rate(void *priv, struct ieee80211_supported_band *sband, 1487static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
1931 struct ieee80211_sta *sta, void *priv_sta, 1488 struct ieee80211_tx_rate_control *txrc)
1932 struct sk_buff *skb, struct rate_selection *sel)
1933{ 1489{
1490 struct ieee80211_supported_band *sband = txrc->sband;
1491 struct sk_buff *skb = txrc->skb;
1934 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1492 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1935 struct ath_softc *sc = priv; 1493 struct ath_softc *sc = priv;
1936 struct ieee80211_hw *hw = sc->hw; 1494 struct ieee80211_hw *hw = sc->hw;
1937 struct ath_tx_info_priv *tx_info_priv; 1495 struct ath_rate_priv *ath_rc_priv = priv_sta;
1938 struct ath_rate_node *ath_rc_priv = priv_sta;
1939 struct ath_node *an;
1940 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1496 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1941 int is_probe = FALSE, chk, ret; 1497 int is_probe = 0;
1942 s8 lowest_idx;
1943 __le16 fc = hdr->frame_control; 1498 __le16 fc = hdr->frame_control;
1944 u8 *qc, tid;
1945 DECLARE_MAC_BUF(mac);
1946
1947 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
1948
1949 /* allocate driver private area of tx_info */
1950 tx_info->driver_data[0] = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
1951 ASSERT(tx_info->driver_data[0] != NULL);
1952 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
1953 1499
1954 lowest_idx = rate_lowest_index(sband, sta);
1955 tx_info_priv->min_rate = (sband->bitrates[lowest_idx].bitrate * 2) / 10;
1956 /* lowest rate for management and multicast/broadcast frames */ 1500 /* lowest rate for management and multicast/broadcast frames */
1957 if (!ieee80211_is_data(fc) || 1501 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) ||
1958 is_multicast_ether_addr(hdr->addr1) || !sta) { 1502 !sta) {
1959 sel->rate_idx = lowest_idx; 1503 tx_info->control.rates[0].idx = rate_lowest_index(sband, sta);
1504 tx_info->control.rates[0].count =
1505 is_multicast_ether_addr(hdr->addr1) ? 1 : ATH_MGT_TXMAXTRY;
1960 return; 1506 return;
1961 } 1507 }
1962 1508
1963 /* Find tx rate for unicast frames */ 1509 /* Find tx rate for unicast frames */
1964 ath_rate_findrate(sc, ath_rc_priv, 1510 ath_rc_ratefind(sc, ath_rc_priv, ATH_11N_TXMAXTRY, 4,
1965 ATH_11N_TXMAXTRY, 4, 1511 tx_info, &is_probe, false);
1966 ATH_RC_PROBE_ALLOWED,
1967 tx_info_priv->rcs,
1968 &is_probe,
1969 false);
1970 if (is_probe)
1971 sel->probe_idx = ath_rc_priv->tx_ratectrl.probe_rate;
1972
1973 /* Ratecontrol sometimes returns invalid rate index */
1974 if (tx_info_priv->rcs[0].rix != 0xff)
1975 ath_rc_priv->prev_data_rix = tx_info_priv->rcs[0].rix;
1976 else
1977 tx_info_priv->rcs[0].rix = ath_rc_priv->prev_data_rix;
1978
1979 sel->rate_idx = tx_info_priv->rcs[0].rix;
1980 1512
1981 /* Check if aggregation has to be enabled for this tid */ 1513 /* Check if aggregation has to be enabled for this tid */
1982 1514 if (hw->conf.ht.enabled) {
1983 if (hw->conf.ht_conf.ht_supported) {
1984 if (ieee80211_is_data_qos(fc)) { 1515 if (ieee80211_is_data_qos(fc)) {
1516 u8 *qc, tid;
1517 struct ath_node *an;
1518
1985 qc = ieee80211_get_qos_ctl(hdr); 1519 qc = ieee80211_get_qos_ctl(hdr);
1986 tid = qc[0] & 0xf; 1520 tid = qc[0] & 0xf;
1521 an = (struct ath_node *)sta->drv_priv;
1987 1522
1988 spin_lock_bh(&sc->node_lock); 1523 if(ath_tx_aggr_check(sc, an, tid))
1989 an = ath_node_find(sc, hdr->addr1); 1524 ieee80211_start_tx_ba_session(hw, hdr->addr1, tid);
1990 spin_unlock_bh(&sc->node_lock);
1991
1992 if (!an) {
1993 DPRINTF(sc, ATH_DBG_AGGR,
1994 "%s: Node not found to "
1995 "init/chk TX aggr\n", __func__);
1996 return;
1997 }
1998
1999 chk = ath_tx_aggr_check(sc, an, tid);
2000 if (chk == AGGR_REQUIRED) {
2001 ret = ieee80211_start_tx_ba_session(hw,
2002 hdr->addr1, tid);
2003 if (ret)
2004 DPRINTF(sc, ATH_DBG_AGGR,
2005 "%s: Unable to start tx "
2006 "aggr for: %s\n",
2007 __func__,
2008 print_mac(mac, hdr->addr1));
2009 else
2010 DPRINTF(sc, ATH_DBG_AGGR,
2011 "%s: Started tx aggr for: %s\n",
2012 __func__,
2013 print_mac(mac, hdr->addr1));
2014 } else if (chk == AGGR_EXCHANGE_PROGRESS)
2015 ath_tx_aggr_resp(sc, sband, sta, an, tid);
2016 } 1525 }
2017 } 1526 }
2018} 1527}
@@ -2021,34 +1530,33 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
2021 struct ieee80211_sta *sta, void *priv_sta) 1530 struct ieee80211_sta *sta, void *priv_sta)
2022{ 1531{
2023 struct ath_softc *sc = priv; 1532 struct ath_softc *sc = priv;
2024 struct ath_rate_node *ath_rc_priv = priv_sta; 1533 struct ath_rate_priv *ath_rc_priv = priv_sta;
2025 int i, j = 0; 1534 int i, j = 0;
2026 1535
2027 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__); 1536 for (i = 0; i < sband->n_bitrates; i++) {
1537 if (sta->supp_rates[sband->band] & BIT(i)) {
1538 ath_rc_priv->neg_rates.rs_rates[j]
1539 = (sband->bitrates[i].bitrate * 2) / 10;
1540 j++;
1541 }
1542 }
1543 ath_rc_priv->neg_rates.rs_nrates = j;
2028 1544
2029 ath_setup_rates(sc, sband, sta, ath_rc_priv); 1545 if (sta->ht_cap.ht_supported) {
2030 if (sc->hw->conf.flags & IEEE80211_CONF_SUPPORT_HT_MODE) { 1546 for (i = 0, j = 0; i < 77; i++) {
2031 for (i = 0; i < MCS_SET_SIZE; i++) { 1547 if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
2032 if (sc->hw->conf.ht_conf.supp_mcs_set[i/8] & (1<<(i%8)))
2033 ath_rc_priv->neg_ht_rates.rs_rates[j++] = i; 1548 ath_rc_priv->neg_ht_rates.rs_rates[j++] = i;
2034 if (j == ATH_RATE_MAX) 1549 if (j == ATH_RATE_MAX)
2035 break; 1550 break;
2036 } 1551 }
2037 ath_rc_priv->neg_ht_rates.rs_nrates = j; 1552 ath_rc_priv->neg_ht_rates.rs_nrates = j;
2038 } 1553 }
2039 ath_rc_node_update(sc->hw, priv_sta);
2040}
2041 1554
2042static void ath_rate_clear(void *priv) 1555 ath_rc_init(sc, priv_sta, sband, sta);
2043{
2044 return;
2045} 1556}
2046 1557
2047static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 1558static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
2048{ 1559{
2049 struct ath_softc *sc = hw->priv;
2050
2051 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
2052 return hw->priv; 1560 return hw->priv;
2053} 1561}
2054 1562
@@ -2060,19 +1568,17 @@ static void ath_rate_free(void *priv)
2060static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) 1568static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
2061{ 1569{
2062 struct ath_softc *sc = priv; 1570 struct ath_softc *sc = priv;
2063 struct ath_vap *avp = sc->sc_vaps[0]; 1571 struct ath_rate_priv *rate_priv;
2064 struct ath_rate_node *rate_priv;
2065
2066 DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__);
2067 1572
2068 rate_priv = ath_rate_node_alloc(avp, sc->sc_rc, gfp); 1573 rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp);
2069 if (!rate_priv) { 1574 if (!rate_priv) {
2070 DPRINTF(sc, ATH_DBG_FATAL, 1575 DPRINTF(sc, ATH_DBG_FATAL,
2071 "%s: Unable to allocate private rc structure\n", 1576 "Unable to allocate private rc structure\n");
2072 __func__);
2073 return NULL; 1577 return NULL;
2074 } 1578 }
2075 ath_rc_sib_init(rate_priv); 1579
1580 rate_priv->rssi_down_time = jiffies_to_msecs(jiffies);
1581 rate_priv->tx_triglevel_max = sc->sc_ah->ah_caps.tx_triglevel_max;
2076 1582
2077 return rate_priv; 1583 return rate_priv;
2078} 1584}
@@ -2080,11 +1586,8 @@ static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp
2080static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta, 1586static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
2081 void *priv_sta) 1587 void *priv_sta)
2082{ 1588{
2083 struct ath_rate_node *rate_priv = priv_sta; 1589 struct ath_rate_priv *rate_priv = priv_sta;
2084 struct ath_softc *sc = priv; 1590 kfree(rate_priv);
2085
2086 DPRINTF(sc, ATH_DBG_RATE, "%s", __func__);
2087 ath_rate_node_free(rate_priv);
2088} 1591}
2089 1592
2090static struct rate_control_ops ath_rate_ops = { 1593static struct rate_control_ops ath_rate_ops = {
@@ -2093,13 +1596,69 @@ static struct rate_control_ops ath_rate_ops = {
2093 .tx_status = ath_tx_status, 1596 .tx_status = ath_tx_status,
2094 .get_rate = ath_get_rate, 1597 .get_rate = ath_get_rate,
2095 .rate_init = ath_rate_init, 1598 .rate_init = ath_rate_init,
2096 .clear = ath_rate_clear,
2097 .alloc = ath_rate_alloc, 1599 .alloc = ath_rate_alloc,
2098 .free = ath_rate_free, 1600 .free = ath_rate_free,
2099 .alloc_sta = ath_rate_alloc_sta, 1601 .alloc_sta = ath_rate_alloc_sta,
2100 .free_sta = ath_rate_free_sta, 1602 .free_sta = ath_rate_free_sta,
2101}; 1603};
2102 1604
1605static void ath_setup_rate_table(struct ath_softc *sc,
1606 struct ath_rate_table *rate_table)
1607{
1608 int i;
1609
1610 for (i = 0; i < 256; i++)
1611 rate_table->rateCodeToIndex[i] = (u8)-1;
1612
1613 for (i = 0; i < rate_table->rate_cnt; i++) {
1614 u8 code = rate_table->info[i].ratecode;
1615 u8 cix = rate_table->info[i].ctrl_rate;
1616 u8 sh = rate_table->info[i].short_preamble;
1617
1618 rate_table->rateCodeToIndex[code] = i;
1619 rate_table->rateCodeToIndex[code | sh] = i;
1620
1621 rate_table->info[i].lpAckDuration =
1622 ath9k_hw_computetxtime(sc->sc_ah, rate_table,
1623 WLAN_CTRL_FRAME_SIZE,
1624 cix,
1625 false);
1626 rate_table->info[i].spAckDuration =
1627 ath9k_hw_computetxtime(sc->sc_ah, rate_table,
1628 WLAN_CTRL_FRAME_SIZE,
1629 cix,
1630 true);
1631 }
1632}
1633
1634void ath_rate_attach(struct ath_softc *sc)
1635{
1636 sc->hw_rate_table[ATH9K_MODE_11B] =
1637 &ar5416_11b_ratetable;
1638 sc->hw_rate_table[ATH9K_MODE_11A] =
1639 &ar5416_11a_ratetable;
1640 sc->hw_rate_table[ATH9K_MODE_11G] =
1641 &ar5416_11g_ratetable;
1642 sc->hw_rate_table[ATH9K_MODE_11NA_HT20] =
1643 &ar5416_11na_ratetable;
1644 sc->hw_rate_table[ATH9K_MODE_11NG_HT20] =
1645 &ar5416_11ng_ratetable;
1646 sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS] =
1647 &ar5416_11na_ratetable;
1648 sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS] =
1649 &ar5416_11na_ratetable;
1650 sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS] =
1651 &ar5416_11ng_ratetable;
1652 sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS] =
1653 &ar5416_11ng_ratetable;
1654
1655 ath_setup_rate_table(sc, &ar5416_11b_ratetable);
1656 ath_setup_rate_table(sc, &ar5416_11a_ratetable);
1657 ath_setup_rate_table(sc, &ar5416_11g_ratetable);
1658 ath_setup_rate_table(sc, &ar5416_11na_ratetable);
1659 ath_setup_rate_table(sc, &ar5416_11ng_ratetable);
1660}
1661
2103int ath_rate_control_register(void) 1662int ath_rate_control_register(void)
2104{ 1663{
2105 return ieee80211_rate_control_register(&ath_rate_ops); 1664 return ieee80211_rate_control_register(&ath_rate_ops);
@@ -2109,4 +1668,3 @@ void ath_rate_control_unregister(void)
2109{ 1668{
2110 ieee80211_rate_control_unregister(&ath_rate_ops); 1669 ieee80211_rate_control_unregister(&ath_rate_ops);
2111} 1670}
2112
diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h
index b95b41508b98..97c60d12e8aa 100644
--- a/drivers/net/wireless/ath9k/rc.h
+++ b/drivers/net/wireless/ath9k/rc.h
@@ -20,84 +20,24 @@
20#define RC_H 20#define RC_H
21 21
22#include "ath9k.h" 22#include "ath9k.h"
23/*
24 * Interface definitions for transmit rate control modules for the
25 * Atheros driver.
26 *
27 * A rate control module is responsible for choosing the transmit rate
28 * for each data frame. Management+control frames are always sent at
29 * a fixed rate.
30 *
31 * Only one module may be present at a time; the driver references
32 * rate control interfaces by symbol name. If multiple modules are
33 * to be supported we'll need to switch to a registration-based scheme
34 * as is currently done, for example, for authentication modules.
35 *
36 * An instance of the rate control module is attached to each device
37 * at attach time and detached when the device is destroyed. The module
38 * may associate data with each device and each node (station). Both
39 * sets of storage are opaque except for the size of the per-node storage
40 * which must be provided when the module is attached.
41 *
42 * The rate control module is notified for each state transition and
43 * station association/reassociation. Otherwise it is queried for a
44 * rate for each outgoing frame and provided status from each transmitted
45 * frame. Any ancillary processing is the responsibility of the module
46 * (e.g. if periodic processing is required then the module should setup
47 * it's own timer).
48 *
49 * In addition to the transmit rate for each frame the module must also
50 * indicate the number of attempts to make at the specified rate. If this
51 * number is != ATH_TXMAXTRY then an additional callback is made to setup
52 * additional transmit state. The rate control code is assumed to write
53 * this additional data directly to the transmit descriptor.
54 */
55 23
56struct ath_softc; 24struct ath_softc;
57 25
58#define TRUE 1 26#define ATH_RATE_MAX 30
59#define FALSE 0 27#define RATE_TABLE_SIZE 64
28#define MAX_TX_RATE_PHY 48
60 29
61#define ATH_RATE_MAX 30 30/* VALID_ALL - valid for 20/40/Legacy,
62#define MCS_SET_SIZE 128 31 * VALID - Legacy only,
32 * VALID_20 - HT 20 only,
33 * VALID_40 - HT 40 only */
63 34
64enum ieee80211_fixed_rate_mode { 35#define INVALID 0x0
65 IEEE80211_FIXED_RATE_NONE = 0, 36#define VALID 0x1
66 IEEE80211_FIXED_RATE_MCS = 1 /* HT rates */ 37#define VALID_20 0x2
67}; 38#define VALID_40 0x4
68 39#define VALID_2040 (VALID_20|VALID_40)
69/* 40#define VALID_ALL (VALID_2040|VALID)
70 * Use the hal os glue code to get ms time
71 */
72#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8)))
73
74#define WLAN_PHY_HT_20_SS WLAN_RC_PHY_HT_20_SS
75#define WLAN_PHY_HT_20_DS WLAN_RC_PHY_HT_20_DS
76#define WLAN_PHY_HT_20_DS_HGI WLAN_RC_PHY_HT_20_DS_HGI
77#define WLAN_PHY_HT_40_SS WLAN_RC_PHY_HT_40_SS
78#define WLAN_PHY_HT_40_SS_HGI WLAN_RC_PHY_HT_40_SS_HGI
79#define WLAN_PHY_HT_40_DS WLAN_RC_PHY_HT_40_DS
80#define WLAN_PHY_HT_40_DS_HGI WLAN_RC_PHY_HT_40_DS_HGI
81
82#define WLAN_PHY_OFDM PHY_OFDM
83#define WLAN_PHY_CCK PHY_CCK
84
85#define TRUE_20 0x2
86#define TRUE_40 0x4
87#define TRUE_2040 (TRUE_20|TRUE_40)
88#define TRUE_ALL (TRUE_2040|TRUE)
89
90enum {
91 WLAN_RC_PHY_HT_20_SS = 4,
92 WLAN_RC_PHY_HT_20_DS,
93 WLAN_RC_PHY_HT_40_SS,
94 WLAN_RC_PHY_HT_40_DS,
95 WLAN_RC_PHY_HT_20_SS_HGI,
96 WLAN_RC_PHY_HT_20_DS_HGI,
97 WLAN_RC_PHY_HT_40_SS_HGI,
98 WLAN_RC_PHY_HT_40_DS_HGI,
99 WLAN_RC_PHY_MAX
100};
101 41
102#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \ 42#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
103 || (_phy == WLAN_RC_PHY_HT_40_DS) \ 43 || (_phy == WLAN_RC_PHY_HT_40_DS) \
@@ -114,26 +54,22 @@ enum {
114 54
115#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS) 55#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
116 56
117/* Returns the capflag mode */
118#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \ 57#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
119 (capflag & WLAN_RC_40_FLAG) ? TRUE_40 : TRUE_20 : TRUE)) 58 (capflag & WLAN_RC_40_FLAG) ? VALID_40 : VALID_20 : VALID))
120 59
121/* Return TRUE if flag supports HT20 && client supports HT20 or 60/* Return TRUE if flag supports HT20 && client supports HT20 or
122 * return TRUE if flag supports HT40 && client supports HT40. 61 * return TRUE if flag supports HT40 && client supports HT40.
123 * This is used becos some rates overlap between HT20/HT40. 62 * This is used becos some rates overlap between HT20/HT40.
124 */ 63 */
125 64#define WLAN_RC_PHY_HT_VALID(flag, capflag) \
126#define WLAN_RC_PHY_HT_VALID(flag, capflag) (((flag & TRUE_20) && !(capflag \ 65 (((flag & VALID_20) && !(capflag & WLAN_RC_40_FLAG)) || \
127 & WLAN_RC_40_FLAG)) || ((flag & TRUE_40) && \ 66 ((flag & VALID_40) && (capflag & WLAN_RC_40_FLAG)))
128 (capflag & WLAN_RC_40_FLAG)))
129 67
130#define WLAN_RC_DS_FLAG (0x01) 68#define WLAN_RC_DS_FLAG (0x01)
131#define WLAN_RC_40_FLAG (0x02) 69#define WLAN_RC_40_FLAG (0x02)
132#define WLAN_RC_SGI_FLAG (0x04) 70#define WLAN_RC_SGI_FLAG (0x04)
133#define WLAN_RC_HT_FLAG (0x08) 71#define WLAN_RC_HT_FLAG (0x08)
134 72
135#define RATE_TABLE_SIZE 64
136
137/** 73/**
138 * struct ath_rate_table - Rate Control table 74 * struct ath_rate_table - Rate Control table
139 * @valid: valid for use in rate control 75 * @valid: valid for use in rate control
@@ -150,10 +86,11 @@ enum {
150 * @max_4ms_framelen: maximum frame length(bytes) for tx duration 86 * @max_4ms_framelen: maximum frame length(bytes) for tx duration
151 * @probe_interval: interval for rate control to probe for other rates 87 * @probe_interval: interval for rate control to probe for other rates
152 * @rssi_reduce_interval: interval for rate control to reduce rssi 88 * @rssi_reduce_interval: interval for rate control to reduce rssi
153 * @initial_ratemax: initial ratemax value used in ath_rc_sib_update() 89 * @initial_ratemax: initial ratemax value
154 */ 90 */
155struct ath_rate_table { 91struct ath_rate_table {
156 int rate_cnt; 92 int rate_cnt;
93 u8 rateCodeToIndex[256];
157 struct { 94 struct {
158 int valid; 95 int valid;
159 int valid_single_stream; 96 int valid_single_stream;
@@ -171,42 +108,26 @@ struct ath_rate_table {
171 u8 sgi_index; 108 u8 sgi_index;
172 u8 ht_index; 109 u8 ht_index;
173 u32 max_4ms_framelen; 110 u32 max_4ms_framelen;
111 u16 lpAckDuration;
112 u16 spAckDuration;
174 } info[RATE_TABLE_SIZE]; 113 } info[RATE_TABLE_SIZE];
175 u32 probe_interval; 114 u32 probe_interval;
176 u32 rssi_reduce_interval; 115 u32 rssi_reduce_interval;
177 u8 initial_ratemax; 116 u8 initial_ratemax;
178}; 117};
179 118
180#define ATH_RC_PROBE_ALLOWED 0x00000001
181#define ATH_RC_MINRATE_LASTRATE 0x00000002
182
183struct ath_rc_series {
184 u8 rix;
185 u8 tries;
186 u8 flags;
187 u32 max_4ms_framelen;
188};
189
190/* rcs_flags definition */
191#define ATH_RC_DS_FLAG 0x01
192#define ATH_RC_CW40_FLAG 0x02 /* CW 40 */
193#define ATH_RC_SGI_FLAG 0x04 /* Short Guard Interval */
194#define ATH_RC_HT_FLAG 0x08 /* HT */
195#define ATH_RC_RTSCTS_FLAG 0x10 /* RTS-CTS */
196
197/*
198 * State structures for new rate adaptation code
199 */
200#define MAX_TX_RATE_TBL 64
201#define MAX_TX_RATE_PHY 48
202
203struct ath_tx_ratectrl_state { 119struct ath_tx_ratectrl_state {
204 int8_t rssi_thres; /* required rssi for this rate (dB) */ 120 int8_t rssi_thres; /* required rssi for this rate (dB) */
205 u8 per; /* recent estimate of packet error rate (%) */ 121 u8 per; /* recent estimate of packet error rate (%) */
206}; 122};
207 123
124struct ath_rateset {
125 u8 rs_nrates;
126 u8 rs_rates[ATH_RATE_MAX];
127};
128
208/** 129/**
209 * struct ath_tx_ratectrl - TX Rate control Information 130 * struct ath_rate_priv - Rate Control priv data
210 * @state: RC state 131 * @state: RC state
211 * @rssi_last: last ACK rssi 132 * @rssi_last: last ACK rssi
212 * @rssi_last_lookup: last ACK rssi used for lookup 133 * @rssi_last_lookup: last ACK rssi used for lookup
@@ -225,9 +146,13 @@ struct ath_tx_ratectrl_state {
225 * @valid_phy_ratecnt: valid rate count 146 * @valid_phy_ratecnt: valid rate count
226 * @rate_max_phy: phy index for the max rate 147 * @rate_max_phy: phy index for the max rate
227 * @probe_interval: interval for ratectrl to probe for other rates 148 * @probe_interval: interval for ratectrl to probe for other rates
149 * @prev_data_rix: rate idx of last data frame
150 * @ht_cap: HT capabilities
151 * @single_stream: When TRUE, only single TX stream possible
152 * @neg_rates: Negotatied rates
153 * @neg_ht_rates: Negotiated HT rates
228 */ 154 */
229struct ath_tx_ratectrl { 155struct ath_rate_priv {
230 struct ath_tx_ratectrl_state state[MAX_TX_RATE_TBL];
231 int8_t rssi_last; 156 int8_t rssi_last;
232 int8_t rssi_last_lookup; 157 int8_t rssi_last_lookup;
233 int8_t rssi_last_prev; 158 int8_t rssi_last_prev;
@@ -237,89 +162,40 @@ struct ath_tx_ratectrl {
237 int32_t rssi_sum; 162 int32_t rssi_sum;
238 u8 rate_table_size; 163 u8 rate_table_size;
239 u8 probe_rate; 164 u8 probe_rate;
240 u32 rssi_time;
241 u32 rssi_down_time;
242 u32 probe_time;
243 u8 hw_maxretry_pktcnt; 165 u8 hw_maxretry_pktcnt;
244 u8 max_valid_rate; 166 u8 max_valid_rate;
245 u8 valid_rate_index[MAX_TX_RATE_TBL]; 167 u8 valid_rate_index[RATE_TABLE_SIZE];
246 u32 per_down_time; 168 u8 ht_cap;
247 169 u8 single_stream;
248 /* 11n state */
249 u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX]; 170 u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX];
250 u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][MAX_TX_RATE_TBL]; 171 u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][RATE_TABLE_SIZE];
251 u8 rc_phy_mode; 172 u8 rc_phy_mode;
252 u8 rate_max_phy; 173 u8 rate_max_phy;
174 u32 rssi_time;
175 u32 rssi_down_time;
176 u32 probe_time;
177 u32 per_down_time;
253 u32 probe_interval; 178 u32 probe_interval;
254};
255
256struct ath_rateset {
257 u8 rs_nrates;
258 u8 rs_rates[ATH_RATE_MAX];
259};
260
261/* per-device state */
262struct ath_rate_softc {
263 /* phy tables that contain rate control data */
264 const void *hw_rate_table[ATH9K_MODE_MAX];
265
266 /* -1 or index of fixed rate */
267 int fixedrix;
268};
269
270/* per-node state */
271struct ath_rate_node {
272 struct ath_tx_ratectrl tx_ratectrl;
273
274 /* rate idx of last data frame */
275 u32 prev_data_rix; 179 u32 prev_data_rix;
276 180 u32 tx_triglevel_max;
277 /* ht capabilities */ 181 struct ath_tx_ratectrl_state state[RATE_TABLE_SIZE];
278 u8 ht_cap;
279
280 /* When TRUE, only single stream Tx possible */
281 u8 single_stream;
282
283 /* Negotiated rates */
284 struct ath_rateset neg_rates; 182 struct ath_rateset neg_rates;
285
286 /* Negotiated HT rates */
287 struct ath_rateset neg_ht_rates; 183 struct ath_rateset neg_ht_rates;
288
289 struct ath_rate_softc *asc; 184 struct ath_rate_softc *asc;
290 struct ath_vap *avp;
291}; 185};
292 186
293/* Driver data of ieee80211_tx_info */
294struct ath_tx_info_priv { 187struct ath_tx_info_priv {
295 struct ath_rc_series rcs[4];
296 struct ath_tx_status tx; 188 struct ath_tx_status tx;
297 int n_frames; 189 int n_frames;
298 int n_bad_frames; 190 int n_bad_frames;
299 u8 min_rate; 191 bool update_rc;
300}; 192};
301 193
302/* 194#define ATH_TX_INFO_PRIV(tx_info) \
303 * Attach/detach a rate control module. 195 ((struct ath_tx_info_priv *)((tx_info)->rate_driver_data[0]))
304 */
305struct ath_rate_softc *ath_rate_attach(struct ath_hal *ah);
306void ath_rate_detach(struct ath_rate_softc *asc);
307
308/*
309 * Update/reset rate control state for 802.11 state transitions.
310 * Important mostly as the analog to ath_rate_newassoc when operating
311 * in station mode.
312 */
313void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv);
314void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp);
315
316/*
317 * Return rate index for given Dot11 Rate.
318 */
319u8 ath_rate_findrateix(struct ath_softc *sc,
320 u8 dot11_rate);
321 196
322/* Routines to register/unregister rate control algorithm */ 197void ath_rate_attach(struct ath_softc *sc);
198u8 ath_rate_findrateix(struct ath_softc *sc, u8 dot11_rate);
323int ath_rate_control_register(void); 199int ath_rate_control_register(void);
324void ath_rate_control_unregister(void); 200void ath_rate_control_unregister(void);
325 201
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 504a0444d89f..462e08c3d09d 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -14,10 +14,6 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17/*
18 * Implementation of receive path.
19 */
20
21#include "core.h" 17#include "core.h"
22 18
23/* 19/*
@@ -27,10 +23,7 @@
27 * MAC acknowledges BA status as long as it copies frames to host 23 * MAC acknowledges BA status as long as it copies frames to host
28 * buffer (or rx fifo). This can incorrectly acknowledge packets 24 * buffer (or rx fifo). This can incorrectly acknowledge packets
29 * to a sender if last desc is self-linked. 25 * to a sender if last desc is self-linked.
30 *
31 * NOTE: Caller should hold the rxbuf lock.
32 */ 26 */
33
34static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) 27static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
35{ 28{
36 struct ath_hal *ah = sc->sc_ah; 29 struct ath_hal *ah = sc->sc_ah;
@@ -40,356 +33,53 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
40 ATH_RXBUF_RESET(bf); 33 ATH_RXBUF_RESET(bf);
41 34
42 ds = bf->bf_desc; 35 ds = bf->bf_desc;
43 ds->ds_link = 0; /* link to null */ 36 ds->ds_link = 0; /* link to null */
44 ds->ds_data = bf->bf_buf_addr; 37 ds->ds_data = bf->bf_buf_addr;
45 38
46 /* XXX For RADAR? 39 /* virtual addr of the beginning of the buffer. */
47 * virtual addr of the beginning of the buffer. */
48 skb = bf->bf_mpdu; 40 skb = bf->bf_mpdu;
49 ASSERT(skb != NULL); 41 ASSERT(skb != NULL);
50 ds->ds_vdata = skb->data; 42 ds->ds_vdata = skb->data;
51 43
52 /* setup rx descriptors. The sc_rxbufsize here tells the harware 44 /* setup rx descriptors. The rx.bufsize here tells the harware
53 * how much data it can DMA to us and that we are prepared 45 * how much data it can DMA to us and that we are prepared
54 * to process */ 46 * to process */
55 ath9k_hw_setuprxdesc(ah, 47 ath9k_hw_setuprxdesc(ah, ds,
56 ds, 48 sc->rx.bufsize,
57 sc->sc_rxbufsize,
58 0); 49 0);
59 50
60 if (sc->sc_rxlink == NULL) 51 if (sc->rx.rxlink == NULL)
61 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 52 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
62 else 53 else
63 *sc->sc_rxlink = bf->bf_daddr; 54 *sc->rx.rxlink = bf->bf_daddr;
64 55
65 sc->sc_rxlink = &ds->ds_link; 56 sc->rx.rxlink = &ds->ds_link;
66 ath9k_hw_rxena(ah); 57 ath9k_hw_rxena(ah);
67} 58}
68 59
69/* Process received BAR frame */ 60static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
70
71static int ath_bar_rx(struct ath_softc *sc,
72 struct ath_node *an,
73 struct sk_buff *skb)
74{
75 struct ieee80211_bar *bar;
76 struct ath_arx_tid *rxtid;
77 struct sk_buff *tskb;
78 struct ath_recv_status *rx_status;
79 int tidno, index, cindex;
80 u16 seqno;
81
82 /* look at BAR contents */
83
84 bar = (struct ieee80211_bar *)skb->data;
85 tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M)
86 >> IEEE80211_BAR_CTL_TID_S;
87 seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT;
88
89 /* process BAR - indicate all pending RX frames till the BAR seqno */
90
91 rxtid = &an->an_aggr.rx.tid[tidno];
92
93 spin_lock_bh(&rxtid->tidlock);
94
95 /* get relative index */
96
97 index = ATH_BA_INDEX(rxtid->seq_next, seqno);
98
99 /* drop BAR if old sequence (index is too large) */
100
101 if ((index > rxtid->baw_size) &&
102 (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))))
103 /* discard frame, ieee layer may not treat frame as a dup */
104 goto unlock_and_free;
105
106 /* complete receive processing for all pending frames upto BAR seqno */
107
108 cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
109 while ((rxtid->baw_head != rxtid->baw_tail) &&
110 (rxtid->baw_head != cindex)) {
111 tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
112 rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
113 rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;
114
115 if (tskb != NULL)
116 ath_rx_subframe(an, tskb, rx_status);
117
118 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
119 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
120 }
121
122 /* ... and indicate rest of the frames in-order */
123
124 while (rxtid->baw_head != rxtid->baw_tail &&
125 rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) {
126 tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf;
127 rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status;
128 rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL;
129
130 ath_rx_subframe(an, tskb, rx_status);
131
132 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
133 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
134 }
135
136unlock_and_free:
137 spin_unlock_bh(&rxtid->tidlock);
138 /* free bar itself */
139 dev_kfree_skb(skb);
140 return IEEE80211_FTYPE_CTL;
141}
142
143/* Function to handle a subframe of aggregation when HT is enabled */
144
145static int ath_ampdu_input(struct ath_softc *sc,
146 struct ath_node *an,
147 struct sk_buff *skb,
148 struct ath_recv_status *rx_status)
149{
150 struct ieee80211_hdr *hdr;
151 struct ath_arx_tid *rxtid;
152 struct ath_rxbuf *rxbuf;
153 u8 type, subtype;
154 u16 rxseq;
155 int tid = 0, index, cindex, rxdiff;
156 __le16 fc;
157 u8 *qc;
158
159 hdr = (struct ieee80211_hdr *)skb->data;
160 fc = hdr->frame_control;
161
162 /* collect stats of frames with non-zero version */
163
164 if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) {
165 dev_kfree_skb(skb);
166 return -1;
167 }
168
169 type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE;
170 subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
171
172 if (ieee80211_is_back_req(fc))
173 return ath_bar_rx(sc, an, skb);
174
175 /* special aggregate processing only for qos unicast data frames */
176
177 if (!ieee80211_is_data(fc) ||
178 !ieee80211_is_data_qos(fc) ||
179 is_multicast_ether_addr(hdr->addr1))
180 return ath_rx_subframe(an, skb, rx_status);
181
182 /* lookup rx tid state */
183
184 if (ieee80211_is_data_qos(fc)) {
185 qc = ieee80211_get_qos_ctl(hdr);
186 tid = qc[0] & 0xf;
187 }
188
189 if (sc->sc_ah->ah_opmode == ATH9K_M_STA) {
190 /* Drop the frame not belonging to me. */
191 if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) {
192 dev_kfree_skb(skb);
193 return -1;
194 }
195 }
196
197 rxtid = &an->an_aggr.rx.tid[tid];
198
199 spin_lock(&rxtid->tidlock);
200
201 rxdiff = (rxtid->baw_tail - rxtid->baw_head) &
202 (ATH_TID_MAX_BUFS - 1);
203
204 /*
205 * If the ADDBA exchange has not been completed by the source,
206 * process via legacy path (i.e. no reordering buffer is needed)
207 */
208 if (!rxtid->addba_exchangecomplete) {
209 spin_unlock(&rxtid->tidlock);
210 return ath_rx_subframe(an, skb, rx_status);
211 }
212
213 /* extract sequence number from recvd frame */
214
215 rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT;
216
217 if (rxtid->seq_reset) {
218 rxtid->seq_reset = 0;
219 rxtid->seq_next = rxseq;
220 }
221
222 index = ATH_BA_INDEX(rxtid->seq_next, rxseq);
223
224 /* drop frame if old sequence (index is too large) */
225
226 if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) {
227 /* discard frame, ieee layer may not treat frame as a dup */
228 spin_unlock(&rxtid->tidlock);
229 dev_kfree_skb(skb);
230 return IEEE80211_FTYPE_DATA;
231 }
232
233 /* sequence number is beyond block-ack window */
234
235 if (index >= rxtid->baw_size) {
236
237 /* complete receive processing for all pending frames */
238
239 while (index >= rxtid->baw_size) {
240
241 rxbuf = rxtid->rxbuf + rxtid->baw_head;
242
243 if (rxbuf->rx_wbuf != NULL) {
244 ath_rx_subframe(an, rxbuf->rx_wbuf,
245 &rxbuf->rx_status);
246 rxbuf->rx_wbuf = NULL;
247 }
248
249 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
250 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
251
252 index--;
253 }
254 }
255
256 /* add buffer to the recv ba window */
257
258 cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
259 rxbuf = rxtid->rxbuf + cindex;
260
261 if (rxbuf->rx_wbuf != NULL) {
262 spin_unlock(&rxtid->tidlock);
263 /* duplicate frame */
264 dev_kfree_skb(skb);
265 return IEEE80211_FTYPE_DATA;
266 }
267
268 rxbuf->rx_wbuf = skb;
269 rxbuf->rx_time = get_timestamp();
270 rxbuf->rx_status = *rx_status;
271
272 /* advance tail if sequence received is newer
273 * than any received so far */
274
275 if (index >= rxdiff) {
276 rxtid->baw_tail = cindex;
277 INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS);
278 }
279
280 /* indicate all in-order received frames */
281
282 while (rxtid->baw_head != rxtid->baw_tail) {
283 rxbuf = rxtid->rxbuf + rxtid->baw_head;
284 if (!rxbuf->rx_wbuf)
285 break;
286
287 ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status);
288 rxbuf->rx_wbuf = NULL;
289
290 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
291 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
292 }
293
294 /*
295 * start a timer to flush all received frames if there are pending
296 * receive frames
297 */
298 if (rxtid->baw_head != rxtid->baw_tail)
299 mod_timer(&rxtid->timer, ATH_RX_TIMEOUT);
300 else
301 del_timer_sync(&rxtid->timer);
302
303 spin_unlock(&rxtid->tidlock);
304 return IEEE80211_FTYPE_DATA;
305}
306
307/* Timer to flush all received sub-frames */
308
309static void ath_rx_timer(unsigned long data)
310{ 61{
311 struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data; 62 /* XXX block beacon interrupts */
312 struct ath_node *an = rxtid->an; 63 ath9k_hw_setantenna(sc->sc_ah, antenna);
313 struct ath_rxbuf *rxbuf; 64 sc->rx.defant = antenna;
314 int nosched; 65 sc->rx.rxotherant = 0;
315
316 spin_lock_bh(&rxtid->tidlock);
317 while (rxtid->baw_head != rxtid->baw_tail) {
318 rxbuf = rxtid->rxbuf + rxtid->baw_head;
319 if (!rxbuf->rx_wbuf) {
320 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
321 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
322 continue;
323 }
324
325 /*
326 * Stop if the next one is a very recent frame.
327 *
328 * Call get_timestamp in every iteration to protect against the
329 * case in which a new frame is received while we are executing
330 * this function. Using a timestamp obtained before entering
331 * the loop could lead to a very large time interval
332 * (a negative value typecast to unsigned), breaking the
333 * function's logic.
334 */
335 if ((get_timestamp() - rxbuf->rx_time) <
336 (ATH_RX_TIMEOUT * HZ / 1000))
337 break;
338
339 ath_rx_subframe(an, rxbuf->rx_wbuf,
340 &rxbuf->rx_status);
341 rxbuf->rx_wbuf = NULL;
342
343 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
344 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
345 }
346
347 /*
348 * start a timer to flush all received frames if there are pending
349 * receive frames
350 */
351 if (rxtid->baw_head != rxtid->baw_tail)
352 nosched = 0;
353 else
354 nosched = 1; /* no need to re-arm the timer again */
355
356 spin_unlock_bh(&rxtid->tidlock);
357} 66}
358 67
359/* Free all pending sub-frames in the re-ordering buffer */ 68/*
360 69 * Extend 15-bit time stamp from rx descriptor to
361static void ath_rx_flush_tid(struct ath_softc *sc, 70 * a full 64-bit TSF using the current h/w TSF.
362 struct ath_arx_tid *rxtid, int drop) 71*/
72static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
363{ 73{
364 struct ath_rxbuf *rxbuf; 74 u64 tsf;
365 unsigned long flag;
366
367 spin_lock_irqsave(&rxtid->tidlock, flag);
368 while (rxtid->baw_head != rxtid->baw_tail) {
369 rxbuf = rxtid->rxbuf + rxtid->baw_head;
370 if (!rxbuf->rx_wbuf) {
371 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS);
372 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX);
373 continue;
374 }
375
376 if (drop)
377 dev_kfree_skb(rxbuf->rx_wbuf);
378 else
379 ath_rx_subframe(rxtid->an,
380 rxbuf->rx_wbuf,
381 &rxbuf->rx_status);
382
383 rxbuf->rx_wbuf = NULL;
384 75
385 INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); 76 tsf = ath9k_hw_gettsf64(sc->sc_ah);
386 INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); 77 if ((tsf & 0x7fff) < rstamp)
387 } 78 tsf -= 0x8000;
388 spin_unlock_irqrestore(&rxtid->tidlock, flag); 79 return (tsf & ~0x7fff) | rstamp;
389} 80}
390 81
391static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, 82static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
392 u32 len)
393{ 83{
394 struct sk_buff *skb; 84 struct sk_buff *skb;
395 u32 off; 85 u32 off;
@@ -414,67 +104,131 @@ static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
414 skb_reserve(skb, sc->sc_cachelsz - off); 104 skb_reserve(skb, sc->sc_cachelsz - off);
415 } else { 105 } else {
416 DPRINTF(sc, ATH_DBG_FATAL, 106 DPRINTF(sc, ATH_DBG_FATAL,
417 "%s: skbuff alloc of size %u failed\n", 107 "skbuff alloc of size %u failed\n", len);
418 __func__, len);
419 return NULL; 108 return NULL;
420 } 109 }
421 110
422 return skb; 111 return skb;
423} 112}
424 113
425static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb) 114/*
115 * For Decrypt or Demic errors, we only mark packet status here and always push
116 * up the frame up to let mac80211 handle the actual error case, be it no
117 * decryption key or real decryption error. This let us keep statistics there.
118 */
119static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
120 struct ieee80211_rx_status *rx_status, bool *decrypt_error,
121 struct ath_softc *sc)
426{ 122{
427 struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf; 123 struct ieee80211_hdr *hdr;
124 u8 ratecode;
125 __le16 fc;
428 126
429 ASSERT(bf != NULL); 127 hdr = (struct ieee80211_hdr *)skb->data;
128 fc = hdr->frame_control;
129 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
430 130
431 spin_lock_bh(&sc->sc_rxbuflock); 131 if (ds->ds_rxstat.rs_more) {
432 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
433 /* 132 /*
434 * This buffer is still held for hw acess. 133 * Frame spans multiple descriptors; this cannot happen yet
435 * Mark it as free to be re-queued it later. 134 * as we don't support jumbograms. If not in monitor mode,
135 * discard the frame. Enable this if you want to see
136 * error frames in Monitor mode.
436 */ 137 */
437 bf->bf_status |= ATH_BUFSTATUS_FREE; 138 if (sc->sc_ah->ah_opmode != NL80211_IFTYPE_MONITOR)
438 } else { 139 goto rx_next;
439 /* XXX: we probably never enter here, remove after 140 } else if (ds->ds_rxstat.rs_status != 0) {
440 * verification */ 141 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
441 list_add_tail(&bf->list, &sc->sc_rxbuf); 142 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
442 ath_rx_buf_link(sc, bf); 143 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY)
144 goto rx_next;
145
146 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
147 *decrypt_error = true;
148 } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
149 if (ieee80211_is_ctl(fc))
150 /*
151 * Sometimes, we get invalid
152 * MIC failures on valid control frames.
153 * Remove these mic errors.
154 */
155 ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC;
156 else
157 rx_status->flag |= RX_FLAG_MMIC_ERROR;
158 }
159 /*
160 * Reject error frames with the exception of
161 * decryption and MIC failures. For monitor mode,
162 * we also ignore the CRC error.
163 */
164 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_MONITOR) {
165 if (ds->ds_rxstat.rs_status &
166 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
167 ATH9K_RXERR_CRC))
168 goto rx_next;
169 } else {
170 if (ds->ds_rxstat.rs_status &
171 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
172 goto rx_next;
173 }
174 }
443 } 175 }
444 spin_unlock_bh(&sc->sc_rxbuflock);
445}
446 176
447/* 177 ratecode = ds->ds_rxstat.rs_rate;
448 * The skb indicated to upper stack won't be returned to us. 178
449 * So we have to allocate a new one and queue it by ourselves. 179 if (ratecode & 0x80) {
450 */ 180 /* HT rate */
451static int ath_rx_indicate(struct ath_softc *sc, 181 rx_status->flag |= RX_FLAG_HT;
452 struct sk_buff *skb, 182 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
453 struct ath_recv_status *status, 183 rx_status->flag |= RX_FLAG_40MHZ;
454 u16 keyix) 184 if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
455{ 185 rx_status->flag |= RX_FLAG_SHORT_GI;
456 struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf; 186 rx_status->rate_idx = ratecode & 0x7f;
457 struct sk_buff *nskb; 187 } else {
458 int type; 188 int i = 0, cur_band, n_rates;
459 189 struct ieee80211_hw *hw = sc->hw;
460 /* indicate frame to the stack, which will free the old skb. */
461 type = _ath_rx_indicate(sc, skb, status, keyix);
462
463 /* allocate a new skb and queue it to for H/W processing */
464 nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
465 if (nskb != NULL) {
466 bf->bf_mpdu = nskb;
467 bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
468 sc->sc_rxbufsize,
469 PCI_DMA_FROMDEVICE);
470 bf->bf_dmacontext = bf->bf_buf_addr;
471 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
472 190
473 /* queue the new wbuf to H/W */ 191 cur_band = hw->conf.channel->band;
474 ath_rx_requeue(sc, nskb); 192 n_rates = sc->sbands[cur_band].n_bitrates;
193
194 for (i = 0; i < n_rates; i++) {
195 if (sc->sbands[cur_band].bitrates[i].hw_value ==
196 ratecode) {
197 rx_status->rate_idx = i;
198 break;
199 }
200
201 if (sc->sbands[cur_band].bitrates[i].hw_value_short ==
202 ratecode) {
203 rx_status->rate_idx = i;
204 rx_status->flag |= RX_FLAG_SHORTPRE;
205 break;
206 }
207 }
475 } 208 }
476 209
477 return type; 210 rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
211 rx_status->band = sc->hw->conf.channel->band;
212 rx_status->freq = sc->hw->conf.channel->center_freq;
213 rx_status->noise = sc->sc_ani.sc_noise_floor;
214 rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi;
215 rx_status->antenna = ds->ds_rxstat.rs_antenna;
216
217 /* at 45 you will be able to use MCS 15 reliably. A more elaborate
218 * scheme can be used here but it requires tables of SNR/throughput for
219 * each possible mode used. */
220 rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45;
221
222 /* rssi can be more than 45 though, anything above that
223 * should be considered at 100% */
224 if (rx_status->qual > 100)
225 rx_status->qual = 100;
226
227 rx_status->flag |= RX_FLAG_TSFT;
228
229 return 1;
230rx_next:
231 return 0;
478} 232}
479 233
480static void ath_opmode_init(struct ath_softc *sc) 234static void ath_opmode_init(struct ath_softc *sc)
@@ -498,11 +252,7 @@ static void ath_opmode_init(struct ath_softc *sc)
498 252
499 /* calculate and install multicast filter */ 253 /* calculate and install multicast filter */
500 mfilt[0] = mfilt[1] = ~0; 254 mfilt[0] = mfilt[1] = ~0;
501
502 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 255 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
503 DPRINTF(sc, ATH_DBG_CONFIG ,
504 "%s: RX filter 0x%x, MC filter %08x:%08x\n",
505 __func__, rfilt, mfilt[0], mfilt[1]);
506} 256}
507 257
508int ath_rx_init(struct ath_softc *sc, int nbufs) 258int ath_rx_init(struct ath_softc *sc, int nbufs)
@@ -512,38 +262,29 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
512 int error = 0; 262 int error = 0;
513 263
514 do { 264 do {
515 spin_lock_init(&sc->sc_rxflushlock); 265 spin_lock_init(&sc->rx.rxflushlock);
516 sc->sc_flags &= ~SC_OP_RXFLUSH; 266 sc->sc_flags &= ~SC_OP_RXFLUSH;
517 spin_lock_init(&sc->sc_rxbuflock); 267 spin_lock_init(&sc->rx.rxbuflock);
518 268
519 /* 269 sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
520 * Cisco's VPN software requires that drivers be able to
521 * receive encapsulated frames that are larger than the MTU.
522 * Since we can't be sure how large a frame we'll get, setup
523 * to handle the larges on possible.
524 */
525 sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
526 min(sc->sc_cachelsz, 270 min(sc->sc_cachelsz,
527 (u16)64)); 271 (u16)64));
528 272
529 DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n", 273 DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
530 __func__, sc->sc_cachelsz, sc->sc_rxbufsize); 274 sc->sc_cachelsz, sc->rx.bufsize);
531 275
532 /* Initialize rx descriptors */ 276 /* Initialize rx descriptors */
533 277
534 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, 278 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
535 "rx", nbufs, 1); 279 "rx", nbufs, 1);
536 if (error != 0) { 280 if (error != 0) {
537 DPRINTF(sc, ATH_DBG_FATAL, 281 DPRINTF(sc, ATH_DBG_FATAL,
538 "%s: failed to allocate rx descriptors: %d\n", 282 "failed to allocate rx descriptors: %d\n", error);
539 __func__, error);
540 break; 283 break;
541 } 284 }
542 285
543 /* Pre-allocate a wbuf for each rx buffer */ 286 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
544 287 skb = ath_rxbuf_alloc(sc, sc->rx.bufsize);
545 list_for_each_entry(bf, &sc->sc_rxbuf, list) {
546 skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
547 if (skb == NULL) { 288 if (skb == NULL) {
548 error = -ENOMEM; 289 error = -ENOMEM;
549 break; 290 break;
@@ -551,12 +292,20 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
551 292
552 bf->bf_mpdu = skb; 293 bf->bf_mpdu = skb;
553 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data, 294 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
554 sc->sc_rxbufsize, 295 sc->rx.bufsize,
555 PCI_DMA_FROMDEVICE); 296 PCI_DMA_FROMDEVICE);
297 if (unlikely(pci_dma_mapping_error(sc->pdev,
298 bf->bf_buf_addr))) {
299 dev_kfree_skb_any(skb);
300 bf->bf_mpdu = NULL;
301 DPRINTF(sc, ATH_DBG_CONFIG,
302 "pci_dma_mapping_error() on RX init\n");
303 error = -ENOMEM;
304 break;
305 }
556 bf->bf_dmacontext = bf->bf_buf_addr; 306 bf->bf_dmacontext = bf->bf_buf_addr;
557 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
558 } 307 }
559 sc->sc_rxlink = NULL; 308 sc->rx.rxlink = NULL;
560 309
561 } while (0); 310 } while (0);
562 311
@@ -566,23 +315,19 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
566 return error; 315 return error;
567} 316}
568 317
569/* Reclaim all rx queue resources */
570
571void ath_rx_cleanup(struct ath_softc *sc) 318void ath_rx_cleanup(struct ath_softc *sc)
572{ 319{
573 struct sk_buff *skb; 320 struct sk_buff *skb;
574 struct ath_buf *bf; 321 struct ath_buf *bf;
575 322
576 list_for_each_entry(bf, &sc->sc_rxbuf, list) { 323 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
577 skb = bf->bf_mpdu; 324 skb = bf->bf_mpdu;
578 if (skb) 325 if (skb)
579 dev_kfree_skb(skb); 326 dev_kfree_skb(skb);
580 } 327 }
581 328
582 /* cleanup rx descriptors */ 329 if (sc->rx.rxdma.dd_desc_len != 0)
583 330 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
584 if (sc->sc_rxdma.dd_desc_len != 0)
585 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
586} 331}
587 332
588/* 333/*
@@ -615,201 +360,115 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
615 | ATH9K_RX_FILTER_MCAST; 360 | ATH9K_RX_FILTER_MCAST;
616 361
617 /* If not a STA, enable processing of Probe Requests */ 362 /* If not a STA, enable processing of Probe Requests */
618 if (sc->sc_ah->ah_opmode != ATH9K_M_STA) 363 if (sc->sc_ah->ah_opmode != NL80211_IFTYPE_STATION)
619 rfilt |= ATH9K_RX_FILTER_PROBEREQ; 364 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
620 365
621 /* Can't set HOSTAP into promiscous mode */ 366 /* Can't set HOSTAP into promiscous mode */
622 if (((sc->sc_ah->ah_opmode != ATH9K_M_HOSTAP) && 367 if (((sc->sc_ah->ah_opmode != NL80211_IFTYPE_AP) &&
623 (sc->rx_filter & FIF_PROMISC_IN_BSS)) || 368 (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
624 (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR)) { 369 (sc->sc_ah->ah_opmode == NL80211_IFTYPE_MONITOR)) {
625 rfilt |= ATH9K_RX_FILTER_PROM; 370 rfilt |= ATH9K_RX_FILTER_PROM;
626 /* ??? To prevent from sending ACK */ 371 /* ??? To prevent from sending ACK */
627 rfilt &= ~ATH9K_RX_FILTER_UCAST; 372 rfilt &= ~ATH9K_RX_FILTER_UCAST;
628 } 373 }
629 374
630 if (((sc->sc_ah->ah_opmode == ATH9K_M_STA) && 375 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION ||
631 (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)) || 376 sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC)
632 (sc->sc_ah->ah_opmode == ATH9K_M_IBSS))
633 rfilt |= ATH9K_RX_FILTER_BEACON; 377 rfilt |= ATH9K_RX_FILTER_BEACON;
634 378
635 /* If in HOSTAP mode, want to enable reception of PSPOLL frames 379 /* If in HOSTAP mode, want to enable reception of PSPOLL frames
636 & beacon frames */ 380 & beacon frames */
637 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) 381 if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP)
638 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL); 382 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
383
639 return rfilt; 384 return rfilt;
640 385
641#undef RX_FILTER_PRESERVE 386#undef RX_FILTER_PRESERVE
642} 387}
643 388
644/* Enable the receive h/w following a reset. */
645
646int ath_startrecv(struct ath_softc *sc) 389int ath_startrecv(struct ath_softc *sc)
647{ 390{
648 struct ath_hal *ah = sc->sc_ah; 391 struct ath_hal *ah = sc->sc_ah;
649 struct ath_buf *bf, *tbf; 392 struct ath_buf *bf, *tbf;
650 393
651 spin_lock_bh(&sc->sc_rxbuflock); 394 spin_lock_bh(&sc->rx.rxbuflock);
652 if (list_empty(&sc->sc_rxbuf)) 395 if (list_empty(&sc->rx.rxbuf))
653 goto start_recv; 396 goto start_recv;
654 397
655 sc->sc_rxlink = NULL; 398 sc->rx.rxlink = NULL;
656 list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) { 399 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
657 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
658 /* restarting h/w, no need for holding descriptors */
659 bf->bf_status &= ~ATH_BUFSTATUS_STALE;
660 /*
661 * Upper layer may not be done with the frame yet so
662 * we can't just re-queue it to hardware. Remove it
663 * from h/w queue. It'll be re-queued when upper layer
664 * returns the frame and ath_rx_requeue_mpdu is called.
665 */
666 if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
667 list_del(&bf->list);
668 continue;
669 }
670 }
671 /* chain descriptors */
672 ath_rx_buf_link(sc, bf); 400 ath_rx_buf_link(sc, bf);
673 } 401 }
674 402
675 /* We could have deleted elements so the list may be empty now */ 403 /* We could have deleted elements so the list may be empty now */
676 if (list_empty(&sc->sc_rxbuf)) 404 if (list_empty(&sc->rx.rxbuf))
677 goto start_recv; 405 goto start_recv;
678 406
679 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); 407 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
680 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 408 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
681 ath9k_hw_rxena(ah); /* enable recv descriptors */ 409 ath9k_hw_rxena(ah);
682 410
683start_recv: 411start_recv:
684 spin_unlock_bh(&sc->sc_rxbuflock); 412 spin_unlock_bh(&sc->rx.rxbuflock);
685 ath_opmode_init(sc); /* set filters, etc. */ 413 ath_opmode_init(sc);
686 ath9k_hw_startpcureceive(ah); /* re-enable PCU/DMA engine */ 414 ath9k_hw_startpcureceive(ah);
415
687 return 0; 416 return 0;
688} 417}
689 418
690/* Disable the receive h/w in preparation for a reset. */
691
692bool ath_stoprecv(struct ath_softc *sc) 419bool ath_stoprecv(struct ath_softc *sc)
693{ 420{
694 struct ath_hal *ah = sc->sc_ah; 421 struct ath_hal *ah = sc->sc_ah;
695 u64 tsf;
696 bool stopped; 422 bool stopped;
697 423
698 ath9k_hw_stoppcurecv(ah); /* disable PCU */ 424 ath9k_hw_stoppcurecv(ah);
699 ath9k_hw_setrxfilter(ah, 0); /* clear recv filter */ 425 ath9k_hw_setrxfilter(ah, 0);
700 stopped = ath9k_hw_stopdmarecv(ah); /* disable DMA engine */ 426 stopped = ath9k_hw_stopdmarecv(ah);
701 mdelay(3); /* 3ms is long enough for 1 frame */ 427 mdelay(3); /* 3ms is long enough for 1 frame */
702 tsf = ath9k_hw_gettsf64(ah); 428 sc->rx.rxlink = NULL;
703 sc->sc_rxlink = NULL; /* just in case */ 429
704 return stopped; 430 return stopped;
705} 431}
706 432
707/* Flush receive queue */
708
709void ath_flushrecv(struct ath_softc *sc) 433void ath_flushrecv(struct ath_softc *sc)
710{ 434{
711 /* 435 spin_lock_bh(&sc->rx.rxflushlock);
712 * ath_rx_tasklet may be used to handle rx interrupt and flush receive
713 * queue at the same time. Use a lock to serialize the access of rx
714 * queue.
715 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
716 * Instead, do not claim the spinlock but check for a flush in
717 * progress (see references to sc_rxflush)
718 */
719 spin_lock_bh(&sc->sc_rxflushlock);
720 sc->sc_flags |= SC_OP_RXFLUSH; 436 sc->sc_flags |= SC_OP_RXFLUSH;
721
722 ath_rx_tasklet(sc, 1); 437 ath_rx_tasklet(sc, 1);
723
724 sc->sc_flags &= ~SC_OP_RXFLUSH; 438 sc->sc_flags &= ~SC_OP_RXFLUSH;
725 spin_unlock_bh(&sc->sc_rxflushlock); 439 spin_unlock_bh(&sc->rx.rxflushlock);
726} 440}
727 441
728/* Process an individual frame */
729
730int ath_rx_input(struct ath_softc *sc,
731 struct ath_node *an,
732 int is_ampdu,
733 struct sk_buff *skb,
734 struct ath_recv_status *rx_status,
735 enum ATH_RX_TYPE *status)
736{
737 if (is_ampdu && (sc->sc_flags & SC_OP_RXAGGR)) {
738 *status = ATH_RX_CONSUMED;
739 return ath_ampdu_input(sc, an, skb, rx_status);
740 } else {
741 *status = ATH_RX_NON_CONSUMED;
742 return -1;
743 }
744}
745
746/* Process receive queue, as well as LED, etc. */
747
748int ath_rx_tasklet(struct ath_softc *sc, int flush) 442int ath_rx_tasklet(struct ath_softc *sc, int flush)
749{ 443{
750#define PA2DESC(_sc, _pa) \ 444#define PA2DESC(_sc, _pa) \
751 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 445 ((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \
752 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 446 ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))
753 447
754 struct ath_buf *bf, *bf_held = NULL; 448 struct ath_buf *bf;
755 struct ath_desc *ds; 449 struct ath_desc *ds;
756 struct ieee80211_hdr *hdr; 450 struct sk_buff *skb = NULL, *requeue_skb;
757 struct sk_buff *skb = NULL; 451 struct ieee80211_rx_status rx_status;
758 struct ath_recv_status rx_status;
759 struct ath_hal *ah = sc->sc_ah; 452 struct ath_hal *ah = sc->sc_ah;
760 int type, rx_processed = 0; 453 struct ieee80211_hdr *hdr;
761 u32 phyerr; 454 int hdrlen, padsize, retval;
762 u8 chainreset = 0; 455 bool decrypt_error = false;
763 int retval; 456 u8 keyix;
764 __le16 fc; 457
458 spin_lock_bh(&sc->rx.rxbuflock);
765 459
766 do { 460 do {
767 /* If handling rx interrupt and flush is in progress => exit */ 461 /* If handling rx interrupt and flush is in progress => exit */
768 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) 462 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
769 break; 463 break;
770 464
771 spin_lock_bh(&sc->sc_rxbuflock); 465 if (list_empty(&sc->rx.rxbuf)) {
772 if (list_empty(&sc->sc_rxbuf)) { 466 sc->rx.rxlink = NULL;
773 sc->sc_rxlink = NULL;
774 spin_unlock_bh(&sc->sc_rxbuflock);
775 break; 467 break;
776 } 468 }
777 469
778 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); 470 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
779
780 /*
781 * There is a race condition that BH gets scheduled after sw
782 * writes RxE and before hw re-load the last descriptor to get
783 * the newly chained one. Software must keep the last DONE
784 * descriptor as a holding descriptor - software does so by
785 * marking it with the STALE flag.
786 */
787 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
788 bf_held = bf;
789 if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
790 /*
791 * The holding descriptor is the last
792 * descriptor in queue. It's safe to
793 * remove the last holding descriptor
794 * in BH context.
795 */
796 list_del(&bf_held->list);
797 bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
798 sc->sc_rxlink = NULL;
799
800 if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
801 list_add_tail(&bf_held->list,
802 &sc->sc_rxbuf);
803 ath_rx_buf_link(sc, bf_held);
804 }
805 spin_unlock_bh(&sc->sc_rxbuflock);
806 break;
807 }
808 bf = list_entry(bf->list.next, struct ath_buf, list);
809 }
810
811 ds = bf->bf_desc; 471 ds = bf->bf_desc;
812 ++rx_processed;
813 472
814 /* 473 /*
815 * Must provide the virtual address of the current 474 * Must provide the virtual address of the current
@@ -822,8 +481,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
822 * on. All this is necessary because of our use of 481 * on. All this is necessary because of our use of
823 * a self-linked list to avoid rx overruns. 482 * a self-linked list to avoid rx overruns.
824 */ 483 */
825 retval = ath9k_hw_rxprocdesc(ah, 484 retval = ath9k_hw_rxprocdesc(ah, ds,
826 ds,
827 bf->bf_daddr, 485 bf->bf_daddr,
828 PA2DESC(sc, ds->ds_link), 486 PA2DESC(sc, ds->ds_link),
829 0); 487 0);
@@ -831,8 +489,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
831 struct ath_buf *tbf; 489 struct ath_buf *tbf;
832 struct ath_desc *tds; 490 struct ath_desc *tds;
833 491
834 if (list_is_last(&bf->list, &sc->sc_rxbuf)) { 492 if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
835 spin_unlock_bh(&sc->sc_rxbuflock); 493 sc->rx.rxlink = NULL;
836 break; 494 break;
837 } 495 }
838 496
@@ -850,451 +508,127 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
850 */ 508 */
851 509
852 tds = tbf->bf_desc; 510 tds = tbf->bf_desc;
853 retval = ath9k_hw_rxprocdesc(ah, 511 retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
854 tds, tbf->bf_daddr, 512 PA2DESC(sc, tds->ds_link), 0);
855 PA2DESC(sc, tds->ds_link), 0);
856 if (retval == -EINPROGRESS) { 513 if (retval == -EINPROGRESS) {
857 spin_unlock_bh(&sc->sc_rxbuflock);
858 break; 514 break;
859 } 515 }
860 } 516 }
861 517
862 /* XXX: we do not support frames spanning
863 * multiple descriptors */
864 bf->bf_status |= ATH_BUFSTATUS_DONE;
865
866 skb = bf->bf_mpdu; 518 skb = bf->bf_mpdu;
867 if (skb == NULL) { /* XXX ??? can this happen */ 519 if (!skb)
868 spin_unlock_bh(&sc->sc_rxbuflock);
869 continue; 520 continue;
870 } 521
871 /* 522 /*
872 * Now we know it's a completed frame, we can indicate the 523 * Synchronize the DMA transfer with CPU before
873 * frame. Remove the previous holding descriptor and leave 524 * 1. accessing the frame
874 * this one in the queue as the new holding descriptor. 525 * 2. requeueing the same buffer to h/w
875 */ 526 */
876 if (bf_held) { 527 pci_dma_sync_single_for_cpu(sc->pdev, bf->bf_buf_addr,
877 list_del(&bf_held->list); 528 sc->rx.bufsize,
878 bf_held->bf_status &= ~ATH_BUFSTATUS_STALE; 529 PCI_DMA_FROMDEVICE);
879 if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
880 list_add_tail(&bf_held->list, &sc->sc_rxbuf);
881 /* try to requeue this descriptor */
882 ath_rx_buf_link(sc, bf_held);
883 }
884 }
885 530
886 bf->bf_status |= ATH_BUFSTATUS_STALE;
887 bf_held = bf;
888 /* 531 /*
889 * Release the lock here in case ieee80211_input() return 532 * If we're asked to flush receive queue, directly
890 * the frame immediately by calling ath_rx_mpdu_requeue(). 533 * chain it back at the queue without processing it.
891 */ 534 */
892 spin_unlock_bh(&sc->sc_rxbuflock); 535 if (flush)
536 goto requeue;
893 537
894 if (flush) { 538 if (!ds->ds_rxstat.rs_datalen)
895 /* 539 goto requeue;
896 * If we're asked to flush receive queue, directly
897 * chain it back at the queue without processing it.
898 */
899 goto rx_next;
900 }
901 540
902 hdr = (struct ieee80211_hdr *)skb->data; 541 /* The status portion of the descriptor could get corrupted. */
903 fc = hdr->frame_control; 542 if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen)
904 memset(&rx_status, 0, sizeof(struct ath_recv_status)); 543 goto requeue;
905 544
906 if (ds->ds_rxstat.rs_more) { 545 if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
907 /* 546 goto requeue;
908 * Frame spans multiple descriptors; this 547
909 * cannot happen yet as we don't support 548 /* Ensure we always have an skb to requeue once we are done
910 * jumbograms. If not in monitor mode, 549 * processing the current buffer's skb */
911 * discard the frame. 550 requeue_skb = ath_rxbuf_alloc(sc, sc->rx.bufsize);
912 */ 551
913#ifndef ERROR_FRAMES 552 /* If there is no memory we ignore the current RX'd frame,
914 /* 553 * tell hardware it can give us a new frame using the old
915 * Enable this if you want to see 554 * skb and put it at the tail of the sc->rx.rxbuf list for
916 * error frames in Monitor mode. 555 * processing. */
917 */ 556 if (!requeue_skb)
918 if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR) 557 goto requeue;
919 goto rx_next; 558
920#endif 559 /* Unmap the frame */
921 /* fall thru for monitor mode handling... */ 560 pci_unmap_single(sc->pdev, bf->bf_buf_addr,
922 } else if (ds->ds_rxstat.rs_status != 0) { 561 sc->rx.bufsize,
923 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC) 562 PCI_DMA_FROMDEVICE);
924 rx_status.flags |= ATH_RX_FCS_ERROR;
925 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
926 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
927 goto rx_next;
928 }
929 563
930 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
931 /*
932 * Decrypt error. We only mark packet status
933 * here and always push up the frame up to let
934 * mac80211 handle the actual error case, be
935 * it no decryption key or real decryption
936 * error. This let us keep statistics there.
937 */
938 rx_status.flags |= ATH_RX_DECRYPT_ERROR;
939 } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
940 /*
941 * Demic error. We only mark frame status here
942 * and always push up the frame up to let
943 * mac80211 handle the actual error case. This
944 * let us keep statistics there. Hardware may
945 * post a false-positive MIC error.
946 */
947 if (ieee80211_is_ctl(fc))
948 /*
949 * Sometimes, we get invalid
950 * MIC failures on valid control frames.
951 * Remove these mic errors.
952 */
953 ds->ds_rxstat.rs_status &=
954 ~ATH9K_RXERR_MIC;
955 else
956 rx_status.flags |= ATH_RX_MIC_ERROR;
957 }
958 /*
959 * Reject error frames with the exception of
960 * decryption and MIC failures. For monitor mode,
961 * we also ignore the CRC error.
962 */
963 if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
964 if (ds->ds_rxstat.rs_status &
965 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
966 ATH9K_RXERR_CRC))
967 goto rx_next;
968 } else {
969 if (ds->ds_rxstat.rs_status &
970 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
971 goto rx_next;
972 }
973 }
974 }
975 /*
976 * The status portion of the descriptor could get corrupted.
977 */
978 if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
979 goto rx_next;
980 /*
981 * Sync and unmap the frame. At this point we're
982 * committed to passing the sk_buff somewhere so
983 * clear buf_skb; this means a new sk_buff must be
984 * allocated when the rx descriptor is setup again
985 * to receive another frame.
986 */
987 skb_put(skb, ds->ds_rxstat.rs_datalen); 564 skb_put(skb, ds->ds_rxstat.rs_datalen);
988 skb->protocol = cpu_to_be16(ETH_P_CONTROL); 565 skb->protocol = cpu_to_be16(ETH_P_CONTROL);
989 rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
990 rx_status.rateieee =
991 sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
992 rx_status.rateKbps =
993 sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
994 rx_status.ratecode = ds->ds_rxstat.rs_rate;
995 566
996 /* HT rate */ 567 /* see if any padding is done by the hw and remove it */
997 if (rx_status.ratecode & 0x80) { 568 hdr = (struct ieee80211_hdr *)skb->data;
998 /* TODO - add table to avoid division */ 569 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
999 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) { 570
1000 rx_status.flags |= ATH_RX_40MHZ; 571 /* The MAC header is padded to have 32-bit boundary if the
1001 rx_status.rateKbps = 572 * packet payload is non-zero. The general calculation for
1002 (rx_status.rateKbps * 27) / 13; 573 * padsize would take into account odd header lengths:
1003 } 574 * padsize = (4 - hdrlen % 4) % 4; However, since only
1004 if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI) 575 * even-length headers are used, padding can only be 0 or 2
1005 rx_status.rateKbps = 576 * bytes and we can optimize this a bit. In addition, we must
1006 (rx_status.rateKbps * 10) / 9; 577 * not try to remove padding from short control frames that do
1007 else 578 * not have payload. */
1008 rx_status.flags |= ATH_RX_SHORT_GI; 579 padsize = hdrlen & 3;
580 if (padsize && hdrlen >= 24) {
581 memmove(skb->data + padsize, skb->data, hdrlen);
582 skb_pull(skb, padsize);
1009 } 583 }
1010 584
1011 /* sc_noise_floor is only available when the station 585 keyix = ds->ds_rxstat.rs_keyix;
1012 attaches to an AP, so we use a default value
1013 if we are not yet attached. */
1014 rx_status.abs_rssi =
1015 ds->ds_rxstat.rs_rssi + sc->sc_ani.sc_noise_floor;
1016
1017 pci_dma_sync_single_for_cpu(sc->pdev,
1018 bf->bf_buf_addr,
1019 sc->sc_rxbufsize,
1020 PCI_DMA_FROMDEVICE);
1021 pci_unmap_single(sc->pdev,
1022 bf->bf_buf_addr,
1023 sc->sc_rxbufsize,
1024 PCI_DMA_FROMDEVICE);
1025 586
1026 /* XXX: Ah! make me more readable, use a helper */ 587 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
1027 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) { 588 rx_status.flag |= RX_FLAG_DECRYPTED;
1028 if (ds->ds_rxstat.rs_moreaggr == 0) { 589 } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
1029 rx_status.rssictl[0] = 590 && !decrypt_error && skb->len >= hdrlen + 4) {
1030 ds->ds_rxstat.rs_rssi_ctl0; 591 keyix = skb->data[hdrlen + 3] >> 6;
1031 rx_status.rssictl[1] = 592
1032 ds->ds_rxstat.rs_rssi_ctl1; 593 if (test_bit(keyix, sc->sc_keymap))
1033 rx_status.rssictl[2] = 594 rx_status.flag |= RX_FLAG_DECRYPTED;
1034 ds->ds_rxstat.rs_rssi_ctl2;
1035 rx_status.rssi = ds->ds_rxstat.rs_rssi;
1036 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
1037 rx_status.rssiextn[0] =
1038 ds->ds_rxstat.rs_rssi_ext0;
1039 rx_status.rssiextn[1] =
1040 ds->ds_rxstat.rs_rssi_ext1;
1041 rx_status.rssiextn[2] =
1042 ds->ds_rxstat.rs_rssi_ext2;
1043 rx_status.flags |=
1044 ATH_RX_RSSI_EXTN_VALID;
1045 }
1046 rx_status.flags |= ATH_RX_RSSI_VALID |
1047 ATH_RX_CHAIN_RSSI_VALID;
1048 }
1049 } else {
1050 /*
1051 * Need to insert the "combined" rssi into the
1052 * status structure for upper layer processing
1053 */
1054 rx_status.rssi = ds->ds_rxstat.rs_rssi;
1055 rx_status.flags |= ATH_RX_RSSI_VALID;
1056 } 595 }
1057 596
1058 /* Pass frames up to the stack. */ 597 /* Send the frame to mac80211 */
598 __ieee80211_rx(sc->hw, skb, &rx_status);
1059 599
1060 type = ath_rx_indicate(sc, skb, 600 /* We will now give hardware our shiny new allocated skb */
1061 &rx_status, ds->ds_rxstat.rs_keyix); 601 bf->bf_mpdu = requeue_skb;
602 bf->bf_buf_addr = pci_map_single(sc->pdev, requeue_skb->data,
603 sc->rx.bufsize,
604 PCI_DMA_FROMDEVICE);
605 if (unlikely(pci_dma_mapping_error(sc->pdev,
606 bf->bf_buf_addr))) {
607 dev_kfree_skb_any(requeue_skb);
608 bf->bf_mpdu = NULL;
609 DPRINTF(sc, ATH_DBG_CONFIG,
610 "pci_dma_mapping_error() on RX\n");
611 break;
612 }
613 bf->bf_dmacontext = bf->bf_buf_addr;
1062 614
1063 /* 615 /*
1064 * change the default rx antenna if rx diversity chooses the 616 * change the default rx antenna if rx diversity chooses the
1065 * other antenna 3 times in a row. 617 * other antenna 3 times in a row.
1066 */ 618 */
1067 if (sc->sc_defant != ds->ds_rxstat.rs_antenna) { 619 if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
1068 if (++sc->sc_rxotherant >= 3) 620 if (++sc->rx.rxotherant >= 3)
1069 ath_setdefantenna(sc, 621 ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
1070 ds->ds_rxstat.rs_antenna);
1071 } else { 622 } else {
1072 sc->sc_rxotherant = 0; 623 sc->rx.rxotherant = 0;
1073 } 624 }
625requeue:
626 list_move_tail(&bf->list, &sc->rx.rxbuf);
627 ath_rx_buf_link(sc, bf);
628 } while (1);
1074 629
1075#ifdef CONFIG_SLOW_ANT_DIV 630 spin_unlock_bh(&sc->rx.rxbuflock);
1076 if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
1077 ieee80211_is_beacon(fc)) {
1078 ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
1079 }
1080#endif
1081 /*
1082 * For frames successfully indicated, the buffer will be
1083 * returned to us by upper layers by calling
1084 * ath_rx_mpdu_requeue, either synchronusly or asynchronously.
1085 * So we don't want to do it here in this loop.
1086 */
1087 continue;
1088
1089rx_next:
1090 bf->bf_status |= ATH_BUFSTATUS_FREE;
1091 } while (TRUE);
1092
1093 if (chainreset) {
1094 DPRINTF(sc, ATH_DBG_CONFIG,
1095 "%s: Reset rx chain mask. "
1096 "Do internal reset\n", __func__);
1097 ASSERT(flush == 0);
1098 ath_reset(sc, false);
1099 }
1100 631
1101 return 0; 632 return 0;
1102#undef PA2DESC 633#undef PA2DESC
1103} 634}
1104
1105/* Process ADDBA request in per-TID data structure */
1106
1107int ath_rx_aggr_start(struct ath_softc *sc,
1108 const u8 *addr,
1109 u16 tid,
1110 u16 *ssn)
1111{
1112 struct ath_arx_tid *rxtid;
1113 struct ath_node *an;
1114 struct ieee80211_hw *hw = sc->hw;
1115 struct ieee80211_supported_band *sband;
1116 u16 buffersize = 0;
1117
1118 spin_lock_bh(&sc->node_lock);
1119 an = ath_node_find(sc, (u8 *) addr);
1120 spin_unlock_bh(&sc->node_lock);
1121
1122 if (!an) {
1123 DPRINTF(sc, ATH_DBG_AGGR,
1124 "%s: Node not found to initialize RX aggregation\n",
1125 __func__);
1126 return -1;
1127 }
1128
1129 sband = hw->wiphy->bands[hw->conf.channel->band];
1130 buffersize = IEEE80211_MIN_AMPDU_BUF <<
1131 sband->ht_info.ampdu_factor; /* FIXME */
1132
1133 rxtid = &an->an_aggr.rx.tid[tid];
1134
1135 spin_lock_bh(&rxtid->tidlock);
1136 if (sc->sc_flags & SC_OP_RXAGGR) {
1137 /* Allow aggregation reception
1138 * Adjust rx BA window size. Peer might indicate a
1139 * zero buffer size for a _dont_care_ condition.
1140 */
1141 if (buffersize)
1142 rxtid->baw_size = min(buffersize, rxtid->baw_size);
1143
1144 /* set rx sequence number */
1145 rxtid->seq_next = *ssn;
1146
1147 /* Allocate the receive buffers for this TID */
1148 DPRINTF(sc, ATH_DBG_AGGR,
1149 "%s: Allcating rxbuffer for TID %d\n", __func__, tid);
1150
1151 if (rxtid->rxbuf == NULL) {
1152 /*
1153 * If the rxbuff is not NULL at this point, we *probably*
1154 * already allocated the buffer on a previous ADDBA,
1155 * and this is a subsequent ADDBA that got through.
1156 * Don't allocate, but use the value in the pointer,
1157 * we zero it out when we de-allocate.
1158 */
1159 rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
1160 sizeof(struct ath_rxbuf), GFP_ATOMIC);
1161 }
1162 if (rxtid->rxbuf == NULL) {
1163 DPRINTF(sc, ATH_DBG_AGGR,
1164 "%s: Unable to allocate RX buffer, "
1165 "refusing ADDBA\n", __func__);
1166 } else {
1167 /* Ensure the memory is zeroed out (all internal
1168 * pointers are null) */
1169 memset(rxtid->rxbuf, 0, ATH_TID_MAX_BUFS *
1170 sizeof(struct ath_rxbuf));
1171 DPRINTF(sc, ATH_DBG_AGGR,
1172 "%s: Allocated @%p\n", __func__, rxtid->rxbuf);
1173
1174 /* Allow aggregation reception */
1175 rxtid->addba_exchangecomplete = 1;
1176 }
1177 }
1178 spin_unlock_bh(&rxtid->tidlock);
1179
1180 return 0;
1181}
1182
1183/* Process DELBA */
1184
1185int ath_rx_aggr_stop(struct ath_softc *sc,
1186 const u8 *addr,
1187 u16 tid)
1188{
1189 struct ath_node *an;
1190
1191 spin_lock_bh(&sc->node_lock);
1192 an = ath_node_find(sc, (u8 *) addr);
1193 spin_unlock_bh(&sc->node_lock);
1194
1195 if (!an) {
1196 DPRINTF(sc, ATH_DBG_AGGR,
1197 "%s: RX aggr stop for non-existent node\n", __func__);
1198 return -1;
1199 }
1200
1201 ath_rx_aggr_teardown(sc, an, tid);
1202 return 0;
1203}
1204
1205/* Rx aggregation tear down */
1206
1207void ath_rx_aggr_teardown(struct ath_softc *sc,
1208 struct ath_node *an, u8 tid)
1209{
1210 struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];
1211
1212 if (!rxtid->addba_exchangecomplete)
1213 return;
1214
1215 del_timer_sync(&rxtid->timer);
1216 ath_rx_flush_tid(sc, rxtid, 0);
1217 rxtid->addba_exchangecomplete = 0;
1218
1219 /* De-allocate the receive buffer array allocated when addba started */
1220
1221 if (rxtid->rxbuf) {
1222 DPRINTF(sc, ATH_DBG_AGGR,
1223 "%s: Deallocating TID %d rxbuff @%p\n",
1224 __func__, tid, rxtid->rxbuf);
1225 kfree(rxtid->rxbuf);
1226
1227 /* Set pointer to null to avoid reuse*/
1228 rxtid->rxbuf = NULL;
1229 }
1230}
1231
1232/* Initialize per-node receive state */
1233
1234void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
1235{
1236 if (sc->sc_flags & SC_OP_RXAGGR) {
1237 struct ath_arx_tid *rxtid;
1238 int tidno;
1239
1240 /* Init per tid rx state */
1241 for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
1242 tidno < WME_NUM_TID;
1243 tidno++, rxtid++) {
1244 rxtid->an = an;
1245 rxtid->seq_reset = 1;
1246 rxtid->seq_next = 0;
1247 rxtid->baw_size = WME_MAX_BA;
1248 rxtid->baw_head = rxtid->baw_tail = 0;
1249
1250 /*
1251 * Ensure the buffer pointer is null at this point
1252 * (needs to be allocated when addba is received)
1253 */
1254
1255 rxtid->rxbuf = NULL;
1256 setup_timer(&rxtid->timer, ath_rx_timer,
1257 (unsigned long)rxtid);
1258 spin_lock_init(&rxtid->tidlock);
1259
1260 /* ADDBA state */
1261 rxtid->addba_exchangecomplete = 0;
1262 }
1263 }
1264}
1265
1266void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1267{
1268 if (sc->sc_flags & SC_OP_RXAGGR) {
1269 struct ath_arx_tid *rxtid;
1270 int tidno, i;
1271
1272 /* Init per tid rx state */
1273 for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno];
1274 tidno < WME_NUM_TID;
1275 tidno++, rxtid++) {
1276
1277 if (!rxtid->addba_exchangecomplete)
1278 continue;
1279
1280 /* must cancel timer first */
1281 del_timer_sync(&rxtid->timer);
1282
1283 /* drop any pending sub-frames */
1284 ath_rx_flush_tid(sc, rxtid, 1);
1285
1286 for (i = 0; i < ATH_TID_MAX_BUFS; i++)
1287 ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL);
1288
1289 rxtid->addba_exchangecomplete = 0;
1290 }
1291 }
1292
1293}
1294
1295/* Cleanup per-node receive state */
1296
1297void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an)
1298{
1299 ath_rx_node_cleanup(sc, an);
1300}
diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h
index 60617ae66209..9fedb4911bc3 100644
--- a/drivers/net/wireless/ath9k/reg.h
+++ b/drivers/net/wireless/ath9k/reg.h
@@ -671,7 +671,11 @@
671#define AR_RC_APB 0x00000002 671#define AR_RC_APB 0x00000002
672#define AR_RC_HOSTIF 0x00000100 672#define AR_RC_HOSTIF 0x00000100
673 673
674#define AR_WA 0x4004 674#define AR_WA 0x4004
675#define AR9285_WA_DEFAULT 0x004a05cb
676#define AR9280_WA_DEFAULT 0x0040073f
677#define AR_WA_DEFAULT 0x0000073f
678
675 679
676#define AR_PM_STATE 0x4008 680#define AR_PM_STATE 0x4008
677#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000 681#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000
@@ -738,6 +742,8 @@
738#define AR_SREV_REVISION_9280_21 2 742#define AR_SREV_REVISION_9280_21 2
739#define AR_SREV_VERSION_9285 0xC0 743#define AR_SREV_VERSION_9285 0xC0
740#define AR_SREV_REVISION_9285_10 0 744#define AR_SREV_REVISION_9285_10 0
745#define AR_SREV_REVISION_9285_11 1
746#define AR_SREV_REVISION_9285_12 2
741 747
742#define AR_SREV_9100_OR_LATER(_ah) \ 748#define AR_SREV_9100_OR_LATER(_ah) \
743 (((_ah)->ah_macVersion >= AR_SREV_VERSION_5416_PCIE)) 749 (((_ah)->ah_macVersion >= AR_SREV_VERSION_5416_PCIE))
@@ -768,6 +774,16 @@
768#define AR_SREV_9285(_ah) (((_ah)->ah_macVersion == AR_SREV_VERSION_9285)) 774#define AR_SREV_9285(_ah) (((_ah)->ah_macVersion == AR_SREV_VERSION_9285))
769#define AR_SREV_9285_10_OR_LATER(_ah) \ 775#define AR_SREV_9285_10_OR_LATER(_ah) \
770 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9285)) 776 (((_ah)->ah_macVersion >= AR_SREV_VERSION_9285))
777#define AR_SREV_9285_11(_ah) \
778 (AR_SREV_9280(ah) && ((_ah)->ah_macRev == AR_SREV_REVISION_9285_11))
779#define AR_SREV_9285_11_OR_LATER(_ah) \
780 (((_ah)->ah_macVersion > AR_SREV_VERSION_9285) || \
781 (AR_SREV_9285(ah) && ((_ah)->ah_macRev >= AR_SREV_REVISION_9285_11)))
782#define AR_SREV_9285_12(_ah) \
783 (AR_SREV_9280(ah) && ((_ah)->ah_macRev == AR_SREV_REVISION_9285_12))
784#define AR_SREV_9285_12_OR_LATER(_ah) \
785 (((_ah)->ah_macVersion > AR_SREV_VERSION_9285) || \
786 (AR_SREV_9285(ah) && ((_ah)->ah_macRev >= AR_SREV_REVISION_9285_12)))
771 787
772#define AR_RADIO_SREV_MAJOR 0xf0 788#define AR_RADIO_SREV_MAJOR 0xf0
773#define AR_RAD5133_SREV_MAJOR 0xc0 789#define AR_RAD5133_SREV_MAJOR 0xc0
@@ -1017,6 +1033,97 @@ enum {
1017#define AR_AN_SYNTH9_REFDIVA 0xf8000000 1033#define AR_AN_SYNTH9_REFDIVA 0xf8000000
1018#define AR_AN_SYNTH9_REFDIVA_S 27 1034#define AR_AN_SYNTH9_REFDIVA_S 27
1019 1035
1036#define AR9285_AN_RF2G1 0x7820
1037#define AR9285_AN_RF2G1_ENPACAL 0x00000800
1038#define AR9285_AN_RF2G1_ENPACAL_S 11
1039#define AR9285_AN_RF2G1_PDPADRV1 0x02000000
1040#define AR9285_AN_RF2G1_PDPADRV1_S 25
1041#define AR9285_AN_RF2G1_PDPADRV2 0x01000000
1042#define AR9285_AN_RF2G1_PDPADRV2_S 24
1043#define AR9285_AN_RF2G1_PDPAOUT 0x00800000
1044#define AR9285_AN_RF2G1_PDPAOUT_S 23
1045
1046
1047#define AR9285_AN_RF2G2 0x7824
1048#define AR9285_AN_RF2G2_OFFCAL 0x00001000
1049#define AR9285_AN_RF2G2_OFFCAL_S 12
1050
1051#define AR9285_AN_RF2G3 0x7828
1052#define AR9285_AN_RF2G3_PDVCCOMP 0x02000000
1053#define AR9285_AN_RF2G3_PDVCCOMP_S 25
1054#define AR9285_AN_RF2G3_OB_0 0x00E00000
1055#define AR9285_AN_RF2G3_OB_0_S 21
1056#define AR9285_AN_RF2G3_OB_1 0x001C0000
1057#define AR9285_AN_RF2G3_OB_1_S 18
1058#define AR9285_AN_RF2G3_OB_2 0x00038000
1059#define AR9285_AN_RF2G3_OB_2_S 15
1060#define AR9285_AN_RF2G3_OB_3 0x00007000
1061#define AR9285_AN_RF2G3_OB_3_S 12
1062#define AR9285_AN_RF2G3_OB_4 0x00000E00
1063#define AR9285_AN_RF2G3_OB_4_S 9
1064
1065#define AR9285_AN_RF2G3_DB1_0 0x000001C0
1066#define AR9285_AN_RF2G3_DB1_0_S 6
1067#define AR9285_AN_RF2G3_DB1_1 0x00000038
1068#define AR9285_AN_RF2G3_DB1_1_S 3
1069#define AR9285_AN_RF2G3_DB1_2 0x00000007
1070#define AR9285_AN_RF2G3_DB1_2_S 0
1071#define AR9285_AN_RF2G4 0x782C
1072#define AR9285_AN_RF2G4_DB1_3 0xE0000000
1073#define AR9285_AN_RF2G4_DB1_3_S 29
1074#define AR9285_AN_RF2G4_DB1_4 0x1C000000
1075#define AR9285_AN_RF2G4_DB1_4_S 26
1076
1077#define AR9285_AN_RF2G4_DB2_0 0x03800000
1078#define AR9285_AN_RF2G4_DB2_0_S 23
1079#define AR9285_AN_RF2G4_DB2_1 0x00700000
1080#define AR9285_AN_RF2G4_DB2_1_S 20
1081#define AR9285_AN_RF2G4_DB2_2 0x000E0000
1082#define AR9285_AN_RF2G4_DB2_2_S 17
1083#define AR9285_AN_RF2G4_DB2_3 0x0001C000
1084#define AR9285_AN_RF2G4_DB2_3_S 14
1085#define AR9285_AN_RF2G4_DB2_4 0x00003800
1086#define AR9285_AN_RF2G4_DB2_4_S 11
1087
1088#define AR9285_AN_RF2G6 0x7834
1089#define AR9285_AN_RF2G6_CCOMP 0x00007800
1090#define AR9285_AN_RF2G6_CCOMP_S 11
1091#define AR9285_AN_RF2G6_OFFS 0x03f00000
1092#define AR9285_AN_RF2G6_OFFS_S 20
1093
1094#define AR9285_AN_RF2G7 0x7838
1095#define AR9285_AN_RF2G7_PWDDB 0x00000002
1096#define AR9285_AN_RF2G7_PWDDB_S 1
1097#define AR9285_AN_RF2G7_PADRVGN2TAB0 0xE0000000
1098#define AR9285_AN_RF2G7_PADRVGN2TAB0_S 29
1099
1100#define AR9285_AN_RF2G8 0x783C
1101#define AR9285_AN_RF2G8_PADRVGN2TAB0 0x0001C000
1102#define AR9285_AN_RF2G8_PADRVGN2TAB0_S 14
1103
1104
1105#define AR9285_AN_RF2G9 0x7840
1106#define AR9285_AN_RXTXBB1 0x7854
1107#define AR9285_AN_RXTXBB1_PDRXTXBB1 0x00000020
1108#define AR9285_AN_RXTXBB1_PDRXTXBB1_S 5
1109#define AR9285_AN_RXTXBB1_PDV2I 0x00000080
1110#define AR9285_AN_RXTXBB1_PDV2I_S 7
1111#define AR9285_AN_RXTXBB1_PDDACIF 0x00000100
1112#define AR9285_AN_RXTXBB1_PDDACIF_S 8
1113#define AR9285_AN_RXTXBB1_SPARE9 0x00000001
1114#define AR9285_AN_RXTXBB1_SPARE9_S 0
1115
1116#define AR9285_AN_TOP2 0x7868
1117
1118#define AR9285_AN_TOP3 0x786c
1119#define AR9285_AN_TOP3_XPABIAS_LVL 0x0000000C
1120#define AR9285_AN_TOP3_XPABIAS_LVL_S 2
1121#define AR9285_AN_TOP3_PWDDAC 0x00800000
1122#define AR9285_AN_TOP3_PWDDAC_S 23
1123
1124#define AR9285_AN_TOP4 0x7870
1125#define AR9285_AN_TOP4_DEFAULT 0x10142c00
1126
1020#define AR_STA_ID0 0x8000 1127#define AR_STA_ID0 0x8000
1021#define AR_STA_ID1 0x8004 1128#define AR_STA_ID1 0x8004
1022#define AR_STA_ID1_SADH_MASK 0x0000FFFF 1129#define AR_STA_ID1_SADH_MASK 0x0000FFFF
diff --git a/drivers/net/wireless/ath9k/regd.c b/drivers/net/wireless/ath9k/regd.c
index 62e28887ccd3..64043e99facf 100644
--- a/drivers/net/wireless/ath9k/regd.c
+++ b/drivers/net/wireless/ath9k/regd.c
@@ -42,7 +42,7 @@ ath9k_regd_sort(void *a, u32 n, u32 size, ath_hal_cmp_t *cmp)
42 u8 *u = t - size; 42 u8 *u = t - size;
43 if (cmp(u, t) <= 0) 43 if (cmp(u, t) <= 0)
44 break; 44 break;
45 swap(u, t, size); 45 swap_array(u, t, size);
46 } 46 }
47} 47}
48 48
@@ -78,8 +78,7 @@ static bool ath9k_regd_is_eeprom_valid(struct ath_hal *ah)
78 return true; 78 return true;
79 } 79 }
80 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 80 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
81 "%s: invalid regulatory domain/country code 0x%x\n", 81 "invalid regulatory domain/country code 0x%x\n", rd);
82 __func__, rd);
83 return false; 82 return false;
84} 83}
85 84
@@ -107,13 +106,12 @@ static bool ath9k_regd_is_ccode_valid(struct ath_hal *ah,
107 return true; 106 return true;
108 107
109 rd = ath9k_regd_get_eepromRD(ah); 108 rd = ath9k_regd_get_eepromRD(ah);
110 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: EEPROM regdomain 0x%x\n", 109 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "EEPROM regdomain 0x%x\n", rd);
111 __func__, rd);
112 110
113 if (rd & COUNTRY_ERD_FLAG) { 111 if (rd & COUNTRY_ERD_FLAG) {
114 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 112 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
115 "%s: EEPROM setting is country code %u\n", 113 "EEPROM setting is country code %u\n",
116 __func__, rd & ~COUNTRY_ERD_FLAG); 114 rd & ~COUNTRY_ERD_FLAG);
117 return cc == (rd & ~COUNTRY_ERD_FLAG); 115 return cc == (rd & ~COUNTRY_ERD_FLAG);
118 } 116 }
119 117
@@ -290,8 +288,7 @@ ath9k_regd_get_wmode_regdomain(struct ath_hal *ah, int regDmn,
290 } 288 }
291 if (!found) { 289 if (!found) {
292 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 290 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
293 "%s: Failed to find reg domain pair %u\n", 291 "Failed to find reg domain pair %u\n", regDmn);
294 __func__, regDmn);
295 return false; 292 return false;
296 } 293 }
297 if (!(channelFlag & CHANNEL_2GHZ)) { 294 if (!(channelFlag & CHANNEL_2GHZ)) {
@@ -307,8 +304,7 @@ ath9k_regd_get_wmode_regdomain(struct ath_hal *ah, int regDmn,
307 found = ath9k_regd_is_valid_reg_domain(regDmn, rd); 304 found = ath9k_regd_is_valid_reg_domain(regDmn, rd);
308 if (!found) { 305 if (!found) {
309 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 306 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
310 "%s: Failed to find unitary reg domain %u\n", 307 "Failed to find unitary reg domain %u\n", regDmn);
311 __func__, regDmn);
312 return false; 308 return false;
313 } else { 309 } else {
314 rd->pscan &= regPair->pscanMask; 310 rd->pscan &= regPair->pscanMask;
@@ -430,30 +426,27 @@ ath9k_regd_add_channel(struct ath_hal *ah,
430 426
431 if (!(c_lo <= c && c <= c_hi)) { 427 if (!(c_lo <= c && c <= c_hi)) {
432 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 428 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
433 "%s: c %u out of range [%u..%u]\n", 429 "c %u out of range [%u..%u]\n",
434 __func__, c, c_lo, c_hi); 430 c, c_lo, c_hi);
435 return false; 431 return false;
436 } 432 }
437 if ((fband->channelBW == CHANNEL_HALF_BW) && 433 if ((fband->channelBW == CHANNEL_HALF_BW) &&
438 !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_HALFRATE)) { 434 !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_HALFRATE)) {
439 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 435 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
440 "%s: Skipping %u half rate channel\n", 436 "Skipping %u half rate channel\n", c);
441 __func__, c);
442 return false; 437 return false;
443 } 438 }
444 439
445 if ((fband->channelBW == CHANNEL_QUARTER_BW) && 440 if ((fband->channelBW == CHANNEL_QUARTER_BW) &&
446 !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_QUARTERRATE)) { 441 !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_QUARTERRATE)) {
447 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 442 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
448 "%s: Skipping %u quarter rate channel\n", 443 "Skipping %u quarter rate channel\n", c);
449 __func__, c);
450 return false; 444 return false;
451 } 445 }
452 446
453 if (((c + fband->channelSep) / 2) > (maxChan + HALF_MAXCHANBW)) { 447 if (((c + fband->channelSep) / 2) > (maxChan + HALF_MAXCHANBW)) {
454 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 448 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
455 "%s: c %u > maxChan %u\n", 449 "c %u > maxChan %u\n", c, maxChan);
456 __func__, c, maxChan);
457 return false; 450 return false;
458 } 451 }
459 452
@@ -463,7 +456,7 @@ ath9k_regd_add_channel(struct ath_hal *ah,
463 return false; 456 return false;
464 } 457 }
465 458
466 if ((rd->flags & NO_HOSTAP) && (ah->ah_opmode == ATH9K_M_HOSTAP)) { 459 if ((rd->flags & NO_HOSTAP) && (ah->ah_opmode == NL80211_IFTYPE_AP)) {
467 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 460 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
468 "Skipping HOSTAP channel\n"); 461 "Skipping HOSTAP channel\n");
469 return false; 462 return false;
@@ -606,8 +599,7 @@ static bool ath9k_regd_japan_check(struct ath_hal *ah,
606 } 599 }
607 600
608 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 601 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
609 "%s: Skipping %d freq band\n", 602 "Skipping %d freq band\n", j_bandcheck[i].freqbandbit);
610 __func__, j_bandcheck[i].freqbandbit);
611 603
612 return skipband; 604 return skipband;
613} 605}
@@ -632,20 +624,19 @@ ath9k_regd_init_channels(struct ath_hal *ah,
632 unsigned long *modes_avail; 624 unsigned long *modes_avail;
633 DECLARE_BITMAP(modes_allowed, ATH9K_MODE_MAX); 625 DECLARE_BITMAP(modes_allowed, ATH9K_MODE_MAX);
634 626
635 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: cc %u %s %s\n", 627 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "cc %u %s %s\n", cc,
636 __func__, cc,
637 enableOutdoor ? "Enable outdoor" : "", 628 enableOutdoor ? "Enable outdoor" : "",
638 enableExtendedChannels ? "Enable ecm" : ""); 629 enableExtendedChannels ? "Enable ecm" : "");
639 630
640 if (!ath9k_regd_is_ccode_valid(ah, cc)) { 631 if (!ath9k_regd_is_ccode_valid(ah, cc)) {
641 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 632 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
642 "%s: invalid country code %d\n", __func__, cc); 633 "Invalid country code %d\n", cc);
643 return false; 634 return false;
644 } 635 }
645 636
646 if (!ath9k_regd_is_eeprom_valid(ah)) { 637 if (!ath9k_regd_is_eeprom_valid(ah)) {
647 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 638 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
648 "%s: invalid EEPROM contents\n", __func__); 639 "Invalid EEPROM contents\n");
649 return false; 640 return false;
650 } 641 }
651 642
@@ -693,9 +684,9 @@ ath9k_regd_init_channels(struct ath_hal *ah,
693 ~CHANNEL_2GHZ, 684 ~CHANNEL_2GHZ,
694 &rd5GHz)) { 685 &rd5GHz)) {
695 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 686 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
696 "%s: couldn't find unitary " 687 "Couldn't find unitary "
697 "5GHz reg domain for country %u\n", 688 "5GHz reg domain for country %u\n",
698 __func__, ah->ah_countryCode); 689 ah->ah_countryCode);
699 return false; 690 return false;
700 } 691 }
701 if (!ath9k_regd_get_wmode_regdomain(ah, 692 if (!ath9k_regd_get_wmode_regdomain(ah,
@@ -703,9 +694,9 @@ ath9k_regd_init_channels(struct ath_hal *ah,
703 CHANNEL_2GHZ, 694 CHANNEL_2GHZ,
704 &rd2GHz)) { 695 &rd2GHz)) {
705 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 696 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
706 "%s: couldn't find unitary 2GHz " 697 "Couldn't find unitary 2GHz "
707 "reg domain for country %u\n", 698 "reg domain for country %u\n",
708 __func__, ah->ah_countryCode); 699 ah->ah_countryCode);
709 return false; 700 return false;
710 } 701 }
711 702
@@ -717,9 +708,9 @@ ath9k_regd_init_channels(struct ath_hal *ah,
717 ~CHANNEL_2GHZ, 708 ~CHANNEL_2GHZ,
718 &rd5GHz)) { 709 &rd5GHz)) {
719 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 710 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
720 "%s: couldn't find unitary 5GHz " 711 "Couldn't find unitary 5GHz "
721 "reg domain for country %u\n", 712 "reg domain for country %u\n",
722 __func__, ah->ah_countryCode); 713 ah->ah_countryCode);
723 return false; 714 return false;
724 } 715 }
725 } 716 }
@@ -749,15 +740,14 @@ ath9k_regd_init_channels(struct ath_hal *ah,
749 740
750 if (!test_bit(cm->mode, modes_avail)) { 741 if (!test_bit(cm->mode, modes_avail)) {
751 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 742 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
752 "%s: !avail mode %d flags 0x%x\n", 743 "!avail mode %d flags 0x%x\n",
753 __func__, cm->mode, cm->flags); 744 cm->mode, cm->flags);
754 continue; 745 continue;
755 } 746 }
756 if (!ath9k_get_channel_edges(ah, cm->flags, &c_lo, &c_hi)) { 747 if (!ath9k_get_channel_edges(ah, cm->flags, &c_lo, &c_hi)) {
757 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 748 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
758 "%s: channels 0x%x not supported " 749 "channels 0x%x not supported "
759 "by hardware\n", 750 "by hardware\n", cm->flags);
760 __func__, cm->flags);
761 continue; 751 continue;
762 } 752 }
763 753
@@ -788,8 +778,7 @@ ath9k_regd_init_channels(struct ath_hal *ah,
788 break; 778 break;
789 default: 779 default:
790 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 780 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
791 "%s: Unknown HAL mode 0x%x\n", __func__, 781 "Unknown HAL mode 0x%x\n", cm->mode);
792 cm->mode);
793 continue; 782 continue;
794 } 783 }
795 784
@@ -841,9 +830,8 @@ ath9k_regd_init_channels(struct ath_hal *ah,
841 if (next >= maxchans) { 830 if (next >= maxchans) {
842 DPRINTF(ah->ah_sc, 831 DPRINTF(ah->ah_sc,
843 ATH_DBG_REGULATORY, 832 ATH_DBG_REGULATORY,
844 "%s: too many channels " 833 "too many channels "
845 "for channel table\n", 834 "for channel table\n");
846 __func__);
847 goto done; 835 goto done;
848 } 836 }
849 if (ath9k_regd_add_channel(ah, 837 if (ath9k_regd_add_channel(ah,
@@ -869,9 +857,8 @@ done:
869 857
870 if (next > ARRAY_SIZE(ah->ah_channels)) { 858 if (next > ARRAY_SIZE(ah->ah_channels)) {
871 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 859 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
872 "%s: too many channels %u; truncating to %u\n", 860 "too many channels %u; truncating to %u\n",
873 __func__, next, 861 next, (int) ARRAY_SIZE(ah->ah_channels));
874 (int) ARRAY_SIZE(ah->ah_channels));
875 next = ARRAY_SIZE(ah->ah_channels); 862 next = ARRAY_SIZE(ah->ah_channels);
876 } 863 }
877#ifdef ATH_NF_PER_CHAN 864#ifdef ATH_NF_PER_CHAN
@@ -919,7 +906,7 @@ ath9k_regd_check_channel(struct ath_hal *ah,
919 int n, lim; 906 int n, lim;
920 907
921 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 908 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
922 "%s: channel %u/0x%x (0x%x) requested\n", __func__, 909 "channel %u/0x%x (0x%x) requested\n",
923 c->channel, c->channelFlags, flags); 910 c->channel, c->channelFlags, flags);
924 911
925 cc = ah->ah_curchan; 912 cc = ah->ah_curchan;
@@ -950,15 +937,15 @@ ath9k_regd_check_channel(struct ath_hal *ah,
950 d = flags - (cc->channelFlags & CHAN_FLAGS); 937 d = flags - (cc->channelFlags & CHAN_FLAGS);
951 } 938 }
952 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 939 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY,
953 "%s: channel %u/0x%x d %d\n", __func__, 940 "channel %u/0x%x d %d\n",
954 cc->channel, cc->channelFlags, d); 941 cc->channel, cc->channelFlags, d);
955 if (d > 0) { 942 if (d > 0) {
956 base = cc + 1; 943 base = cc + 1;
957 lim--; 944 lim--;
958 } 945 }
959 } 946 }
960 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: no match for %u/0x%x\n", 947 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "no match for %u/0x%x\n",
961 __func__, c->channel, c->channelFlags); 948 c->channel, c->channelFlags);
962 return NULL; 949 return NULL;
963} 950}
964 951
diff --git a/drivers/net/wireless/ath9k/regd.h b/drivers/net/wireless/ath9k/regd.h
index 0ecd344fbd98..512d990aa7ea 100644
--- a/drivers/net/wireless/ath9k/regd.h
+++ b/drivers/net/wireless/ath9k/regd.h
@@ -125,7 +125,7 @@
125 125
126#define CHAN_FLAGS (CHANNEL_ALL|CHANNEL_HALF|CHANNEL_QUARTER) 126#define CHAN_FLAGS (CHANNEL_ALL|CHANNEL_HALF|CHANNEL_QUARTER)
127 127
128#define swap(_a, _b, _size) { \ 128#define swap_array(_a, _b, _size) { \
129 u8 *s = _b; \ 129 u8 *s = _b; \
130 int i = _size; \ 130 int i = _size; \
131 do { \ 131 do { \
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 3a4757942b3f..3bfc3b90f256 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -14,10 +14,6 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17/*
18 * Implementation of transmit path.
19 */
20
21#include "core.h" 17#include "core.h"
22 18
23#define BITS_PER_BYTE 8 19#define BITS_PER_BYTE 8
@@ -65,11 +61,12 @@ static u32 bits_per_symbol[][2] = {
65 * NB: must be called with txq lock held 61 * NB: must be called with txq lock held
66 */ 62 */
67 63
68static void ath_tx_txqaddbuf(struct ath_softc *sc, 64static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
69 struct ath_txq *txq, struct list_head *head) 65 struct list_head *head)
70{ 66{
71 struct ath_hal *ah = sc->sc_ah; 67 struct ath_hal *ah = sc->sc_ah;
72 struct ath_buf *bf; 68 struct ath_buf *bf;
69
73 /* 70 /*
74 * Insert the frame on the outbound list and 71 * Insert the frame on the outbound list and
75 * pass it on to the hardware. 72 * pass it on to the hardware.
@@ -86,18 +83,16 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc,
86 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list); 83 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
87 84
88 DPRINTF(sc, ATH_DBG_QUEUE, 85 DPRINTF(sc, ATH_DBG_QUEUE,
89 "%s: txq depth = %d\n", __func__, txq->axq_depth); 86 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
90 87
91 if (txq->axq_link == NULL) { 88 if (txq->axq_link == NULL) {
92 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 89 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
93 DPRINTF(sc, ATH_DBG_XMIT, 90 DPRINTF(sc, ATH_DBG_XMIT,
94 "%s: TXDP[%u] = %llx (%p)\n", 91 "TXDP[%u] = %llx (%p)\n",
95 __func__, txq->axq_qnum, 92 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
96 ito64(bf->bf_daddr), bf->bf_desc);
97 } else { 93 } else {
98 *txq->axq_link = bf->bf_daddr; 94 *txq->axq_link = bf->bf_daddr;
99 DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n", 95 DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
100 __func__,
101 txq->axq_qnum, txq->axq_link, 96 txq->axq_qnum, txq->axq_link,
102 ito64(bf->bf_daddr), bf->bf_desc); 97 ito64(bf->bf_daddr), bf->bf_desc);
103 } 98 }
@@ -105,46 +100,94 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc,
105 ath9k_hw_txstart(ah, txq->axq_qnum); 100 ath9k_hw_txstart(ah, txq->axq_qnum);
106} 101}
107 102
108/* Get transmit rate index using rate in Kbps */ 103static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
109 104 struct ath_xmit_status *tx_status)
110static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate)
111{ 105{
112 int i; 106 struct ieee80211_hw *hw = sc->hw;
113 int ndx = 0; 107 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
108 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
109 int hdrlen, padsize;
114 110
115 for (i = 0; i < rt->rateCount; i++) { 111 DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
116 if (rt->info[i].rateKbps == rate) { 112
117 ndx = i; 113 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
118 break; 114 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
119 } 115 kfree(tx_info_priv);
116 tx_info->rate_driver_data[0] = NULL;
117 }
118
119 if (tx_status->flags & ATH_TX_BAR) {
120 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
121 tx_status->flags &= ~ATH_TX_BAR;
122 }
123
124 if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) {
125 /* Frame was ACKed */
126 tx_info->flags |= IEEE80211_TX_STAT_ACK;
127 }
128
129 tx_info->status.rates[0].count = tx_status->retries;
130 if (tx_info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
131 /* Change idx from internal table index to MCS index */
132 int idx = tx_info->status.rates[0].idx;
133 struct ath_rate_table *rate_table = sc->cur_rate_table;
134 if (idx >= 0 && idx < rate_table->rate_cnt)
135 tx_info->status.rates[0].idx =
136 rate_table->info[idx].ratecode & 0x7f;
137 }
138
139 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
140 padsize = hdrlen & 3;
141 if (padsize && hdrlen >= 24) {
142 /*
143 * Remove MAC header padding before giving the frame back to
144 * mac80211.
145 */
146 memmove(skb->data + padsize, skb->data, hdrlen);
147 skb_pull(skb, padsize);
120 } 148 }
121 149
122 return ndx; 150 ieee80211_tx_status(hw, skb);
123} 151}
124 152
125/* Check if it's okay to send out aggregates */ 153/* Check if it's okay to send out aggregates */
126 154
127static int ath_aggr_query(struct ath_softc *sc, 155static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
128 struct ath_node *an, u8 tidno)
129{ 156{
130 struct ath_atx_tid *tid; 157 struct ath_atx_tid *tid;
131 tid = ATH_AN_2_TID(an, tidno); 158 tid = ATH_AN_2_TID(an, tidno);
132 159
133 if (tid->addba_exchangecomplete || tid->addba_exchangeinprogress) 160 if (tid->state & AGGR_ADDBA_COMPLETE ||
161 tid->state & AGGR_ADDBA_PROGRESS)
134 return 1; 162 return 1;
135 else 163 else
136 return 0; 164 return 0;
137} 165}
138 166
139static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr) 167static void ath_get_beaconconfig(struct ath_softc *sc, int if_id,
168 struct ath_beacon_config *conf)
169{
170 struct ieee80211_hw *hw = sc->hw;
171
172 /* fill in beacon config data */
173
174 conf->beacon_interval = hw->conf.beacon_int;
175 conf->listen_interval = 100;
176 conf->dtim_count = 1;
177 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
178}
179
180/* Calculate Atheros packet type from IEEE80211 packet header */
181
182static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
140{ 183{
184 struct ieee80211_hdr *hdr;
141 enum ath9k_pkt_type htype; 185 enum ath9k_pkt_type htype;
142 __le16 fc; 186 __le16 fc;
143 187
188 hdr = (struct ieee80211_hdr *)skb->data;
144 fc = hdr->frame_control; 189 fc = hdr->frame_control;
145 190
146 /* Calculate Atheros packet type from IEEE80211 packet header */
147
148 if (ieee80211_is_beacon(fc)) 191 if (ieee80211_is_beacon(fc))
149 htype = ATH9K_PKT_TYPE_BEACON; 192 htype = ATH9K_PKT_TYPE_BEACON;
150 else if (ieee80211_is_probe_resp(fc)) 193 else if (ieee80211_is_probe_resp(fc))
@@ -159,232 +202,123 @@ static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr)
159 return htype; 202 return htype;
160} 203}
161 204
162static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl) 205static bool is_pae(struct sk_buff *skb)
163{ 206{
164 struct ieee80211_hdr *hdr; 207 struct ieee80211_hdr *hdr;
165 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
166 struct ath_tx_info_priv *tx_info_priv;
167 __le16 fc; 208 __le16 fc;
168 209
169 hdr = (struct ieee80211_hdr *)skb->data; 210 hdr = (struct ieee80211_hdr *)skb->data;
170 fc = hdr->frame_control; 211 fc = hdr->frame_control;
171 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
172 212
173 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) { 213 if (ieee80211_is_data(fc)) {
174 txctl->use_minrate = 1;
175 txctl->min_rate = tx_info_priv->min_rate;
176 } else if (ieee80211_is_data(fc)) {
177 if (ieee80211_is_nullfunc(fc) || 214 if (ieee80211_is_nullfunc(fc) ||
178 /* Port Access Entity (IEEE 802.1X) */ 215 /* Port Access Entity (IEEE 802.1X) */
179 (skb->protocol == cpu_to_be16(0x888E))) { 216 (skb->protocol == cpu_to_be16(ETH_P_PAE))) {
180 txctl->use_minrate = 1; 217 return true;
181 txctl->min_rate = tx_info_priv->min_rate;
182 } 218 }
183 if (is_multicast_ether_addr(hdr->addr1))
184 txctl->mcast_rate = tx_info_priv->min_rate;
185 } 219 }
186 220
221 return false;
187} 222}
188 223
189/* This function will setup additional txctl information, mostly rate stuff */ 224static int get_hw_crypto_keytype(struct sk_buff *skb)
190/* FIXME: seqno, ps */
191static int ath_tx_prepare(struct ath_softc *sc,
192 struct sk_buff *skb,
193 struct ath_tx_control *txctl)
194{ 225{
195 struct ieee80211_hw *hw = sc->hw;
196 struct ieee80211_hdr *hdr;
197 struct ath_rc_series *rcs;
198 struct ath_txq *txq = NULL;
199 const struct ath9k_rate_table *rt;
200 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 226 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
201 struct ath_tx_info_priv *tx_info_priv;
202 int hdrlen;
203 u8 rix, antenna;
204 __le16 fc;
205 u8 *qc;
206
207 txctl->dev = sc;
208 hdr = (struct ieee80211_hdr *)skb->data;
209 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
210 fc = hdr->frame_control;
211
212 rt = sc->sc_currates;
213 BUG_ON(!rt);
214
215 /* Fill misc fields */
216
217 spin_lock_bh(&sc->node_lock);
218 txctl->an = ath_node_get(sc, hdr->addr1);
219 /* create a temp node, if the node is not there already */
220 if (!txctl->an)
221 txctl->an = ath_node_attach(sc, hdr->addr1, 0);
222 spin_unlock_bh(&sc->node_lock);
223
224 if (ieee80211_is_data_qos(fc)) {
225 qc = ieee80211_get_qos_ctl(hdr);
226 txctl->tidno = qc[0] & 0xf;
227 }
228
229 txctl->if_id = 0;
230 txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
231 txctl->txpower = MAX_RATE_POWER; /* FIXME */
232
233 /* Fill Key related fields */
234
235 txctl->keytype = ATH9K_KEY_TYPE_CLEAR;
236 txctl->keyix = ATH9K_TXKEYIX_INVALID;
237 227
238 if (tx_info->control.hw_key) { 228 if (tx_info->control.hw_key) {
239 txctl->keyix = tx_info->control.hw_key->hw_key_idx;
240 txctl->frmlen += tx_info->control.hw_key->icv_len;
241
242 if (tx_info->control.hw_key->alg == ALG_WEP) 229 if (tx_info->control.hw_key->alg == ALG_WEP)
243 txctl->keytype = ATH9K_KEY_TYPE_WEP; 230 return ATH9K_KEY_TYPE_WEP;
244 else if (tx_info->control.hw_key->alg == ALG_TKIP) 231 else if (tx_info->control.hw_key->alg == ALG_TKIP)
245 txctl->keytype = ATH9K_KEY_TYPE_TKIP; 232 return ATH9K_KEY_TYPE_TKIP;
246 else if (tx_info->control.hw_key->alg == ALG_CCMP) 233 else if (tx_info->control.hw_key->alg == ALG_CCMP)
247 txctl->keytype = ATH9K_KEY_TYPE_AES; 234 return ATH9K_KEY_TYPE_AES;
248 }
249
250 /* Fill packet type */
251
252 txctl->atype = get_hal_packet_type(hdr);
253
254 /* Fill qnum */
255
256 if (unlikely(txctl->flags & ATH9K_TXDESC_CAB)) {
257 txctl->qnum = 0;
258 txq = sc->sc_cabq;
259 } else {
260 txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
261 txq = &sc->sc_txq[txctl->qnum];
262 }
263 spin_lock_bh(&txq->axq_lock);
264
265 /* Try to avoid running out of descriptors */
266 if (txq->axq_depth >= (ATH_TXBUF - 20) &&
267 !(txctl->flags & ATH9K_TXDESC_CAB)) {
268 DPRINTF(sc, ATH_DBG_FATAL,
269 "%s: TX queue: %d is full, depth: %d\n",
270 __func__,
271 txctl->qnum,
272 txq->axq_depth);
273 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
274 txq->stopped = 1;
275 spin_unlock_bh(&txq->axq_lock);
276 return -1;
277 } 235 }
278 236
279 spin_unlock_bh(&txq->axq_lock); 237 return ATH9K_KEY_TYPE_CLEAR;
280 238}
281 /* Fill rate */
282
283 fill_min_rates(skb, txctl);
284 239
285 /* Fill flags */ 240/* Called only when tx aggregation is enabled and HT is supported */
286 241
287 txctl->flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */ 242static void assign_aggr_tid_seqno(struct sk_buff *skb,
243 struct ath_buf *bf)
244{
245 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
246 struct ieee80211_hdr *hdr;
247 struct ath_node *an;
248 struct ath_atx_tid *tid;
249 __le16 fc;
250 u8 *qc;
288 251
289 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 252 if (!tx_info->control.sta)
290 txctl->flags |= ATH9K_TXDESC_NOACK; 253 return;
291 if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS)
292 txctl->flags |= ATH9K_TXDESC_RTSENA;
293 254
294 /* 255 an = (struct ath_node *)tx_info->control.sta->drv_priv;
295 * Setup for rate calculations. 256 hdr = (struct ieee80211_hdr *)skb->data;
296 */ 257 fc = hdr->frame_control;
297 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
298 rcs = tx_info_priv->rcs;
299 258
300 if (ieee80211_is_data(fc) && !txctl->use_minrate) { 259 /* Get tidno */
301 260
302 /* Enable HT only for DATA frames and not for EAPOL */ 261 if (ieee80211_is_data_qos(fc)) {
303 txctl->ht = (hw->conf.ht_conf.ht_supported && 262 qc = ieee80211_get_qos_ctl(hdr);
304 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)); 263 bf->bf_tidno = qc[0] & 0xf;
264 }
305 265
306 if (is_multicast_ether_addr(hdr->addr1)) { 266 /* Get seqno */
307 rcs[0].rix = (u8)
308 ath_tx_findindex(rt, txctl->mcast_rate);
309 267
310 /* 268 if (ieee80211_is_data(fc) && !is_pae(skb)) {
311 * mcast packets are not re-tried.
312 */
313 rcs[0].tries = 1;
314 }
315 /* For HT capable stations, we save tidno for later use. 269 /* For HT capable stations, we save tidno for later use.
316 * We also override seqno set by upper layer with the one 270 * We also override seqno set by upper layer with the one
317 * in tx aggregation state. 271 * in tx aggregation state.
318 * 272 *
319 * First, the fragmentation stat is determined.
320 * If fragmentation is on, the sequence number is 273 * If fragmentation is on, the sequence number is
321 * not overridden, since it has been 274 * not overridden, since it has been
322 * incremented by the fragmentation routine. 275 * incremented by the fragmentation routine.
276 *
277 * FIXME: check if the fragmentation threshold exceeds
278 * IEEE80211 max.
323 */ 279 */
324 if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) && 280 tid = ATH_AN_2_TID(an, bf->bf_tidno);
325 txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) { 281 hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
326 struct ath_atx_tid *tid; 282 IEEE80211_SEQ_SEQ_SHIFT);
283 bf->bf_seqno = tid->seq_next;
284 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
285 }
286}
327 287
328 tid = ATH_AN_2_TID(txctl->an, txctl->tidno); 288static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
289 struct ath_txq *txq)
290{
291 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
292 int flags = 0;
329 293
330 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << 294 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
331 IEEE80211_SEQ_SEQ_SHIFT); 295 flags |= ATH9K_TXDESC_INTREQ;
332 txctl->seqno = tid->seq_next;
333 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
334 }
335 } else {
336 /* for management and control frames,
337 * or for NULL and EAPOL frames */
338 if (txctl->min_rate)
339 rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate);
340 else
341 rcs[0].rix = 0;
342 rcs[0].tries = ATH_MGT_TXMAXTRY;
343 }
344 rix = rcs[0].rix;
345 296
346 if (ieee80211_has_morefrags(fc) || 297 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
347 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) { 298 flags |= ATH9K_TXDESC_NOACK;
348 /* 299 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
349 ** Force hardware to use computed duration for next 300 flags |= ATH9K_TXDESC_RTSENA;
350 ** fragment by disabling multi-rate retry, which
351 ** updates duration based on the multi-rate
352 ** duration table.
353 */
354 rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
355 rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
356 /* reset tries but keep rate index */
357 rcs[0].tries = ATH_TXMAXTRY;
358 }
359 301
360 /* 302 return flags;
361 * Determine if a tx interrupt should be generated for 303}
362 * this descriptor. We take a tx interrupt to reap 304
363 * descriptors when the h/w hits an EOL condition or 305static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
364 * when the descriptor is specifically marked to generate 306{
365 * an interrupt. We periodically mark descriptors in this 307 struct ath_buf *bf = NULL;
366 * way to insure timely replenishing of the supply needed
367 * for sending frames. Defering interrupts reduces system
368 * load and potentially allows more concurrent work to be
369 * done but if done to aggressively can cause senders to
370 * backup.
371 *
372 * NB: use >= to deal with sc_txintrperiod changing
373 * dynamically through sysctl.
374 */
375 spin_lock_bh(&txq->axq_lock);
376 if ((++txq->axq_intrcnt >= sc->sc_txintrperiod)) {
377 txctl->flags |= ATH9K_TXDESC_INTREQ;
378 txq->axq_intrcnt = 0;
379 }
380 spin_unlock_bh(&txq->axq_lock);
381 308
382 if (is_multicast_ether_addr(hdr->addr1)) { 309 spin_lock_bh(&sc->tx.txbuflock);
383 antenna = sc->sc_mcastantenna + 1; 310
384 sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1; 311 if (unlikely(list_empty(&sc->tx.txbuf))) {
312 spin_unlock_bh(&sc->tx.txbuflock);
313 return NULL;
385 } 314 }
386 315
387 return 0; 316 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
317 list_del(&bf->list);
318
319 spin_unlock_bh(&sc->tx.txbuflock);
320
321 return bf;
388} 322}
389 323
390/* To complete a chain of buffers associated a frame */ 324/* To complete a chain of buffers associated a frame */
@@ -396,6 +330,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc,
396{ 330{
397 struct sk_buff *skb = bf->bf_mpdu; 331 struct sk_buff *skb = bf->bf_mpdu;
398 struct ath_xmit_status tx_status; 332 struct ath_xmit_status tx_status;
333 unsigned long flags;
399 334
400 /* 335 /*
401 * Set retry information. 336 * Set retry information.
@@ -414,20 +349,21 @@ static void ath_tx_complete_buf(struct ath_softc *sc,
414 if (bf_isxretried(bf)) 349 if (bf_isxretried(bf))
415 tx_status.flags |= ATH_TX_XRETRY; 350 tx_status.flags |= ATH_TX_XRETRY;
416 } 351 }
352
417 /* Unmap this frame */ 353 /* Unmap this frame */
418 pci_unmap_single(sc->pdev, 354 pci_unmap_single(sc->pdev,
419 bf->bf_dmacontext, 355 bf->bf_dmacontext,
420 skb->len, 356 skb->len,
421 PCI_DMA_TODEVICE); 357 PCI_DMA_TODEVICE);
422 /* complete this frame */ 358 /* complete this frame */
423 ath_tx_complete(sc, skb, &tx_status, bf->bf_node); 359 ath_tx_complete(sc, skb, &tx_status);
424 360
425 /* 361 /*
426 * Return the list of ath_buf of this mpdu to free queue 362 * Return the list of ath_buf of this mpdu to free queue
427 */ 363 */
428 spin_lock_bh(&sc->sc_txbuflock); 364 spin_lock_irqsave(&sc->tx.txbuflock, flags);
429 list_splice_tail_init(bf_q, &sc->sc_txbuf); 365 list_splice_tail_init(bf_q, &sc->tx.txbuf);
430 spin_unlock_bh(&sc->sc_txbuflock); 366 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
431} 367}
432 368
433/* 369/*
@@ -468,7 +404,7 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
468 404
469static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 405static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
470{ 406{
471 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum]; 407 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
472 408
473 spin_lock_bh(&txq->axq_lock); 409 spin_lock_bh(&txq->axq_lock);
474 410
@@ -481,7 +417,7 @@ static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
481 417
482void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 418void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
483{ 419{
484 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum]; 420 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
485 421
486 ASSERT(tid->paused > 0); 422 ASSERT(tid->paused > 0);
487 spin_lock_bh(&txq->axq_lock); 423 spin_lock_bh(&txq->axq_lock);
@@ -505,11 +441,9 @@ unlock:
505 441
506/* Compute the number of bad frames */ 442/* Compute the number of bad frames */
507 443
508static int ath_tx_num_badfrms(struct ath_softc *sc, 444static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
509 struct ath_buf *bf, int txok) 445 int txok)
510{ 446{
511 struct ath_node *an = bf->bf_node;
512 int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
513 struct ath_buf *bf_last = bf->bf_lastbf; 447 struct ath_buf *bf_last = bf->bf_lastbf;
514 struct ath_desc *ds = bf_last->bf_desc; 448 struct ath_desc *ds = bf_last->bf_desc;
515 u16 seq_st = 0; 449 u16 seq_st = 0;
@@ -518,7 +452,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc,
518 int nbad = 0; 452 int nbad = 0;
519 int isaggr = 0; 453 int isaggr = 0;
520 454
521 if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED) 455 if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
522 return 0; 456 return 0;
523 457
524 isaggr = bf_isaggr(bf); 458 isaggr = bf_isaggr(bf);
@@ -553,8 +487,8 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
553 487
554/* Update block ack window */ 488/* Update block ack window */
555 489
556static void ath_tx_update_baw(struct ath_softc *sc, 490static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
557 struct ath_atx_tid *tid, int seqno) 491 int seqno)
558{ 492{
559 int index, cindex; 493 int index, cindex;
560 494
@@ -577,34 +511,23 @@ static void ath_tx_update_baw(struct ath_softc *sc,
577 * width - 0 for 20 MHz, 1 for 40 MHz 511 * width - 0 for 20 MHz, 1 for 40 MHz
578 * half_gi - to use 4us v/s 3.6 us for symbol time 512 * half_gi - to use 4us v/s 3.6 us for symbol time
579 */ 513 */
580 514static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
581static u32 ath_pkt_duration(struct ath_softc *sc, 515 int width, int half_gi, bool shortPreamble)
582 u8 rix,
583 struct ath_buf *bf,
584 int width,
585 int half_gi,
586 bool shortPreamble)
587{ 516{
588 const struct ath9k_rate_table *rt = sc->sc_currates; 517 struct ath_rate_table *rate_table = sc->cur_rate_table;
589 u32 nbits, nsymbits, duration, nsymbols; 518 u32 nbits, nsymbits, duration, nsymbols;
590 u8 rc; 519 u8 rc;
591 int streams, pktlen; 520 int streams, pktlen;
592 521
593 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen; 522 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
594 rc = rt->info[rix].rateCode; 523 rc = rate_table->info[rix].ratecode;
595 524
596 /* 525 /* for legacy rates, use old function to compute packet duration */
597 * for legacy rates, use old function to compute packet duration
598 */
599 if (!IS_HT_RATE(rc)) 526 if (!IS_HT_RATE(rc))
600 return ath9k_hw_computetxtime(sc->sc_ah, 527 return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
601 rt, 528 rix, shortPreamble);
602 pktlen, 529
603 rix, 530 /* find number of symbols: PLCP + data */
604 shortPreamble);
605 /*
606 * find number of symbols: PLCP + data
607 */
608 nbits = (pktlen << 3) + OFDM_PLCP_BITS; 531 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
609 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width]; 532 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
610 nsymbols = (nbits + nsymbits - 1) / nsymbits; 533 nsymbols = (nbits + nsymbits - 1) / nsymbits;
@@ -614,11 +537,10 @@ static u32 ath_pkt_duration(struct ath_softc *sc,
614 else 537 else
615 duration = SYMBOL_TIME_HALFGI(nsymbols); 538 duration = SYMBOL_TIME_HALFGI(nsymbols);
616 539
617 /* 540 /* addup duration for legacy/ht training and signal fields */
618 * addup duration for legacy/ht training and signal fields
619 */
620 streams = HT_RC_2_STREAMS(rc); 541 streams = HT_RC_2_STREAMS(rc);
621 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); 542 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
543
622 return duration; 544 return duration;
623} 545}
624 546
@@ -627,207 +549,127 @@ static u32 ath_pkt_duration(struct ath_softc *sc,
627static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) 549static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
628{ 550{
629 struct ath_hal *ah = sc->sc_ah; 551 struct ath_hal *ah = sc->sc_ah;
630 const struct ath9k_rate_table *rt; 552 struct ath_rate_table *rt;
631 struct ath_desc *ds = bf->bf_desc; 553 struct ath_desc *ds = bf->bf_desc;
632 struct ath_desc *lastds = bf->bf_lastbf->bf_desc; 554 struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
633 struct ath9k_11n_rate_series series[4]; 555 struct ath9k_11n_rate_series series[4];
634 int i, flags, rtsctsena = 0, dynamic_mimops = 0; 556 struct sk_buff *skb;
557 struct ieee80211_tx_info *tx_info;
558 struct ieee80211_tx_rate *rates;
559 struct ieee80211_hdr *hdr;
560 int i, flags, rtsctsena = 0;
635 u32 ctsduration = 0; 561 u32 ctsduration = 0;
636 u8 rix = 0, cix, ctsrate = 0; 562 u8 rix = 0, cix, ctsrate = 0;
637 u32 aggr_limit_with_rts = ah->ah_caps.rts_aggr_limit; 563 __le16 fc;
638 struct ath_node *an = (struct ath_node *) bf->bf_node;
639 564
640 /* 565 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
641 * get the cix for the lowest valid rix. 566
642 */ 567 skb = (struct sk_buff *)bf->bf_mpdu;
643 rt = sc->sc_currates; 568 hdr = (struct ieee80211_hdr *)skb->data;
644 for (i = 4; i--;) { 569 fc = hdr->frame_control;
645 if (bf->bf_rcs[i].tries) { 570 tx_info = IEEE80211_SKB_CB(skb);
646 rix = bf->bf_rcs[i].rix; 571 rates = tx_info->control.rates;
572
573 if (ieee80211_has_morefrags(fc) ||
574 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
575 rates[1].count = rates[2].count = rates[3].count = 0;
576 rates[1].idx = rates[2].idx = rates[3].idx = 0;
577 rates[0].count = ATH_TXMAXTRY;
578 }
579
580 /* get the cix for the lowest valid rix */
581 rt = sc->cur_rate_table;
582 for (i = 3; i >= 0; i--) {
583 if (rates[i].count && (rates[i].idx >= 0)) {
584 rix = rates[i].idx;
647 break; 585 break;
648 } 586 }
649 } 587 }
588
650 flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)); 589 flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
651 cix = rt->info[rix].controlRate; 590 cix = rt->info[rix].ctrl_rate;
652 591
653 /* 592 /*
654 * If 802.11g protection is enabled, determine whether 593 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
655 * to use RTS/CTS or just CTS. Note that this is only 594 * just CTS. Note that this is only done for OFDM/HT unicast frames.
656 * done for OFDM/HT unicast frames.
657 */ 595 */
658 if (sc->sc_protmode != PROT_M_NONE && 596 if (sc->sc_protmode != PROT_M_NONE && !(bf->bf_flags & ATH9K_TXDESC_NOACK)
659 (rt->info[rix].phy == PHY_OFDM || 597 && (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
660 rt->info[rix].phy == PHY_HT) && 598 WLAN_RC_PHY_HT(rt->info[rix].phy))) {
661 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
662 if (sc->sc_protmode == PROT_M_RTSCTS) 599 if (sc->sc_protmode == PROT_M_RTSCTS)
663 flags = ATH9K_TXDESC_RTSENA; 600 flags = ATH9K_TXDESC_RTSENA;
664 else if (sc->sc_protmode == PROT_M_CTSONLY) 601 else if (sc->sc_protmode == PROT_M_CTSONLY)
665 flags = ATH9K_TXDESC_CTSENA; 602 flags = ATH9K_TXDESC_CTSENA;
666 603
667 cix = rt->info[sc->sc_protrix].controlRate; 604 cix = rt->info[sc->sc_protrix].ctrl_rate;
668 rtsctsena = 1; 605 rtsctsena = 1;
669 } 606 }
670 607
671 /* For 11n, the default behavior is to enable RTS for 608 /* For 11n, the default behavior is to enable RTS for hw retried frames.
672 * hw retried frames. We enable the global flag here and 609 * We enable the global flag here and let rate series flags determine
673 * let rate series flags determine which rates will actually 610 * which rates will actually use RTS.
674 * use RTS.
675 */ 611 */
676 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) { 612 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
677 BUG_ON(!an); 613 /* 802.11g protection not needed, use our default behavior */
678 /*
679 * 802.11g protection not needed, use our default behavior
680 */
681 if (!rtsctsena) 614 if (!rtsctsena)
682 flags = ATH9K_TXDESC_RTSENA; 615 flags = ATH9K_TXDESC_RTSENA;
683 /*
684 * For dynamic MIMO PS, RTS needs to precede the first aggregate
685 * and the second aggregate should have any protection at all.
686 */
687 if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
688 if (!bf_isaggrburst(bf)) {
689 flags = ATH9K_TXDESC_RTSENA;
690 dynamic_mimops = 1;
691 } else {
692 flags = 0;
693 }
694 }
695 } 616 }
696 617
697 /* 618 /* Set protection if aggregate protection on */
698 * Set protection if aggregate protection on
699 */
700 if (sc->sc_config.ath_aggr_prot && 619 if (sc->sc_config.ath_aggr_prot &&
701 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) { 620 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
702 flags = ATH9K_TXDESC_RTSENA; 621 flags = ATH9K_TXDESC_RTSENA;
703 cix = rt->info[sc->sc_protrix].controlRate; 622 cix = rt->info[sc->sc_protrix].ctrl_rate;
704 rtsctsena = 1; 623 rtsctsena = 1;
705 } 624 }
706 625
707 /* 626 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
708 * For AR5416 - RTS cannot be followed by a frame larger than 8K. 627 if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
709 */
710 if (bf_isaggr(bf) && (bf->bf_al > aggr_limit_with_rts)) {
711 /*
712 * Ensure that in the case of SM Dynamic power save
713 * while we are bursting the second aggregate the
714 * RTS is cleared.
715 */
716 flags &= ~(ATH9K_TXDESC_RTSENA); 628 flags &= ~(ATH9K_TXDESC_RTSENA);
717 }
718 629
719 /* 630 /*
720 * CTS transmit rate is derived from the transmit rate 631 * CTS transmit rate is derived from the transmit rate by looking in the
721 * by looking in the h/w rate table. We must also factor 632 * h/w rate table. We must also factor in whether or not a short
722 * in whether or not a short preamble is to be used. 633 * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
723 */ 634 */
724 /* NB: cix is set above where RTS/CTS is enabled */ 635 ctsrate = rt->info[cix].ratecode |
725 BUG_ON(cix == 0xff); 636 (bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);
726 ctsrate = rt->info[cix].rateCode |
727 (bf_isshpreamble(bf) ? rt->info[cix].shortPreamble : 0);
728
729 /*
730 * Setup HAL rate series
731 */
732 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
733 637
734 for (i = 0; i < 4; i++) { 638 for (i = 0; i < 4; i++) {
735 if (!bf->bf_rcs[i].tries) 639 if (!rates[i].count || (rates[i].idx < 0))
736 continue; 640 continue;
737 641
738 rix = bf->bf_rcs[i].rix; 642 rix = rates[i].idx;
739 643
740 series[i].Rate = rt->info[rix].rateCode | 644 series[i].Rate = rt->info[rix].ratecode |
741 (bf_isshpreamble(bf) ? rt->info[rix].shortPreamble : 0); 645 (bf_isshpreamble(bf) ? rt->info[rix].short_preamble : 0);
742 646
743 series[i].Tries = bf->bf_rcs[i].tries; 647 series[i].Tries = rates[i].count;
744 648
745 series[i].RateFlags = ( 649 series[i].RateFlags = (
746 (bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ? 650 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ?
747 ATH9K_RATESERIES_RTS_CTS : 0) | 651 ATH9K_RATESERIES_RTS_CTS : 0) |
748 ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ? 652 ((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ?
749 ATH9K_RATESERIES_2040 : 0) | 653 ATH9K_RATESERIES_2040 : 0) |
750 ((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ? 654 ((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ?
751 ATH9K_RATESERIES_HALFGI : 0); 655 ATH9K_RATESERIES_HALFGI : 0);
752 656
753 series[i].PktDuration = ath_pkt_duration( 657 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
754 sc, rix, bf, 658 (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0,
755 (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0, 659 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
756 (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG), 660 bf_isshpreamble(bf));
757 bf_isshpreamble(bf));
758 661
759 if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) && 662 series[i].ChSel = sc->sc_tx_chainmask;
760 (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
761 /*
762 * When sending to an HT node that has enabled static
763 * SM/MIMO power save, send at single stream rates but
764 * use maximum allowed transmit chains per user,
765 * hardware, regulatory, or country limits for
766 * better range.
767 */
768 series[i].ChSel = sc->sc_tx_chainmask;
769 } else {
770 if (bf_isht(bf))
771 series[i].ChSel =
772 ath_chainmask_sel_logic(sc, an);
773 else
774 series[i].ChSel = sc->sc_tx_chainmask;
775 }
776 663
777 if (rtsctsena) 664 if (rtsctsena)
778 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 665 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
779
780 /*
781 * Set RTS for all rates if node is in dynamic powersave
782 * mode and we are using dual stream rates.
783 */
784 if (dynamic_mimops && (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG))
785 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
786 }
787
788 /*
789 * For non-HT devices, calculate RTS/CTS duration in software
790 * and disable multi-rate retry.
791 */
792 if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) {
793 /*
794 * Compute the transmit duration based on the frame
795 * size and the size of an ACK frame. We call into the
796 * HAL to do the computation since it depends on the
797 * characteristics of the actual PHY being used.
798 *
799 * NB: CTS is assumed the same size as an ACK so we can
800 * use the precalculated ACK durations.
801 */
802 if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */
803 ctsduration += bf_isshpreamble(bf) ?
804 rt->info[cix].spAckDuration :
805 rt->info[cix].lpAckDuration;
806 }
807
808 ctsduration += series[0].PktDuration;
809
810 if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
811 ctsduration += bf_isshpreamble(bf) ?
812 rt->info[rix].spAckDuration :
813 rt->info[rix].lpAckDuration;
814 }
815
816 /*
817 * Disable multi-rate retry when using RTS/CTS by clearing
818 * series 1, 2 and 3.
819 */
820 memset(&series[1], 0, sizeof(struct ath9k_11n_rate_series) * 3);
821 } 666 }
822 667
823 /* 668 /* set dur_update_en for l-sig computation except for PS-Poll frames */
824 * set dur_update_en for l-sig computation except for PS-Poll frames 669 ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
825 */ 670 ctsrate, ctsduration,
826 ath9k_hw_set11n_ratescenario(ah, ds, lastds,
827 !bf_ispspoll(bf),
828 ctsrate,
829 ctsduration,
830 series, 4, flags); 671 series, 4, flags);
672
831 if (sc->sc_config.ath_aggr_prot && flags) 673 if (sc->sc_config.ath_aggr_prot && flags)
832 ath9k_hw_set11n_burstduration(ah, ds, 8192); 674 ath9k_hw_set11n_burstduration(ah, ds, 8192);
833} 675}
@@ -836,27 +678,18 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
836 * Function to send a normal HT (non-AMPDU) frame 678 * Function to send a normal HT (non-AMPDU) frame
837 * NB: must be called with txq lock held 679 * NB: must be called with txq lock held
838 */ 680 */
839
840static int ath_tx_send_normal(struct ath_softc *sc, 681static int ath_tx_send_normal(struct ath_softc *sc,
841 struct ath_txq *txq, 682 struct ath_txq *txq,
842 struct ath_atx_tid *tid, 683 struct ath_atx_tid *tid,
843 struct list_head *bf_head) 684 struct list_head *bf_head)
844{ 685{
845 struct ath_buf *bf; 686 struct ath_buf *bf;
846 struct sk_buff *skb;
847 struct ieee80211_tx_info *tx_info;
848 struct ath_tx_info_priv *tx_info_priv;
849 687
850 BUG_ON(list_empty(bf_head)); 688 BUG_ON(list_empty(bf_head));
851 689
852 bf = list_first_entry(bf_head, struct ath_buf, list); 690 bf = list_first_entry(bf_head, struct ath_buf, list);
853 bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */ 691 bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */
854 692
855 skb = (struct sk_buff *)bf->bf_mpdu;
856 tx_info = IEEE80211_SKB_CB(skb);
857 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
858 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
859
860 /* update starting sequence number for subsequent ADDBA request */ 693 /* update starting sequence number for subsequent ADDBA request */
861 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 694 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
862 695
@@ -873,7 +706,7 @@ static int ath_tx_send_normal(struct ath_softc *sc,
873 706
874static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 707static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
875{ 708{
876 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum]; 709 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
877 struct ath_buf *bf; 710 struct ath_buf *bf;
878 struct list_head bf_head; 711 struct list_head bf_head;
879 INIT_LIST_HEAD(&bf_head); 712 INIT_LIST_HEAD(&bf_head);
@@ -906,8 +739,10 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
906 struct list_head *bf_q, 739 struct list_head *bf_q,
907 int txok) 740 int txok)
908{ 741{
909 struct ath_node *an = bf->bf_node; 742 struct ath_node *an = NULL;
910 struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno); 743 struct sk_buff *skb;
744 struct ieee80211_tx_info *tx_info;
745 struct ath_atx_tid *tid = NULL;
911 struct ath_buf *bf_last = bf->bf_lastbf; 746 struct ath_buf *bf_last = bf->bf_lastbf;
912 struct ath_desc *ds = bf_last->bf_desc; 747 struct ath_desc *ds = bf_last->bf_desc;
913 struct ath_buf *bf_next, *bf_lastq = NULL; 748 struct ath_buf *bf_next, *bf_lastq = NULL;
@@ -915,7 +750,14 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
915 u16 seq_st = 0; 750 u16 seq_st = 0;
916 u32 ba[WME_BA_BMP_SIZE >> 5]; 751 u32 ba[WME_BA_BMP_SIZE >> 5];
917 int isaggr, txfail, txpending, sendbar = 0, needreset = 0; 752 int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
918 int isnodegone = (an->an_flags & ATH_NODE_CLEAN); 753
754 skb = (struct sk_buff *)bf->bf_mpdu;
755 tx_info = IEEE80211_SKB_CB(skb);
756
757 if (tx_info->control.sta) {
758 an = (struct ath_node *)tx_info->control.sta->drv_priv;
759 tid = ATH_AN_2_TID(an, bf->bf_tidno);
760 }
919 761
920 isaggr = bf_isaggr(bf); 762 isaggr = bf_isaggr(bf);
921 if (isaggr) { 763 if (isaggr) {
@@ -939,7 +781,8 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
939 * when perform internal reset in this routine. 781 * when perform internal reset in this routine.
940 * Only enable reset in STA mode for now. 782 * Only enable reset in STA mode for now.
941 */ 783 */
942 if (sc->sc_ah->ah_opmode == ATH9K_M_STA) 784 if (sc->sc_ah->ah_opmode ==
785 NL80211_IFTYPE_STATION)
943 needreset = 1; 786 needreset = 1;
944 } 787 }
945 } else { 788 } else {
@@ -961,7 +804,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
961 /* transmit completion */ 804 /* transmit completion */
962 } else { 805 } else {
963 806
964 if (!tid->cleanup_inprogress && !isnodegone && 807 if (!(tid->state & AGGR_CLEANUP) &&
965 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { 808 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
966 if (bf->bf_retries < ATH_MAX_SW_RETRIES) { 809 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
967 ath_tx_set_retry(sc, bf); 810 ath_tx_set_retry(sc, bf);
@@ -1038,18 +881,17 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1038 struct ath_buf *tbf; 881 struct ath_buf *tbf;
1039 882
1040 /* allocate new descriptor */ 883 /* allocate new descriptor */
1041 spin_lock_bh(&sc->sc_txbuflock); 884 spin_lock_bh(&sc->tx.txbuflock);
1042 ASSERT(!list_empty((&sc->sc_txbuf))); 885 ASSERT(!list_empty((&sc->tx.txbuf)));
1043 tbf = list_first_entry(&sc->sc_txbuf, 886 tbf = list_first_entry(&sc->tx.txbuf,
1044 struct ath_buf, list); 887 struct ath_buf, list);
1045 list_del(&tbf->list); 888 list_del(&tbf->list);
1046 spin_unlock_bh(&sc->sc_txbuflock); 889 spin_unlock_bh(&sc->tx.txbuflock);
1047 890
1048 ATH_TXBUF_RESET(tbf); 891 ATH_TXBUF_RESET(tbf);
1049 892
1050 /* copy descriptor content */ 893 /* copy descriptor content */
1051 tbf->bf_mpdu = bf_last->bf_mpdu; 894 tbf->bf_mpdu = bf_last->bf_mpdu;
1052 tbf->bf_node = bf_last->bf_node;
1053 tbf->bf_buf_addr = bf_last->bf_buf_addr; 895 tbf->bf_buf_addr = bf_last->bf_buf_addr;
1054 *(tbf->bf_desc) = *(bf_last->bf_desc); 896 *(tbf->bf_desc) = *(bf_last->bf_desc);
1055 897
@@ -1090,25 +932,16 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1090 bf = bf_next; 932 bf = bf_next;
1091 } 933 }
1092 934
1093 /* 935 if (tid->state & AGGR_CLEANUP) {
1094 * node is already gone. no more assocication
1095 * with the node. the node might have been freed
1096 * any node acces can result in panic.note tid
1097 * is part of the node.
1098 */
1099 if (isnodegone)
1100 return;
1101
1102 if (tid->cleanup_inprogress) {
1103 /* check to see if we're done with cleaning the h/w queue */ 936 /* check to see if we're done with cleaning the h/w queue */
1104 spin_lock_bh(&txq->axq_lock); 937 spin_lock_bh(&txq->axq_lock);
1105 938
1106 if (tid->baw_head == tid->baw_tail) { 939 if (tid->baw_head == tid->baw_tail) {
1107 tid->addba_exchangecomplete = 0; 940 tid->state &= ~AGGR_ADDBA_COMPLETE;
1108 tid->addba_exchangeattempts = 0; 941 tid->addba_exchangeattempts = 0;
1109 spin_unlock_bh(&txq->axq_lock); 942 spin_unlock_bh(&txq->axq_lock);
1110 943
1111 tid->cleanup_inprogress = false; 944 tid->state &= ~AGGR_CLEANUP;
1112 945
1113 /* send buffered frames as singles */ 946 /* send buffered frames as singles */
1114 ath_tx_flush_tid(sc, tid); 947 ath_tx_flush_tid(sc, tid);
@@ -1136,29 +969,45 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
1136 return; 969 return;
1137} 970}
1138 971
972static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad)
973{
974 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
975 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
976 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
977
978 tx_info_priv->update_rc = false;
979 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
980 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
981
982 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
983 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
984 if (bf_isdata(bf)) {
985 memcpy(&tx_info_priv->tx, &ds->ds_txstat,
986 sizeof(tx_info_priv->tx));
987 tx_info_priv->n_frames = bf->bf_nframes;
988 tx_info_priv->n_bad_frames = nbad;
989 tx_info_priv->update_rc = true;
990 }
991 }
992}
993
1139/* Process completed xmit descriptors from the specified queue */ 994/* Process completed xmit descriptors from the specified queue */
1140 995
1141static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) 996static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1142{ 997{
1143 struct ath_hal *ah = sc->sc_ah; 998 struct ath_hal *ah = sc->sc_ah;
1144 struct ath_buf *bf, *lastbf, *bf_held = NULL; 999 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1145 struct list_head bf_head; 1000 struct list_head bf_head;
1146 struct ath_desc *ds, *tmp_ds; 1001 struct ath_desc *ds;
1147 struct sk_buff *skb; 1002 int txok, nbad = 0;
1148 struct ieee80211_tx_info *tx_info;
1149 struct ath_tx_info_priv *tx_info_priv;
1150 int nacked, txok, nbad = 0, isrifs = 0;
1151 int status; 1003 int status;
1152 1004
1153 DPRINTF(sc, ATH_DBG_QUEUE, 1005 DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
1154 "%s: tx queue %d (%x), link %p\n", __func__,
1155 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), 1006 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
1156 txq->axq_link); 1007 txq->axq_link);
1157 1008
1158 nacked = 0;
1159 for (;;) { 1009 for (;;) {
1160 spin_lock_bh(&txq->axq_lock); 1010 spin_lock_bh(&txq->axq_lock);
1161 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
1162 if (list_empty(&txq->axq_q)) { 1011 if (list_empty(&txq->axq_q)) {
1163 txq->axq_link = NULL; 1012 txq->axq_link = NULL;
1164 txq->axq_linkbuf = NULL; 1013 txq->axq_linkbuf = NULL;
@@ -1229,9 +1078,9 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1229 1078
1230 if (bf_held) { 1079 if (bf_held) {
1231 list_del(&bf_held->list); 1080 list_del(&bf_held->list);
1232 spin_lock_bh(&sc->sc_txbuflock); 1081 spin_lock_bh(&sc->tx.txbuflock);
1233 list_add_tail(&bf_held->list, &sc->sc_txbuf); 1082 list_add_tail(&bf_held->list, &sc->tx.txbuf);
1234 spin_unlock_bh(&sc->sc_txbuflock); 1083 spin_unlock_bh(&sc->tx.txbuflock);
1235 } 1084 }
1236 1085
1237 if (!bf_isampdu(bf)) { 1086 if (!bf_isampdu(bf)) {
@@ -1246,29 +1095,8 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1246 } else { 1095 } else {
1247 nbad = ath_tx_num_badfrms(sc, bf, txok); 1096 nbad = ath_tx_num_badfrms(sc, bf, txok);
1248 } 1097 }
1249 skb = bf->bf_mpdu; 1098
1250 tx_info = IEEE80211_SKB_CB(skb); 1099 ath_tx_rc_status(bf, ds, nbad);
1251 tx_info_priv = (struct ath_tx_info_priv *)
1252 tx_info->driver_data[0];
1253 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
1254 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1255 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
1256 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
1257 if (ds->ds_txstat.ts_status == 0)
1258 nacked++;
1259
1260 if (bf_isdata(bf)) {
1261 if (isrifs)
1262 tmp_ds = bf->bf_rifslast->bf_desc;
1263 else
1264 tmp_ds = ds;
1265 memcpy(&tx_info_priv->tx,
1266 &tmp_ds->ds_txstat,
1267 sizeof(tx_info_priv->tx));
1268 tx_info_priv->n_frames = bf->bf_nframes;
1269 tx_info_priv->n_bad_frames = nbad;
1270 }
1271 }
1272 1100
1273 /* 1101 /*
1274 * Complete this transmit unit 1102 * Complete this transmit unit
@@ -1299,7 +1127,6 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1299 ath_txq_schedule(sc, txq); 1127 ath_txq_schedule(sc, txq);
1300 spin_unlock_bh(&txq->axq_lock); 1128 spin_unlock_bh(&txq->axq_lock);
1301 } 1129 }
1302 return nacked;
1303} 1130}
1304 1131
1305static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) 1132static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
@@ -1307,9 +1134,9 @@ static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
1307 struct ath_hal *ah = sc->sc_ah; 1134 struct ath_hal *ah = sc->sc_ah;
1308 1135
1309 (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum); 1136 (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1310 DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n", 1137 DPRINTF(sc, ATH_DBG_XMIT, "tx queue [%u] %x, link %p\n",
1311 __func__, txq->axq_qnum, 1138 txq->axq_qnum, ath9k_hw_gettxbuf(ah, txq->axq_qnum),
1312 ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link); 1139 txq->axq_link);
1313} 1140}
1314 1141
1315/* Drain only the data queues */ 1142/* Drain only the data queues */
@@ -1317,40 +1144,33 @@ static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
1317static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx) 1144static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1318{ 1145{
1319 struct ath_hal *ah = sc->sc_ah; 1146 struct ath_hal *ah = sc->sc_ah;
1320 int i; 1147 int i, status, npend = 0;
1321 int npend = 0;
1322 1148
1323 /* XXX return value */
1324 if (!(sc->sc_flags & SC_OP_INVALID)) { 1149 if (!(sc->sc_flags & SC_OP_INVALID)) {
1325 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1150 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1326 if (ATH_TXQ_SETUP(sc, i)) { 1151 if (ATH_TXQ_SETUP(sc, i)) {
1327 ath_tx_stopdma(sc, &sc->sc_txq[i]); 1152 ath_tx_stopdma(sc, &sc->tx.txq[i]);
1328
1329 /* The TxDMA may not really be stopped. 1153 /* The TxDMA may not really be stopped.
1330 * Double check the hal tx pending count */ 1154 * Double check the hal tx pending count */
1331 npend += ath9k_hw_numtxpending(ah, 1155 npend += ath9k_hw_numtxpending(ah,
1332 sc->sc_txq[i].axq_qnum); 1156 sc->tx.txq[i].axq_qnum);
1333 } 1157 }
1334 } 1158 }
1335 } 1159 }
1336 1160
1337 if (npend) { 1161 if (npend) {
1338 int status;
1339
1340 /* TxDMA not stopped, reset the hal */ 1162 /* TxDMA not stopped, reset the hal */
1341 DPRINTF(sc, ATH_DBG_XMIT, 1163 DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n");
1342 "%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
1343 1164
1344 spin_lock_bh(&sc->sc_resetlock); 1165 spin_lock_bh(&sc->sc_resetlock);
1345 if (!ath9k_hw_reset(ah, 1166 if (!ath9k_hw_reset(ah,
1346 sc->sc_ah->ah_curchan, 1167 sc->sc_ah->ah_curchan,
1347 sc->sc_ht_info.tx_chan_width, 1168 sc->tx_chan_width,
1348 sc->sc_tx_chainmask, sc->sc_rx_chainmask, 1169 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1349 sc->sc_ht_extprotspacing, true, &status)) { 1170 sc->sc_ht_extprotspacing, true, &status)) {
1350 1171
1351 DPRINTF(sc, ATH_DBG_FATAL, 1172 DPRINTF(sc, ATH_DBG_FATAL,
1352 "%s: unable to reset hardware; hal status %u\n", 1173 "Unable to reset hardware; hal status %u\n",
1353 __func__,
1354 status); 1174 status);
1355 } 1175 }
1356 spin_unlock_bh(&sc->sc_resetlock); 1176 spin_unlock_bh(&sc->sc_resetlock);
@@ -1358,7 +1178,7 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1358 1178
1359 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1179 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1360 if (ATH_TXQ_SETUP(sc, i)) 1180 if (ATH_TXQ_SETUP(sc, i))
1361 ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx); 1181 ath_tx_draintxq(sc, &sc->tx.txq[i], retry_tx);
1362 } 1182 }
1363} 1183}
1364 1184
@@ -1390,24 +1210,17 @@ static void ath_tx_addto_baw(struct ath_softc *sc,
1390 * Function to send an A-MPDU 1210 * Function to send an A-MPDU
1391 * NB: must be called with txq lock held 1211 * NB: must be called with txq lock held
1392 */ 1212 */
1393
1394static int ath_tx_send_ampdu(struct ath_softc *sc, 1213static int ath_tx_send_ampdu(struct ath_softc *sc,
1395 struct ath_txq *txq,
1396 struct ath_atx_tid *tid, 1214 struct ath_atx_tid *tid,
1397 struct list_head *bf_head, 1215 struct list_head *bf_head,
1398 struct ath_tx_control *txctl) 1216 struct ath_tx_control *txctl)
1399{ 1217{
1400 struct ath_buf *bf; 1218 struct ath_buf *bf;
1401 struct sk_buff *skb;
1402 struct ieee80211_tx_info *tx_info;
1403 struct ath_tx_info_priv *tx_info_priv;
1404 1219
1405 BUG_ON(list_empty(bf_head)); 1220 BUG_ON(list_empty(bf_head));
1406 1221
1407 bf = list_first_entry(bf_head, struct ath_buf, list); 1222 bf = list_first_entry(bf_head, struct ath_buf, list);
1408 bf->bf_state.bf_type |= BUF_AMPDU; 1223 bf->bf_state.bf_type |= BUF_AMPDU;
1409 bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
1410 bf->bf_tidno = txctl->tidno;
1411 1224
1412 /* 1225 /*
1413 * Do not queue to h/w when any of the following conditions is true: 1226 * Do not queue to h/w when any of the following conditions is true:
@@ -1418,21 +1231,16 @@ static int ath_tx_send_ampdu(struct ath_softc *sc,
1418 */ 1231 */
1419 if (!list_empty(&tid->buf_q) || tid->paused || 1232 if (!list_empty(&tid->buf_q) || tid->paused ||
1420 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) || 1233 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
1421 txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) { 1234 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
1422 /* 1235 /*
1423 * Add this frame to software queue for scheduling later 1236 * Add this frame to software queue for scheduling later
1424 * for aggregation. 1237 * for aggregation.
1425 */ 1238 */
1426 list_splice_tail_init(bf_head, &tid->buf_q); 1239 list_splice_tail_init(bf_head, &tid->buf_q);
1427 ath_tx_queue_tid(txq, tid); 1240 ath_tx_queue_tid(txctl->txq, tid);
1428 return 0; 1241 return 0;
1429 } 1242 }
1430 1243
1431 skb = (struct sk_buff *)bf->bf_mpdu;
1432 tx_info = IEEE80211_SKB_CB(skb);
1433 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
1434 memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1435
1436 /* Add sub-frame to BAW */ 1244 /* Add sub-frame to BAW */
1437 ath_tx_addto_baw(sc, tid, bf); 1245 ath_tx_addto_baw(sc, tid, bf);
1438 1246
@@ -1440,7 +1248,8 @@ static int ath_tx_send_ampdu(struct ath_softc *sc,
1440 bf->bf_nframes = 1; 1248 bf->bf_nframes = 1;
1441 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */ 1249 bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */
1442 ath_buf_set_rate(sc, bf); 1250 ath_buf_set_rate(sc, bf);
1443 ath_tx_txqaddbuf(sc, txq, bf_head); 1251 ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
1252
1444 return 0; 1253 return 0;
1445} 1254}
1446 1255
@@ -1448,25 +1257,24 @@ static int ath_tx_send_ampdu(struct ath_softc *sc,
1448 * looks up the rate 1257 * looks up the rate
1449 * returns aggr limit based on lowest of the rates 1258 * returns aggr limit based on lowest of the rates
1450 */ 1259 */
1451
1452static u32 ath_lookup_rate(struct ath_softc *sc, 1260static u32 ath_lookup_rate(struct ath_softc *sc,
1453 struct ath_buf *bf) 1261 struct ath_buf *bf,
1262 struct ath_atx_tid *tid)
1454{ 1263{
1455 const struct ath9k_rate_table *rt = sc->sc_currates; 1264 struct ath_rate_table *rate_table = sc->cur_rate_table;
1456 struct sk_buff *skb; 1265 struct sk_buff *skb;
1457 struct ieee80211_tx_info *tx_info; 1266 struct ieee80211_tx_info *tx_info;
1267 struct ieee80211_tx_rate *rates;
1458 struct ath_tx_info_priv *tx_info_priv; 1268 struct ath_tx_info_priv *tx_info_priv;
1459 u32 max_4ms_framelen, frame_length; 1269 u32 max_4ms_framelen, frame_length;
1460 u16 aggr_limit, legacy = 0, maxampdu; 1270 u16 aggr_limit, legacy = 0, maxampdu;
1461 int i; 1271 int i;
1462 1272
1463
1464 skb = (struct sk_buff *)bf->bf_mpdu; 1273 skb = (struct sk_buff *)bf->bf_mpdu;
1465 tx_info = IEEE80211_SKB_CB(skb); 1274 tx_info = IEEE80211_SKB_CB(skb);
1466 tx_info_priv = (struct ath_tx_info_priv *) 1275 rates = tx_info->control.rates;
1467 tx_info->driver_data[0]; 1276 tx_info_priv =
1468 memcpy(bf->bf_rcs, 1277 (struct ath_tx_info_priv *)tx_info->rate_driver_data[0];
1469 tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0]));
1470 1278
1471 /* 1279 /*
1472 * Find the lowest frame length among the rate series that will have a 1280 * Find the lowest frame length among the rate series that will have a
@@ -1476,14 +1284,14 @@ static u32 ath_lookup_rate(struct ath_softc *sc,
1476 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX; 1284 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
1477 1285
1478 for (i = 0; i < 4; i++) { 1286 for (i = 0; i < 4; i++) {
1479 if (bf->bf_rcs[i].tries) { 1287 if (rates[i].count) {
1480 frame_length = bf->bf_rcs[i].max_4ms_framelen; 1288 if (!WLAN_RC_PHY_HT(rate_table->info[rates[i].idx].phy)) {
1481
1482 if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) {
1483 legacy = 1; 1289 legacy = 1;
1484 break; 1290 break;
1485 } 1291 }
1486 1292
1293 frame_length =
1294 rate_table->info[rates[i].idx].max_4ms_framelen;
1487 max_4ms_framelen = min(max_4ms_framelen, frame_length); 1295 max_4ms_framelen = min(max_4ms_framelen, frame_length);
1488 } 1296 }
1489 } 1297 }
@@ -1504,7 +1312,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc,
1504 * The IE, however can hold upto 65536, which shows up here 1312 * The IE, however can hold upto 65536, which shows up here
1505 * as zero. Ignore 65536 since we are constrained by hw. 1313 * as zero. Ignore 65536 since we are constrained by hw.
1506 */ 1314 */
1507 maxampdu = sc->sc_ht_info.maxampdu; 1315 maxampdu = tid->an->maxampdu;
1508 if (maxampdu) 1316 if (maxampdu)
1509 aggr_limit = min(aggr_limit, maxampdu); 1317 aggr_limit = min(aggr_limit, maxampdu);
1510 1318
@@ -1516,12 +1324,14 @@ static u32 ath_lookup_rate(struct ath_softc *sc,
1516 * meet the minimum required mpdudensity. 1324 * meet the minimum required mpdudensity.
1517 * caller should make sure that the rate is HT rate . 1325 * caller should make sure that the rate is HT rate .
1518 */ 1326 */
1519
1520static int ath_compute_num_delims(struct ath_softc *sc, 1327static int ath_compute_num_delims(struct ath_softc *sc,
1328 struct ath_atx_tid *tid,
1521 struct ath_buf *bf, 1329 struct ath_buf *bf,
1522 u16 frmlen) 1330 u16 frmlen)
1523{ 1331{
1524 const struct ath9k_rate_table *rt = sc->sc_currates; 1332 struct ath_rate_table *rt = sc->cur_rate_table;
1333 struct sk_buff *skb = bf->bf_mpdu;
1334 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1525 u32 nsymbits, nsymbols, mpdudensity; 1335 u32 nsymbits, nsymbols, mpdudensity;
1526 u16 minlen; 1336 u16 minlen;
1527 u8 rc, flags, rix; 1337 u8 rc, flags, rix;
@@ -1545,7 +1355,7 @@ static int ath_compute_num_delims(struct ath_softc *sc,
1545 * required minimum length for subframe. Take into account 1355 * required minimum length for subframe. Take into account
1546 * whether high rate is 20 or 40Mhz and half or full GI. 1356 * whether high rate is 20 or 40Mhz and half or full GI.
1547 */ 1357 */
1548 mpdudensity = sc->sc_ht_info.mpdudensity; 1358 mpdudensity = tid->an->mpdudensity;
1549 1359
1550 /* 1360 /*
1551 * If there is no mpdu density restriction, no further calculation 1361 * If there is no mpdu density restriction, no further calculation
@@ -1554,11 +1364,11 @@ static int ath_compute_num_delims(struct ath_softc *sc,
1554 if (mpdudensity == 0) 1364 if (mpdudensity == 0)
1555 return ndelim; 1365 return ndelim;
1556 1366
1557 rix = bf->bf_rcs[0].rix; 1367 rix = tx_info->control.rates[0].idx;
1558 flags = bf->bf_rcs[0].flags; 1368 flags = tx_info->control.rates[0].flags;
1559 rc = rt->info[rix].rateCode; 1369 rc = rt->info[rix].ratecode;
1560 width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0; 1370 width = (flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
1561 half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0; 1371 half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
1562 1372
1563 if (half_gi) 1373 if (half_gi)
1564 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity); 1374 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
@@ -1585,7 +1395,6 @@ static int ath_compute_num_delims(struct ath_softc *sc,
1585 * For aggregation from software buffer queue. 1395 * For aggregation from software buffer queue.
1586 * NB: must be called with txq lock held 1396 * NB: must be called with txq lock held
1587 */ 1397 */
1588
1589static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, 1398static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1590 struct ath_atx_tid *tid, 1399 struct ath_atx_tid *tid,
1591 struct list_head *bf_q, 1400 struct list_head *bf_q,
@@ -1600,7 +1409,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1600 u16 aggr_limit = 0, al = 0, bpad = 0, 1409 u16 aggr_limit = 0, al = 0, bpad = 0,
1601 al_delta, h_baw = tid->baw_size / 2; 1410 al_delta, h_baw = tid->baw_size / 2;
1602 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; 1411 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
1603 int prev_al = 0, is_ds_rate = 0; 1412 int prev_al = 0;
1604 INIT_LIST_HEAD(&bf_head); 1413 INIT_LIST_HEAD(&bf_head);
1605 1414
1606 BUG_ON(list_empty(&tid->buf_q)); 1415 BUG_ON(list_empty(&tid->buf_q));
@@ -1619,13 +1428,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1619 } 1428 }
1620 1429
1621 if (!rl) { 1430 if (!rl) {
1622 aggr_limit = ath_lookup_rate(sc, bf); 1431 aggr_limit = ath_lookup_rate(sc, bf, tid);
1623 rl = 1; 1432 rl = 1;
1624 /*
1625 * Is rate dual stream
1626 */
1627 is_ds_rate =
1628 (bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0;
1629 } 1433 }
1630 1434
1631 /* 1435 /*
@@ -1657,7 +1461,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1657 * Get the delimiters needed to meet the MPDU 1461 * Get the delimiters needed to meet the MPDU
1658 * density for this node. 1462 * density for this node.
1659 */ 1463 */
1660 ndelim = ath_compute_num_delims(sc, bf_first, bf->bf_frmlen); 1464 ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
1661 1465
1662 bpad = PADBYTES(al_delta) + (ndelim << 2); 1466 bpad = PADBYTES(al_delta) + (ndelim << 2);
1663 1467
@@ -1713,7 +1517,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1713 * process pending frames possibly doing a-mpdu aggregation 1517 * process pending frames possibly doing a-mpdu aggregation
1714 * NB: must be called with txq lock held 1518 * NB: must be called with txq lock held
1715 */ 1519 */
1716
1717static void ath_tx_sched_aggr(struct ath_softc *sc, 1520static void ath_tx_sched_aggr(struct ath_softc *sc,
1718 struct ath_txq *txq, struct ath_atx_tid *tid) 1521 struct ath_txq *txq, struct ath_atx_tid *tid)
1719{ 1522{
@@ -1799,8 +1602,8 @@ static void ath_tx_sched_aggr(struct ath_softc *sc,
1799 1602
1800static void ath_tid_drain(struct ath_softc *sc, 1603static void ath_tid_drain(struct ath_softc *sc,
1801 struct ath_txq *txq, 1604 struct ath_txq *txq,
1802 struct ath_atx_tid *tid, 1605 struct ath_atx_tid *tid)
1803 bool bh_flag) 1606
1804{ 1607{
1805 struct ath_buf *bf; 1608 struct ath_buf *bf;
1806 struct list_head bf_head; 1609 struct list_head bf_head;
@@ -1821,18 +1624,12 @@ static void ath_tid_drain(struct ath_softc *sc,
1821 * do not indicate packets while holding txq spinlock. 1624 * do not indicate packets while holding txq spinlock.
1822 * unlock is intentional here 1625 * unlock is intentional here
1823 */ 1626 */
1824 if (likely(bh_flag)) 1627 spin_unlock(&txq->axq_lock);
1825 spin_unlock_bh(&txq->axq_lock);
1826 else
1827 spin_unlock(&txq->axq_lock);
1828 1628
1829 /* complete this sub-frame */ 1629 /* complete this sub-frame */
1830 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); 1630 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
1831 1631
1832 if (likely(bh_flag)) 1632 spin_lock(&txq->axq_lock);
1833 spin_lock_bh(&txq->axq_lock);
1834 else
1835 spin_lock(&txq->axq_lock);
1836 } 1633 }
1837 1634
1838 /* 1635 /*
@@ -1849,10 +1646,8 @@ static void ath_tid_drain(struct ath_softc *sc,
1849 * Drain all pending buffers 1646 * Drain all pending buffers
1850 * NB: must be called with txq lock held 1647 * NB: must be called with txq lock held
1851 */ 1648 */
1852
1853static void ath_txq_drain_pending_buffers(struct ath_softc *sc, 1649static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1854 struct ath_txq *txq, 1650 struct ath_txq *txq)
1855 bool bh_flag)
1856{ 1651{
1857 struct ath_atx_ac *ac, *ac_tmp; 1652 struct ath_atx_ac *ac, *ac_tmp;
1858 struct ath_atx_tid *tid, *tid_tmp; 1653 struct ath_atx_tid *tid, *tid_tmp;
@@ -1863,51 +1658,33 @@ static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
1863 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) { 1658 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) {
1864 list_del(&tid->list); 1659 list_del(&tid->list);
1865 tid->sched = false; 1660 tid->sched = false;
1866 ath_tid_drain(sc, txq, tid, bh_flag); 1661 ath_tid_drain(sc, txq, tid);
1867 } 1662 }
1868 } 1663 }
1869} 1664}
1870 1665
1871static int ath_tx_start_dma(struct ath_softc *sc, 1666static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf,
1872 struct sk_buff *skb, 1667 struct sk_buff *skb,
1873 struct scatterlist *sg, 1668 struct ath_tx_control *txctl)
1874 u32 n_sg,
1875 struct ath_tx_control *txctl)
1876{ 1669{
1877 struct ath_node *an = txctl->an; 1670 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1878 struct ath_buf *bf = NULL;
1879 struct list_head bf_head;
1880 struct ath_desc *ds;
1881 struct ath_hal *ah = sc->sc_ah;
1882 struct ath_txq *txq;
1883 struct ath_tx_info_priv *tx_info_priv;
1884 struct ath_rc_series *rcs;
1885 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1671 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1886 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1672 struct ath_tx_info_priv *tx_info_priv;
1887 __le16 fc = hdr->frame_control; 1673 int hdrlen;
1888 1674 __le16 fc;
1889 if (unlikely(txctl->flags & ATH9K_TXDESC_CAB))
1890 txq = sc->sc_cabq;
1891 else
1892 txq = &sc->sc_txq[txctl->qnum];
1893 1675
1894 /* For each sglist entry, allocate an ath_buf for DMA */ 1676 tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC);
1895 INIT_LIST_HEAD(&bf_head); 1677 if (unlikely(!tx_info_priv))
1896 spin_lock_bh(&sc->sc_txbuflock);
1897 if (unlikely(list_empty(&sc->sc_txbuf))) {
1898 spin_unlock_bh(&sc->sc_txbuflock);
1899 return -ENOMEM; 1678 return -ENOMEM;
1900 } 1679 tx_info->rate_driver_data[0] = tx_info_priv;
1680 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1681 fc = hdr->frame_control;
1901 1682
1902 bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list); 1683 ATH_TXBUF_RESET(bf);
1903 list_del(&bf->list);
1904 spin_unlock_bh(&sc->sc_txbuflock);
1905 1684
1906 list_add_tail(&bf->list, &bf_head); 1685 /* Frame type */
1907 1686
1908 /* set up this buffer */ 1687 bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);
1909 ATH_TXBUF_RESET(bf);
1910 bf->bf_frmlen = txctl->frmlen;
1911 1688
1912 ieee80211_is_data(fc) ? 1689 ieee80211_is_data(fc) ?
1913 (bf->bf_state.bf_type |= BUF_DATA) : 1690 (bf->bf_state.bf_type |= BUF_DATA) :
@@ -1921,120 +1698,158 @@ static int ath_tx_start_dma(struct ath_softc *sc,
1921 (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ? 1698 (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
1922 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) : 1699 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
1923 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE); 1700 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
1701 (sc->hw->conf.ht.enabled && !is_pae(skb) &&
1702 (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ?
1703 (bf->bf_state.bf_type |= BUF_HT) :
1704 (bf->bf_state.bf_type &= ~BUF_HT);
1705
1706 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
1707
1708 /* Crypto */
1709
1710 bf->bf_keytype = get_hw_crypto_keytype(skb);
1711
1712 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1713 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1714 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1715 } else {
1716 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1717 }
1718
1719 /* Assign seqno, tidno */
1720
1721 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR))
1722 assign_aggr_tid_seqno(skb, bf);
1723
1724 /* DMA setup */
1924 1725
1925 bf->bf_flags = txctl->flags;
1926 bf->bf_keytype = txctl->keytype;
1927 tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
1928 rcs = tx_info_priv->rcs;
1929 bf->bf_rcs[0] = rcs[0];
1930 bf->bf_rcs[1] = rcs[1];
1931 bf->bf_rcs[2] = rcs[2];
1932 bf->bf_rcs[3] = rcs[3];
1933 bf->bf_node = an;
1934 bf->bf_mpdu = skb; 1726 bf->bf_mpdu = skb;
1935 bf->bf_buf_addr = sg_dma_address(sg); 1727
1728 bf->bf_dmacontext = pci_map_single(sc->pdev, skb->data,
1729 skb->len, PCI_DMA_TODEVICE);
1730 if (unlikely(pci_dma_mapping_error(sc->pdev, bf->bf_dmacontext))) {
1731 bf->bf_mpdu = NULL;
1732 DPRINTF(sc, ATH_DBG_CONFIG,
1733 "pci_dma_mapping_error() on TX\n");
1734 return -ENOMEM;
1735 }
1736
1737 bf->bf_buf_addr = bf->bf_dmacontext;
1738 return 0;
1739}
1740
1741/* FIXME: tx power */
1742static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1743 struct ath_tx_control *txctl)
1744{
1745 struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
1746 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1747 struct ath_node *an = NULL;
1748 struct list_head bf_head;
1749 struct ath_desc *ds;
1750 struct ath_atx_tid *tid;
1751 struct ath_hal *ah = sc->sc_ah;
1752 int frm_type;
1753
1754 frm_type = get_hw_packet_type(skb);
1755
1756 INIT_LIST_HEAD(&bf_head);
1757 list_add_tail(&bf->list, &bf_head);
1936 1758
1937 /* setup descriptor */ 1759 /* setup descriptor */
1760
1938 ds = bf->bf_desc; 1761 ds = bf->bf_desc;
1939 ds->ds_link = 0; 1762 ds->ds_link = 0;
1940 ds->ds_data = bf->bf_buf_addr; 1763 ds->ds_data = bf->bf_buf_addr;
1941 1764
1942 /* 1765 /* Formulate first tx descriptor with tx controls */
1943 * Save the DMA context in the first ath_buf
1944 */
1945 bf->bf_dmacontext = txctl->dmacontext;
1946 1766
1947 /* 1767 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1948 * Formulate first tx descriptor with tx controls. 1768 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
1949 */ 1769
1950 ath9k_hw_set11n_txdesc(ah, 1770 ath9k_hw_filltxdesc(ah, ds,
1951 ds, 1771 skb->len, /* segment length */
1952 bf->bf_frmlen, /* frame length */ 1772 true, /* first segment */
1953 txctl->atype, /* Atheros packet type */ 1773 true, /* last segment */
1954 min(txctl->txpower, (u16)60), /* txpower */ 1774 ds); /* first descriptor */
1955 txctl->keyix, /* key cache index */
1956 txctl->keytype, /* key type */
1957 txctl->flags); /* flags */
1958 ath9k_hw_filltxdesc(ah,
1959 ds,
1960 sg_dma_len(sg), /* segment length */
1961 true, /* first segment */
1962 (n_sg == 1) ? true : false, /* last segment */
1963 ds); /* first descriptor */
1964 1775
1965 bf->bf_lastfrm = bf; 1776 bf->bf_lastfrm = bf;
1966 (txctl->ht) ?
1967 (bf->bf_state.bf_type |= BUF_HT) :
1968 (bf->bf_state.bf_type &= ~BUF_HT);
1969 1777
1970 spin_lock_bh(&txq->axq_lock); 1778 spin_lock_bh(&txctl->txq->axq_lock);
1971 1779
1972 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) { 1780 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1973 struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno); 1781 tx_info->control.sta) {
1974 if (ath_aggr_query(sc, an, txctl->tidno)) { 1782 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1783 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1784
1785 if (ath_aggr_query(sc, an, bf->bf_tidno)) {
1975 /* 1786 /*
1976 * Try aggregation if it's a unicast data frame 1787 * Try aggregation if it's a unicast data frame
1977 * and the destination is HT capable. 1788 * and the destination is HT capable.
1978 */ 1789 */
1979 ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl); 1790 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1980 } else { 1791 } else {
1981 /* 1792 /*
1982 * Send this frame as regular when ADDBA exchange 1793 * Send this frame as regular when ADDBA
1983 * is neither complete nor pending. 1794 * exchange is neither complete nor pending.
1984 */ 1795 */
1985 ath_tx_send_normal(sc, txq, tid, &bf_head); 1796 ath_tx_send_normal(sc, txctl->txq,
1797 tid, &bf_head);
1986 } 1798 }
1987 } else { 1799 } else {
1988 bf->bf_lastbf = bf; 1800 bf->bf_lastbf = bf;
1989 bf->bf_nframes = 1; 1801 bf->bf_nframes = 1;
1990 ath_buf_set_rate(sc, bf);
1991
1992 if (ieee80211_is_back_req(fc)) {
1993 /* This is required for resuming tid
1994 * during BAR completion */
1995 bf->bf_tidno = txctl->tidno;
1996 }
1997 1802
1998 ath_tx_txqaddbuf(sc, txq, &bf_head); 1803 ath_buf_set_rate(sc, bf);
1804 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
1999 } 1805 }
2000 spin_unlock_bh(&txq->axq_lock); 1806
2001 return 0; 1807 spin_unlock_bh(&txctl->txq->axq_lock);
2002} 1808}
2003 1809
2004static void xmit_map_sg(struct ath_softc *sc, 1810/* Upon failure caller should free skb */
2005 struct sk_buff *skb, 1811int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
2006 struct ath_tx_control *txctl) 1812 struct ath_tx_control *txctl)
2007{ 1813{
2008 struct ath_xmit_status tx_status; 1814 struct ath_buf *bf;
2009 struct ath_atx_tid *tid; 1815 int r;
2010 struct scatterlist sg;
2011 1816
2012 txctl->dmacontext = pci_map_single(sc->pdev, skb->data, 1817 /* Check if a tx buffer is available */
2013 skb->len, PCI_DMA_TODEVICE);
2014 1818
2015 /* setup S/G list */ 1819 bf = ath_tx_get_buffer(sc);
2016 memset(&sg, 0, sizeof(struct scatterlist)); 1820 if (!bf) {
2017 sg_dma_address(&sg) = txctl->dmacontext; 1821 DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n");
2018 sg_dma_len(&sg) = skb->len; 1822 return -1;
1823 }
2019 1824
2020 if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) { 1825 r = ath_tx_setup_buffer(sc, bf, skb, txctl);
2021 /* 1826 if (unlikely(r)) {
2022 * We have to do drop frame here. 1827 struct ath_txq *txq = txctl->txq;
2023 */
2024 pci_unmap_single(sc->pdev, txctl->dmacontext,
2025 skb->len, PCI_DMA_TODEVICE);
2026 1828
2027 tx_status.retries = 0; 1829 DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n");
2028 tx_status.flags = ATH_TX_ERROR;
2029 1830
2030 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) { 1831 /* upon ath_tx_processq() this TX queue will be resumed, we
2031 /* Reclaim the seqno. */ 1832 * guarantee this will happen by knowing beforehand that
2032 tid = ATH_AN_2_TID((struct ath_node *) 1833 * we will at least have to run TX completionon one buffer
2033 txctl->an, txctl->tidno); 1834 * on the queue */
2034 DECR(tid->seq_next, IEEE80211_SEQ_MAX); 1835 spin_lock_bh(&txq->axq_lock);
1836 if (ath_txq_depth(sc, txq->axq_qnum) > 1) {
1837 ieee80211_stop_queue(sc->hw,
1838 skb_get_queue_mapping(skb));
1839 txq->stopped = 1;
2035 } 1840 }
2036 ath_tx_complete(sc, skb, &tx_status, txctl->an); 1841 spin_unlock_bh(&txq->axq_lock);
1842
1843 spin_lock_bh(&sc->tx.txbuflock);
1844 list_add_tail(&bf->list, &sc->tx.txbuf);
1845 spin_unlock_bh(&sc->tx.txbuflock);
1846
1847 return r;
2037 } 1848 }
1849
1850 ath_tx_start_dma(sc, bf, txctl);
1851
1852 return 0;
2038} 1853}
2039 1854
2040/* Initialize TX queue and h/w */ 1855/* Initialize TX queue and h/w */
@@ -2044,26 +1859,25 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2044 int error = 0; 1859 int error = 0;
2045 1860
2046 do { 1861 do {
2047 spin_lock_init(&sc->sc_txbuflock); 1862 spin_lock_init(&sc->tx.txbuflock);
2048 1863
2049 /* Setup tx descriptors */ 1864 /* Setup tx descriptors */
2050 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, 1865 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2051 "tx", nbufs, 1); 1866 "tx", nbufs, 1);
2052 if (error != 0) { 1867 if (error != 0) {
2053 DPRINTF(sc, ATH_DBG_FATAL, 1868 DPRINTF(sc, ATH_DBG_FATAL,
2054 "%s: failed to allocate tx descriptors: %d\n", 1869 "Failed to allocate tx descriptors: %d\n",
2055 __func__, error); 1870 error);
2056 break; 1871 break;
2057 } 1872 }
2058 1873
2059 /* XXX allocate beacon state together with vap */ 1874 /* XXX allocate beacon state together with vap */
2060 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, 1875 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2061 "beacon", ATH_BCBUF, 1); 1876 "beacon", ATH_BCBUF, 1);
2062 if (error != 0) { 1877 if (error != 0) {
2063 DPRINTF(sc, ATH_DBG_FATAL, 1878 DPRINTF(sc, ATH_DBG_FATAL,
2064 "%s: failed to allocate " 1879 "Failed to allocate beacon descriptors: %d\n",
2065 "beacon descripotrs: %d\n", 1880 error);
2066 __func__, error);
2067 break; 1881 break;
2068 } 1882 }
2069 1883
@@ -2080,12 +1894,12 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2080int ath_tx_cleanup(struct ath_softc *sc) 1894int ath_tx_cleanup(struct ath_softc *sc)
2081{ 1895{
2082 /* cleanup beacon descriptors */ 1896 /* cleanup beacon descriptors */
2083 if (sc->sc_bdma.dd_desc_len != 0) 1897 if (sc->beacon.bdma.dd_desc_len != 0)
2084 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); 1898 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
2085 1899
2086 /* cleanup tx descriptors */ 1900 /* cleanup tx descriptors */
2087 if (sc->sc_txdma.dd_desc_len != 0) 1901 if (sc->tx.txdma.dd_desc_len != 0)
2088 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); 1902 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
2089 1903
2090 return 0; 1904 return 0;
2091} 1905}
@@ -2133,15 +1947,15 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
2133 */ 1947 */
2134 return NULL; 1948 return NULL;
2135 } 1949 }
2136 if (qnum >= ARRAY_SIZE(sc->sc_txq)) { 1950 if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
2137 DPRINTF(sc, ATH_DBG_FATAL, 1951 DPRINTF(sc, ATH_DBG_FATAL,
2138 "%s: hal qnum %u out of range, max %u!\n", 1952 "qnum %u out of range, max %u!\n",
2139 __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq)); 1953 qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
2140 ath9k_hw_releasetxqueue(ah, qnum); 1954 ath9k_hw_releasetxqueue(ah, qnum);
2141 return NULL; 1955 return NULL;
2142 } 1956 }
2143 if (!ATH_TXQ_SETUP(sc, qnum)) { 1957 if (!ATH_TXQ_SETUP(sc, qnum)) {
2144 struct ath_txq *txq = &sc->sc_txq[qnum]; 1958 struct ath_txq *txq = &sc->tx.txq[qnum];
2145 1959
2146 txq->axq_qnum = qnum; 1960 txq->axq_qnum = qnum;
2147 txq->axq_link = NULL; 1961 txq->axq_link = NULL;
@@ -2151,11 +1965,10 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
2151 txq->axq_depth = 0; 1965 txq->axq_depth = 0;
2152 txq->axq_aggr_depth = 0; 1966 txq->axq_aggr_depth = 0;
2153 txq->axq_totalqueued = 0; 1967 txq->axq_totalqueued = 0;
2154 txq->axq_intrcnt = 0;
2155 txq->axq_linkbuf = NULL; 1968 txq->axq_linkbuf = NULL;
2156 sc->sc_txqsetup |= 1<<qnum; 1969 sc->tx.txqsetup |= 1<<qnum;
2157 } 1970 }
2158 return &sc->sc_txq[qnum]; 1971 return &sc->tx.txq[qnum];
2159} 1972}
2160 1973
2161/* Reclaim resources for a setup queue */ 1974/* Reclaim resources for a setup queue */
@@ -2163,7 +1976,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
2163void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) 1976void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
2164{ 1977{
2165 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); 1978 ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
2166 sc->sc_txqsetup &= ~(1<<txq->axq_qnum); 1979 sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
2167} 1980}
2168 1981
2169/* 1982/*
@@ -2180,15 +1993,15 @@ int ath_tx_setup(struct ath_softc *sc, int haltype)
2180{ 1993{
2181 struct ath_txq *txq; 1994 struct ath_txq *txq;
2182 1995
2183 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) { 1996 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
2184 DPRINTF(sc, ATH_DBG_FATAL, 1997 DPRINTF(sc, ATH_DBG_FATAL,
2185 "%s: HAL AC %u out of range, max %zu!\n", 1998 "HAL AC %u out of range, max %zu!\n",
2186 __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q)); 1999 haltype, ARRAY_SIZE(sc->tx.hwq_map));
2187 return 0; 2000 return 0;
2188 } 2001 }
2189 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype); 2002 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
2190 if (txq != NULL) { 2003 if (txq != NULL) {
2191 sc->sc_haltype2q[haltype] = txq->axq_qnum; 2004 sc->tx.hwq_map[haltype] = txq->axq_qnum;
2192 return 1; 2005 return 1;
2193 } else 2006 } else
2194 return 0; 2007 return 0;
@@ -2200,20 +2013,19 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
2200 2013
2201 switch (qtype) { 2014 switch (qtype) {
2202 case ATH9K_TX_QUEUE_DATA: 2015 case ATH9K_TX_QUEUE_DATA:
2203 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) { 2016 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
2204 DPRINTF(sc, ATH_DBG_FATAL, 2017 DPRINTF(sc, ATH_DBG_FATAL,
2205 "%s: HAL AC %u out of range, max %zu!\n", 2018 "HAL AC %u out of range, max %zu!\n",
2206 __func__, 2019 haltype, ARRAY_SIZE(sc->tx.hwq_map));
2207 haltype, ARRAY_SIZE(sc->sc_haltype2q));
2208 return -1; 2020 return -1;
2209 } 2021 }
2210 qnum = sc->sc_haltype2q[haltype]; 2022 qnum = sc->tx.hwq_map[haltype];
2211 break; 2023 break;
2212 case ATH9K_TX_QUEUE_BEACON: 2024 case ATH9K_TX_QUEUE_BEACON:
2213 qnum = sc->sc_bhalq; 2025 qnum = sc->beacon.beaconq;
2214 break; 2026 break;
2215 case ATH9K_TX_QUEUE_CAB: 2027 case ATH9K_TX_QUEUE_CAB:
2216 qnum = sc->sc_cabq->axq_qnum; 2028 qnum = sc->beacon.cabq->axq_qnum;
2217 break; 2029 break;
2218 default: 2030 default:
2219 qnum = -1; 2031 qnum = -1;
@@ -2221,6 +2033,34 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
2221 return qnum; 2033 return qnum;
2222} 2034}
2223 2035
2036/* Get a transmit queue, if available */
2037
2038struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
2039{
2040 struct ath_txq *txq = NULL;
2041 int qnum;
2042
2043 qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
2044 txq = &sc->tx.txq[qnum];
2045
2046 spin_lock_bh(&txq->axq_lock);
2047
2048 /* Try to avoid running out of descriptors */
2049 if (txq->axq_depth >= (ATH_TXBUF - 20)) {
2050 DPRINTF(sc, ATH_DBG_FATAL,
2051 "TX queue: %d is full, depth: %d\n",
2052 qnum, txq->axq_depth);
2053 ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
2054 txq->stopped = 1;
2055 spin_unlock_bh(&txq->axq_lock);
2056 return NULL;
2057 }
2058
2059 spin_unlock_bh(&txq->axq_lock);
2060
2061 return txq;
2062}
2063
2224/* Update parameters for a transmit queue */ 2064/* Update parameters for a transmit queue */
2225 2065
2226int ath_txq_update(struct ath_softc *sc, int qnum, 2066int ath_txq_update(struct ath_softc *sc, int qnum,
@@ -2230,17 +2070,17 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
2230 int error = 0; 2070 int error = 0;
2231 struct ath9k_tx_queue_info qi; 2071 struct ath9k_tx_queue_info qi;
2232 2072
2233 if (qnum == sc->sc_bhalq) { 2073 if (qnum == sc->beacon.beaconq) {
2234 /* 2074 /*
2235 * XXX: for beacon queue, we just save the parameter. 2075 * XXX: for beacon queue, we just save the parameter.
2236 * It will be picked up by ath_beaconq_config when 2076 * It will be picked up by ath_beaconq_config when
2237 * it's necessary. 2077 * it's necessary.
2238 */ 2078 */
2239 sc->sc_beacon_qi = *qinfo; 2079 sc->beacon.beacon_qi = *qinfo;
2240 return 0; 2080 return 0;
2241 } 2081 }
2242 2082
2243 ASSERT(sc->sc_txq[qnum].axq_qnum == qnum); 2083 ASSERT(sc->tx.txq[qnum].axq_qnum == qnum);
2244 2084
2245 ath9k_hw_get_txq_props(ah, qnum, &qi); 2085 ath9k_hw_get_txq_props(ah, qnum, &qi);
2246 qi.tqi_aifs = qinfo->tqi_aifs; 2086 qi.tqi_aifs = qinfo->tqi_aifs;
@@ -2251,8 +2091,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
2251 2091
2252 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { 2092 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
2253 DPRINTF(sc, ATH_DBG_FATAL, 2093 DPRINTF(sc, ATH_DBG_FATAL,
2254 "%s: unable to update hardware queue %u!\n", 2094 "Unable to update hardware queue %u!\n", qnum);
2255 __func__, qnum);
2256 error = -EIO; 2095 error = -EIO;
2257 } else { 2096 } else {
2258 ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */ 2097 ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
@@ -2264,7 +2103,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
2264int ath_cabq_update(struct ath_softc *sc) 2103int ath_cabq_update(struct ath_softc *sc)
2265{ 2104{
2266 struct ath9k_tx_queue_info qi; 2105 struct ath9k_tx_queue_info qi;
2267 int qnum = sc->sc_cabq->axq_qnum; 2106 int qnum = sc->beacon.cabq->axq_qnum;
2268 struct ath_beacon_config conf; 2107 struct ath_beacon_config conf;
2269 2108
2270 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); 2109 ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
@@ -2284,27 +2123,6 @@ int ath_cabq_update(struct ath_softc *sc)
2284 return 0; 2123 return 0;
2285} 2124}
2286 2125
2287int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
2288{
2289 struct ath_tx_control txctl;
2290 int error = 0;
2291
2292 memset(&txctl, 0, sizeof(struct ath_tx_control));
2293 error = ath_tx_prepare(sc, skb, &txctl);
2294 if (error == 0)
2295 /*
2296 * Start DMA mapping.
2297 * ath_tx_start_dma() will be called either synchronously
2298 * or asynchrounsly once DMA is complete.
2299 */
2300 xmit_map_sg(sc, skb, &txctl);
2301 else
2302 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2303
2304 /* failed packets will be dropped by the caller */
2305 return error;
2306}
2307
2308/* Deferred processing of transmit interrupt */ 2126/* Deferred processing of transmit interrupt */
2309 2127
2310void ath_tx_tasklet(struct ath_softc *sc) 2128void ath_tx_tasklet(struct ath_softc *sc)
@@ -2319,7 +2137,7 @@ void ath_tx_tasklet(struct ath_softc *sc)
2319 */ 2137 */
2320 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 2138 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2321 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) 2139 if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
2322 ath_tx_processq(sc, &sc->sc_txq[i]); 2140 ath_tx_processq(sc, &sc->tx.txq[i]);
2323 } 2141 }
2324} 2142}
2325 2143
@@ -2351,9 +2169,9 @@ void ath_tx_draintxq(struct ath_softc *sc,
2351 list_del(&bf->list); 2169 list_del(&bf->list);
2352 spin_unlock_bh(&txq->axq_lock); 2170 spin_unlock_bh(&txq->axq_lock);
2353 2171
2354 spin_lock_bh(&sc->sc_txbuflock); 2172 spin_lock_bh(&sc->tx.txbuflock);
2355 list_add_tail(&bf->list, &sc->sc_txbuf); 2173 list_add_tail(&bf->list, &sc->tx.txbuf);
2356 spin_unlock_bh(&sc->sc_txbuflock); 2174 spin_unlock_bh(&sc->tx.txbuflock);
2357 continue; 2175 continue;
2358 } 2176 }
2359 2177
@@ -2378,8 +2196,7 @@ void ath_tx_draintxq(struct ath_softc *sc,
2378 if (sc->sc_flags & SC_OP_TXAGGR) { 2196 if (sc->sc_flags & SC_OP_TXAGGR) {
2379 if (!retry_tx) { 2197 if (!retry_tx) {
2380 spin_lock_bh(&txq->axq_lock); 2198 spin_lock_bh(&txq->axq_lock);
2381 ath_txq_drain_pending_buffers(sc, txq, 2199 ath_txq_drain_pending_buffers(sc, txq);
2382 ATH9K_BH_STATUS_CHANGE);
2383 spin_unlock_bh(&txq->axq_lock); 2200 spin_unlock_bh(&txq->axq_lock);
2384 } 2201 }
2385 } 2202 }
@@ -2392,9 +2209,9 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2392 /* stop beacon queue. The beacon will be freed when 2209 /* stop beacon queue. The beacon will be freed when
2393 * we go to INIT state */ 2210 * we go to INIT state */
2394 if (!(sc->sc_flags & SC_OP_INVALID)) { 2211 if (!(sc->sc_flags & SC_OP_INVALID)) {
2395 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); 2212 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2396 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__, 2213 DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n",
2397 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq)); 2214 ath9k_hw_gettxbuf(sc->sc_ah, sc->beacon.beaconq));
2398 } 2215 }
2399 2216
2400 ath_drain_txdataq(sc, retry_tx); 2217 ath_drain_txdataq(sc, retry_tx);
@@ -2402,72 +2219,47 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2402 2219
2403u32 ath_txq_depth(struct ath_softc *sc, int qnum) 2220u32 ath_txq_depth(struct ath_softc *sc, int qnum)
2404{ 2221{
2405 return sc->sc_txq[qnum].axq_depth; 2222 return sc->tx.txq[qnum].axq_depth;
2406} 2223}
2407 2224
2408u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum) 2225u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
2409{ 2226{
2410 return sc->sc_txq[qnum].axq_aggr_depth; 2227 return sc->tx.txq[qnum].axq_aggr_depth;
2411} 2228}
2412 2229
2413/* Check if an ADDBA is required. A valid node must be passed. */ 2230bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
2414enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
2415 struct ath_node *an,
2416 u8 tidno)
2417{ 2231{
2418 struct ath_atx_tid *txtid; 2232 struct ath_atx_tid *txtid;
2419 DECLARE_MAC_BUF(mac);
2420 2233
2421 if (!(sc->sc_flags & SC_OP_TXAGGR)) 2234 if (!(sc->sc_flags & SC_OP_TXAGGR))
2422 return AGGR_NOT_REQUIRED; 2235 return false;
2423 2236
2424 /* ADDBA exchange must be completed before sending aggregates */
2425 txtid = ATH_AN_2_TID(an, tidno); 2237 txtid = ATH_AN_2_TID(an, tidno);
2426 2238
2427 if (txtid->addba_exchangecomplete) 2239 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
2428 return AGGR_EXCHANGE_DONE; 2240 if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
2429
2430 if (txtid->cleanup_inprogress)
2431 return AGGR_CLEANUP_PROGRESS;
2432
2433 if (txtid->addba_exchangeinprogress)
2434 return AGGR_EXCHANGE_PROGRESS;
2435
2436 if (!txtid->addba_exchangecomplete) {
2437 if (!txtid->addba_exchangeinprogress &&
2438 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) { 2241 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
2439 txtid->addba_exchangeattempts++; 2242 txtid->addba_exchangeattempts++;
2440 return AGGR_REQUIRED; 2243 return true;
2441 } 2244 }
2442 } 2245 }
2443 2246
2444 return AGGR_NOT_REQUIRED; 2247 return false;
2445} 2248}
2446 2249
2447/* Start TX aggregation */ 2250/* Start TX aggregation */
2448 2251
2449int ath_tx_aggr_start(struct ath_softc *sc, 2252int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
2450 const u8 *addr, 2253 u16 tid, u16 *ssn)
2451 u16 tid,
2452 u16 *ssn)
2453{ 2254{
2454 struct ath_atx_tid *txtid; 2255 struct ath_atx_tid *txtid;
2455 struct ath_node *an; 2256 struct ath_node *an;
2456 2257
2457 spin_lock_bh(&sc->node_lock); 2258 an = (struct ath_node *)sta->drv_priv;
2458 an = ath_node_find(sc, (u8 *) addr);
2459 spin_unlock_bh(&sc->node_lock);
2460
2461 if (!an) {
2462 DPRINTF(sc, ATH_DBG_AGGR,
2463 "%s: Node not found to initialize "
2464 "TX aggregation\n", __func__);
2465 return -1;
2466 }
2467 2259
2468 if (sc->sc_flags & SC_OP_TXAGGR) { 2260 if (sc->sc_flags & SC_OP_TXAGGR) {
2469 txtid = ATH_AN_2_TID(an, tid); 2261 txtid = ATH_AN_2_TID(an, tid);
2470 txtid->addba_exchangeinprogress = 1; 2262 txtid->state |= AGGR_ADDBA_PROGRESS;
2471 ath_tx_pause_tid(sc, txtid); 2263 ath_tx_pause_tid(sc, txtid);
2472 } 2264 }
2473 2265
@@ -2476,24 +2268,31 @@ int ath_tx_aggr_start(struct ath_softc *sc,
2476 2268
2477/* Stop tx aggregation */ 2269/* Stop tx aggregation */
2478 2270
2479int ath_tx_aggr_stop(struct ath_softc *sc, 2271int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
2480 const u8 *addr,
2481 u16 tid)
2482{ 2272{
2273 struct ath_node *an = (struct ath_node *)sta->drv_priv;
2274
2275 ath_tx_aggr_teardown(sc, an, tid);
2276 return 0;
2277}
2278
2279/* Resume tx aggregation */
2280
2281void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
2282{
2283 struct ath_atx_tid *txtid;
2483 struct ath_node *an; 2284 struct ath_node *an;
2484 2285
2485 spin_lock_bh(&sc->node_lock); 2286 an = (struct ath_node *)sta->drv_priv;
2486 an = ath_node_find(sc, (u8 *) addr);
2487 spin_unlock_bh(&sc->node_lock);
2488 2287
2489 if (!an) { 2288 if (sc->sc_flags & SC_OP_TXAGGR) {
2490 DPRINTF(sc, ATH_DBG_AGGR, 2289 txtid = ATH_AN_2_TID(an, tid);
2491 "%s: TX aggr stop for non-existent node\n", __func__); 2290 txtid->baw_size =
2492 return -1; 2291 IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
2292 txtid->state |= AGGR_ADDBA_COMPLETE;
2293 txtid->state &= ~AGGR_ADDBA_PROGRESS;
2294 ath_tx_resume_tid(sc, txtid);
2493 } 2295 }
2494
2495 ath_tx_aggr_teardown(sc, an, tid);
2496 return 0;
2497} 2296}
2498 2297
2499/* 2298/*
@@ -2503,21 +2302,18 @@ int ath_tx_aggr_stop(struct ath_softc *sc,
2503 * - Discard all retry frames from the s/w queue. 2302 * - Discard all retry frames from the s/w queue.
2504 */ 2303 */
2505 2304
2506void ath_tx_aggr_teardown(struct ath_softc *sc, 2305void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
2507 struct ath_node *an, u8 tid)
2508{ 2306{
2509 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 2307 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
2510 struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum]; 2308 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
2511 struct ath_buf *bf; 2309 struct ath_buf *bf;
2512 struct list_head bf_head; 2310 struct list_head bf_head;
2513 INIT_LIST_HEAD(&bf_head); 2311 INIT_LIST_HEAD(&bf_head);
2514 2312
2515 DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__); 2313 if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */
2516
2517 if (txtid->cleanup_inprogress) /* cleanup is in progress */
2518 return; 2314 return;
2519 2315
2520 if (!txtid->addba_exchangecomplete) { 2316 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
2521 txtid->addba_exchangeattempts = 0; 2317 txtid->addba_exchangeattempts = 0;
2522 return; 2318 return;
2523 } 2319 }
@@ -2547,9 +2343,9 @@ void ath_tx_aggr_teardown(struct ath_softc *sc,
2547 2343
2548 if (txtid->baw_head != txtid->baw_tail) { 2344 if (txtid->baw_head != txtid->baw_tail) {
2549 spin_unlock_bh(&txq->axq_lock); 2345 spin_unlock_bh(&txq->axq_lock);
2550 txtid->cleanup_inprogress = true; 2346 txtid->state |= AGGR_CLEANUP;
2551 } else { 2347 } else {
2552 txtid->addba_exchangecomplete = 0; 2348 txtid->state &= ~AGGR_ADDBA_COMPLETE;
2553 txtid->addba_exchangeattempts = 0; 2349 txtid->addba_exchangeattempts = 0;
2554 spin_unlock_bh(&txq->axq_lock); 2350 spin_unlock_bh(&txq->axq_lock);
2555 ath_tx_flush_tid(sc, txtid); 2351 ath_tx_flush_tid(sc, txtid);
@@ -2591,10 +2387,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2591 if (tid->paused) /* check next tid to keep h/w busy */ 2387 if (tid->paused) /* check next tid to keep h/w busy */
2592 continue; 2388 continue;
2593 2389
2594 if (!(tid->an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) || 2390 if ((txq->axq_depth % 2) == 0)
2595 ((txq->axq_depth % 2) == 0)) {
2596 ath_tx_sched_aggr(sc, txq, tid); 2391 ath_tx_sched_aggr(sc, txq, tid);
2597 }
2598 2392
2599 /* 2393 /*
2600 * add tid to round-robin queue if more frames 2394 * add tid to round-robin queue if more frames
@@ -2625,72 +2419,67 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2625 2419
2626void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) 2420void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2627{ 2421{
2628 if (sc->sc_flags & SC_OP_TXAGGR) { 2422 struct ath_atx_tid *tid;
2629 struct ath_atx_tid *tid; 2423 struct ath_atx_ac *ac;
2630 struct ath_atx_ac *ac; 2424 int tidno, acno;
2631 int tidno, acno;
2632
2633 sc->sc_ht_info.maxampdu = ATH_AMPDU_LIMIT_DEFAULT;
2634 2425
2635 /* 2426 /*
2636 * Init per tid tx state 2427 * Init per tid tx state
2637 */ 2428 */
2638 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno]; 2429 for (tidno = 0, tid = &an->tid[tidno];
2639 tidno < WME_NUM_TID; 2430 tidno < WME_NUM_TID;
2640 tidno++, tid++) { 2431 tidno++, tid++) {
2641 tid->an = an; 2432 tid->an = an;
2642 tid->tidno = tidno; 2433 tid->tidno = tidno;
2643 tid->seq_start = tid->seq_next = 0; 2434 tid->seq_start = tid->seq_next = 0;
2644 tid->baw_size = WME_MAX_BA; 2435 tid->baw_size = WME_MAX_BA;
2645 tid->baw_head = tid->baw_tail = 0; 2436 tid->baw_head = tid->baw_tail = 0;
2646 tid->sched = false; 2437 tid->sched = false;
2647 tid->paused = false; 2438 tid->paused = false;
2648 tid->cleanup_inprogress = false; 2439 tid->state &= ~AGGR_CLEANUP;
2649 INIT_LIST_HEAD(&tid->buf_q); 2440 INIT_LIST_HEAD(&tid->buf_q);
2650 2441
2651 acno = TID_TO_WME_AC(tidno); 2442 acno = TID_TO_WME_AC(tidno);
2652 tid->ac = &an->an_aggr.tx.ac[acno]; 2443 tid->ac = &an->ac[acno];
2653 2444
2654 /* ADDBA state */ 2445 /* ADDBA state */
2655 tid->addba_exchangecomplete = 0; 2446 tid->state &= ~AGGR_ADDBA_COMPLETE;
2656 tid->addba_exchangeinprogress = 0; 2447 tid->state &= ~AGGR_ADDBA_PROGRESS;
2657 tid->addba_exchangeattempts = 0; 2448 tid->addba_exchangeattempts = 0;
2658 } 2449 }
2659 2450
2660 /* 2451 /*
2661 * Init per ac tx state 2452 * Init per ac tx state
2662 */ 2453 */
2663 for (acno = 0, ac = &an->an_aggr.tx.ac[acno]; 2454 for (acno = 0, ac = &an->ac[acno];
2664 acno < WME_NUM_AC; acno++, ac++) { 2455 acno < WME_NUM_AC; acno++, ac++) {
2665 ac->sched = false; 2456 ac->sched = false;
2666 INIT_LIST_HEAD(&ac->tid_q); 2457 INIT_LIST_HEAD(&ac->tid_q);
2667 2458
2668 switch (acno) { 2459 switch (acno) {
2669 case WME_AC_BE: 2460 case WME_AC_BE:
2670 ac->qnum = ath_tx_get_qnum(sc, 2461 ac->qnum = ath_tx_get_qnum(sc,
2671 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE); 2462 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2672 break; 2463 break;
2673 case WME_AC_BK: 2464 case WME_AC_BK:
2674 ac->qnum = ath_tx_get_qnum(sc, 2465 ac->qnum = ath_tx_get_qnum(sc,
2675 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK); 2466 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2676 break; 2467 break;
2677 case WME_AC_VI: 2468 case WME_AC_VI:
2678 ac->qnum = ath_tx_get_qnum(sc, 2469 ac->qnum = ath_tx_get_qnum(sc,
2679 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI); 2470 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2680 break; 2471 break;
2681 case WME_AC_VO: 2472 case WME_AC_VO:
2682 ac->qnum = ath_tx_get_qnum(sc, 2473 ac->qnum = ath_tx_get_qnum(sc,
2683 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO); 2474 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2684 break; 2475 break;
2685 }
2686 } 2476 }
2687 } 2477 }
2688} 2478}
2689 2479
2690/* Cleanupthe pending buffers for the node. */ 2480/* Cleanupthe pending buffers for the node. */
2691 2481
2692void ath_tx_node_cleanup(struct ath_softc *sc, 2482void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2693 struct ath_node *an, bool bh_flag)
2694{ 2483{
2695 int i; 2484 int i;
2696 struct ath_atx_ac *ac, *ac_tmp; 2485 struct ath_atx_ac *ac, *ac_tmp;
@@ -2698,12 +2487,9 @@ void ath_tx_node_cleanup(struct ath_softc *sc,
2698 struct ath_txq *txq; 2487 struct ath_txq *txq;
2699 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 2488 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2700 if (ATH_TXQ_SETUP(sc, i)) { 2489 if (ATH_TXQ_SETUP(sc, i)) {
2701 txq = &sc->sc_txq[i]; 2490 txq = &sc->tx.txq[i];
2702 2491
2703 if (likely(bh_flag)) 2492 spin_lock(&txq->axq_lock);
2704 spin_lock_bh(&txq->axq_lock);
2705 else
2706 spin_lock(&txq->axq_lock);
2707 2493
2708 list_for_each_entry_safe(ac, 2494 list_for_each_entry_safe(ac,
2709 ac_tmp, &txq->axq_acq, list) { 2495 ac_tmp, &txq->axq_acq, list) {
@@ -2718,36 +2504,14 @@ void ath_tx_node_cleanup(struct ath_softc *sc,
2718 tid_tmp, &ac->tid_q, list) { 2504 tid_tmp, &ac->tid_q, list) {
2719 list_del(&tid->list); 2505 list_del(&tid->list);
2720 tid->sched = false; 2506 tid->sched = false;
2721 ath_tid_drain(sc, txq, tid, bh_flag); 2507 ath_tid_drain(sc, txq, tid);
2722 tid->addba_exchangecomplete = 0; 2508 tid->state &= ~AGGR_ADDBA_COMPLETE;
2723 tid->addba_exchangeattempts = 0; 2509 tid->addba_exchangeattempts = 0;
2724 tid->cleanup_inprogress = false; 2510 tid->state &= ~AGGR_CLEANUP;
2725 } 2511 }
2726 } 2512 }
2727 2513
2728 if (likely(bh_flag)) 2514 spin_unlock(&txq->axq_lock);
2729 spin_unlock_bh(&txq->axq_lock);
2730 else
2731 spin_unlock(&txq->axq_lock);
2732 }
2733 }
2734}
2735
2736/* Cleanup per node transmit state */
2737
2738void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
2739{
2740 if (sc->sc_flags & SC_OP_TXAGGR) {
2741 struct ath_atx_tid *tid;
2742 int tidno, i;
2743
2744 /* Init per tid rx state */
2745 for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno];
2746 tidno < WME_NUM_TID;
2747 tidno++, tid++) {
2748
2749 for (i = 0; i < ATH_TID_MAX_BUFS; i++)
2750 ASSERT(tid->tx_buf[i] == NULL);
2751 } 2515 }
2752 } 2516 }
2753} 2517}
@@ -2758,6 +2522,8 @@ void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2758 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2522 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2759 struct ath_tx_control txctl; 2523 struct ath_tx_control txctl;
2760 2524
2525 memset(&txctl, 0, sizeof(struct ath_tx_control));
2526
2761 /* 2527 /*
2762 * As a temporary workaround, assign seq# here; this will likely need 2528 * As a temporary workaround, assign seq# here; this will likely need
2763 * to be cleaned up to work better with Beacon transmission and virtual 2529 * to be cleaned up to work better with Beacon transmission and virtual
@@ -2766,9 +2532,9 @@ void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2766 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 2532 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2767 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 2533 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2768 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) 2534 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2769 sc->seq_no += 0x10; 2535 sc->tx.seq_no += 0x10;
2770 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 2536 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2771 hdr->seq_ctrl |= cpu_to_le16(sc->seq_no); 2537 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
2772 } 2538 }
2773 2539
2774 /* Add the padding after the header if this is not already done */ 2540 /* Add the padding after the header if this is not already done */
@@ -2776,8 +2542,7 @@ void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2776 if (hdrlen & 3) { 2542 if (hdrlen & 3) {
2777 padsize = hdrlen % 4; 2543 padsize = hdrlen % 4;
2778 if (skb_headroom(skb) < padsize) { 2544 if (skb_headroom(skb) < padsize) {
2779 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding " 2545 DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n");
2780 "failed\n", __func__);
2781 dev_kfree_skb_any(skb); 2546 dev_kfree_skb_any(skb);
2782 return; 2547 return;
2783 } 2548 }
@@ -2785,23 +2550,16 @@ void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
2785 memmove(skb->data, skb->data + padsize, hdrlen); 2550 memmove(skb->data, skb->data + padsize, hdrlen);
2786 } 2551 }
2787 2552
2788 DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n", 2553 txctl.txq = sc->beacon.cabq;
2789 __func__,
2790 skb);
2791 2554
2792 memset(&txctl, 0, sizeof(struct ath_tx_control)); 2555 DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);
2793 txctl.flags = ATH9K_TXDESC_CAB; 2556
2794 if (ath_tx_prepare(sc, skb, &txctl) == 0) { 2557 if (ath_tx_start(sc, skb, &txctl) != 0) {
2795 /* 2558 DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n");
2796 * Start DMA mapping. 2559 goto exit;
2797 * ath_tx_start_dma() will be called either synchronously
2798 * or asynchrounsly once DMA is complete.
2799 */
2800 xmit_map_sg(sc, skb, &txctl);
2801 } else {
2802 ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
2803 DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ failed\n", __func__);
2804 dev_kfree_skb_any(skb);
2805 } 2560 }
2806}
2807 2561
2562 return;
2563exit:
2564 dev_kfree_skb_any(skb);
2565}
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index ecb02bdaab5b..350157fcd080 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -67,7 +67,7 @@
67#include <linux/moduleparam.h> 67#include <linux/moduleparam.h>
68#include <linux/firmware.h> 68#include <linux/firmware.h>
69#include <linux/jiffies.h> 69#include <linux/jiffies.h>
70#include <net/ieee80211.h> 70#include <linux/ieee80211.h>
71#include "atmel.h" 71#include "atmel.h"
72 72
73#define DRIVER_MAJOR 0 73#define DRIVER_MAJOR 0
@@ -569,7 +569,7 @@ static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data);
569static void atmel_command_irq(struct atmel_private *priv); 569static void atmel_command_irq(struct atmel_private *priv);
570static int atmel_validate_channel(struct atmel_private *priv, int channel); 570static int atmel_validate_channel(struct atmel_private *priv, int channel);
571static void atmel_management_frame(struct atmel_private *priv, 571static void atmel_management_frame(struct atmel_private *priv,
572 struct ieee80211_hdr_4addr *header, 572 struct ieee80211_hdr *header,
573 u16 frame_len, u8 rssi); 573 u16 frame_len, u8 rssi);
574static void atmel_management_timer(u_long a); 574static void atmel_management_timer(u_long a);
575static void atmel_send_command(struct atmel_private *priv, int command, 575static void atmel_send_command(struct atmel_private *priv, int command,
@@ -577,7 +577,7 @@ static void atmel_send_command(struct atmel_private *priv, int command,
577static int atmel_send_command_wait(struct atmel_private *priv, int command, 577static int atmel_send_command_wait(struct atmel_private *priv, int command,
578 void *cmd, int cmd_size); 578 void *cmd, int cmd_size);
579static void atmel_transmit_management_frame(struct atmel_private *priv, 579static void atmel_transmit_management_frame(struct atmel_private *priv,
580 struct ieee80211_hdr_4addr *header, 580 struct ieee80211_hdr *header,
581 u8 *body, int body_len); 581 u8 *body, int body_len);
582 582
583static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index); 583static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index);
@@ -785,7 +785,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
785{ 785{
786 static const u8 SNAP_RFC1024[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; 786 static const u8 SNAP_RFC1024[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
787 struct atmel_private *priv = netdev_priv(dev); 787 struct atmel_private *priv = netdev_priv(dev);
788 struct ieee80211_hdr_4addr header; 788 struct ieee80211_hdr header;
789 unsigned long flags; 789 unsigned long flags;
790 u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; 790 u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
791 791
@@ -823,7 +823,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
823 823
824 frame_ctl = IEEE80211_FTYPE_DATA; 824 frame_ctl = IEEE80211_FTYPE_DATA;
825 header.duration_id = 0; 825 header.duration_id = 0;
826 header.seq_ctl = 0; 826 header.seq_ctrl = 0;
827 if (priv->wep_is_on) 827 if (priv->wep_is_on)
828 frame_ctl |= IEEE80211_FCTL_PROTECTED; 828 frame_ctl |= IEEE80211_FCTL_PROTECTED;
829 if (priv->operating_mode == IW_MODE_ADHOC) { 829 if (priv->operating_mode == IW_MODE_ADHOC) {
@@ -840,7 +840,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
840 if (priv->use_wpa) 840 if (priv->use_wpa)
841 memcpy(&header.addr4, SNAP_RFC1024, 6); 841 memcpy(&header.addr4, SNAP_RFC1024, 6);
842 842
843 header.frame_ctl = cpu_to_le16(frame_ctl); 843 header.frame_control = cpu_to_le16(frame_ctl);
844 /* Copy the wireless header into the card */ 844 /* Copy the wireless header into the card */
845 atmel_copy_to_card(dev, buff, (unsigned char *)&header, DATA_FRAME_WS_HEADER_SIZE); 845 atmel_copy_to_card(dev, buff, (unsigned char *)&header, DATA_FRAME_WS_HEADER_SIZE);
846 /* Copy the packet sans its 802.3 header addresses which have been replaced */ 846 /* Copy the packet sans its 802.3 header addresses which have been replaced */
@@ -860,7 +860,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
860} 860}
861 861
862static void atmel_transmit_management_frame(struct atmel_private *priv, 862static void atmel_transmit_management_frame(struct atmel_private *priv,
863 struct ieee80211_hdr_4addr *header, 863 struct ieee80211_hdr *header,
864 u8 *body, int body_len) 864 u8 *body, int body_len)
865{ 865{
866 u16 buff; 866 u16 buff;
@@ -876,7 +876,7 @@ static void atmel_transmit_management_frame(struct atmel_private *priv,
876} 876}
877 877
878static void fast_rx_path(struct atmel_private *priv, 878static void fast_rx_path(struct atmel_private *priv,
879 struct ieee80211_hdr_4addr *header, 879 struct ieee80211_hdr *header,
880 u16 msdu_size, u16 rx_packet_loc, u32 crc) 880 u16 msdu_size, u16 rx_packet_loc, u32 crc)
881{ 881{
882 /* fast path: unfragmented packet copy directly into skbuf */ 882 /* fast path: unfragmented packet copy directly into skbuf */
@@ -914,12 +914,11 @@ static void fast_rx_path(struct atmel_private *priv,
914 } 914 }
915 915
916 memcpy(skbp, header->addr1, 6); /* destination address */ 916 memcpy(skbp, header->addr1, 6); /* destination address */
917 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) 917 if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
918 memcpy(&skbp[6], header->addr3, 6); 918 memcpy(&skbp[6], header->addr3, 6);
919 else 919 else
920 memcpy(&skbp[6], header->addr2, 6); /* source address */ 920 memcpy(&skbp[6], header->addr2, 6); /* source address */
921 921
922 priv->dev->last_rx = jiffies;
923 skb->protocol = eth_type_trans(skb, priv->dev); 922 skb->protocol = eth_type_trans(skb, priv->dev);
924 skb->ip_summed = CHECKSUM_NONE; 923 skb->ip_summed = CHECKSUM_NONE;
925 netif_rx(skb); 924 netif_rx(skb);
@@ -950,7 +949,7 @@ static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
950} 949}
951 950
952static void frag_rx_path(struct atmel_private *priv, 951static void frag_rx_path(struct atmel_private *priv,
953 struct ieee80211_hdr_4addr *header, 952 struct ieee80211_hdr *header,
954 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, 953 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
955 u8 frag_no, int more_frags) 954 u8 frag_no, int more_frags)
956{ 955{
@@ -958,7 +957,7 @@ static void frag_rx_path(struct atmel_private *priv,
958 u8 source[6]; 957 u8 source[6];
959 struct sk_buff *skb; 958 struct sk_buff *skb;
960 959
961 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) 960 if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
962 memcpy(source, header->addr3, 6); 961 memcpy(source, header->addr3, 6);
963 else 962 else
964 memcpy(source, header->addr2, 6); 963 memcpy(source, header->addr2, 6);
@@ -1026,7 +1025,6 @@ static void frag_rx_path(struct atmel_private *priv,
1026 memcpy(skb_put(skb, priv->frag_len + 12), 1025 memcpy(skb_put(skb, priv->frag_len + 12),
1027 priv->rx_buf, 1026 priv->rx_buf,
1028 priv->frag_len + 12); 1027 priv->frag_len + 12);
1029 priv->dev->last_rx = jiffies;
1030 skb->protocol = eth_type_trans(skb, priv->dev); 1028 skb->protocol = eth_type_trans(skb, priv->dev);
1031 skb->ip_summed = CHECKSUM_NONE; 1029 skb->ip_summed = CHECKSUM_NONE;
1032 netif_rx(skb); 1030 netif_rx(skb);
@@ -1041,7 +1039,7 @@ static void frag_rx_path(struct atmel_private *priv,
1041static void rx_done_irq(struct atmel_private *priv) 1039static void rx_done_irq(struct atmel_private *priv)
1042{ 1040{
1043 int i; 1041 int i;
1044 struct ieee80211_hdr_4addr header; 1042 struct ieee80211_hdr header;
1045 1043
1046 for (i = 0; 1044 for (i = 0;
1047 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID && 1045 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID &&
@@ -1068,10 +1066,10 @@ static void rx_done_irq(struct atmel_private *priv)
1068 goto next; 1066 goto next;
1069 } 1067 }
1070 1068
1071 /* Get header as far as end of seq_ctl */ 1069 /* Get header as far as end of seq_ctrl */
1072 atmel_copy_to_host(priv->dev, (char *)&header, rx_packet_loc, 24); 1070 atmel_copy_to_host(priv->dev, (char *)&header, rx_packet_loc, 24);
1073 frame_ctl = le16_to_cpu(header.frame_ctl); 1071 frame_ctl = le16_to_cpu(header.frame_control);
1074 seq_control = le16_to_cpu(header.seq_ctl); 1072 seq_control = le16_to_cpu(header.seq_ctrl);
1075 1073
1076 /* probe for CRC use here if needed once five packets have 1074 /* probe for CRC use here if needed once five packets have
1077 arrived with the same crc status, we assume we know what's 1075 arrived with the same crc status, we assume we know what's
@@ -1479,7 +1477,6 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
1479 struct net_device *dev; 1477 struct net_device *dev;
1480 struct atmel_private *priv; 1478 struct atmel_private *priv;
1481 int rc; 1479 int rc;
1482 DECLARE_MAC_BUF(mac);
1483 1480
1484 /* Create the network device object. */ 1481 /* Create the network device object. */
1485 dev = alloc_etherdev(sizeof(*priv)); 1482 dev = alloc_etherdev(sizeof(*priv));
@@ -1591,8 +1588,8 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
1591 if (!ent) 1588 if (!ent)
1592 printk(KERN_WARNING "atmel: unable to create /proc entry.\n"); 1589 printk(KERN_WARNING "atmel: unable to create /proc entry.\n");
1593 1590
1594 printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %s\n", 1591 printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %pM\n",
1595 dev->name, DRIVER_MAJOR, DRIVER_MINOR, print_mac(mac, dev->dev_addr)); 1592 dev->name, DRIVER_MAJOR, DRIVER_MINOR, dev->dev_addr);
1596 1593
1597 return dev; 1594 return dev;
1598 1595
@@ -1822,7 +1819,7 @@ static int atmel_set_encodeext(struct net_device *dev,
1822 /* Determine and validate the key index */ 1819 /* Determine and validate the key index */
1823 idx = encoding->flags & IW_ENCODE_INDEX; 1820 idx = encoding->flags & IW_ENCODE_INDEX;
1824 if (idx) { 1821 if (idx) {
1825 if (idx < 1 || idx > WEP_KEYS) 1822 if (idx < 1 || idx > 4)
1826 return -EINVAL; 1823 return -EINVAL;
1827 idx--; 1824 idx--;
1828 } else 1825 } else
@@ -1885,7 +1882,7 @@ static int atmel_get_encodeext(struct net_device *dev,
1885 1882
1886 idx = encoding->flags & IW_ENCODE_INDEX; 1883 idx = encoding->flags & IW_ENCODE_INDEX;
1887 if (idx) { 1884 if (idx) {
1888 if (idx < 1 || idx > WEP_KEYS) 1885 if (idx < 1 || idx > 4)
1889 return -EINVAL; 1886 return -EINVAL;
1890 idx--; 1887 idx--;
1891 } else 1888 } else
@@ -2800,7 +2797,7 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability,
2800 u8 channel) 2797 u8 channel)
2801{ 2798{
2802 int rejoin = 0; 2799 int rejoin = 0;
2803 int new = capability & MFIE_TYPE_POWER_CONSTRAINT ? 2800 int new = capability & WLAN_CAPABILITY_SHORT_PREAMBLE ?
2804 SHORT_PREAMBLE : LONG_PREAMBLE; 2801 SHORT_PREAMBLE : LONG_PREAMBLE;
2805 2802
2806 if (priv->preamble != new) { 2803 if (priv->preamble != new) {
@@ -2829,19 +2826,19 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability,
2829static void send_authentication_request(struct atmel_private *priv, u16 system, 2826static void send_authentication_request(struct atmel_private *priv, u16 system,
2830 u8 *challenge, int challenge_len) 2827 u8 *challenge, int challenge_len)
2831{ 2828{
2832 struct ieee80211_hdr_4addr header; 2829 struct ieee80211_hdr header;
2833 struct auth_body auth; 2830 struct auth_body auth;
2834 2831
2835 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); 2832 header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
2836 header.duration_id = cpu_to_le16(0x8000); 2833 header.duration_id = cpu_to_le16(0x8000);
2837 header.seq_ctl = 0; 2834 header.seq_ctrl = 0;
2838 memcpy(header.addr1, priv->CurrentBSSID, 6); 2835 memcpy(header.addr1, priv->CurrentBSSID, 6);
2839 memcpy(header.addr2, priv->dev->dev_addr, 6); 2836 memcpy(header.addr2, priv->dev->dev_addr, 6);
2840 memcpy(header.addr3, priv->CurrentBSSID, 6); 2837 memcpy(header.addr3, priv->CurrentBSSID, 6);
2841 2838
2842 if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1) 2839 if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
2843 /* no WEP for authentication frames with TrSeqNo 1 */ 2840 /* no WEP for authentication frames with TrSeqNo 1 */
2844 header.frame_ctl |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 2841 header.frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2845 2842
2846 auth.alg = cpu_to_le16(system); 2843 auth.alg = cpu_to_le16(system);
2847 2844
@@ -2864,7 +2861,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2864{ 2861{
2865 u8 *ssid_el_p; 2862 u8 *ssid_el_p;
2866 int bodysize; 2863 int bodysize;
2867 struct ieee80211_hdr_4addr header; 2864 struct ieee80211_hdr header;
2868 struct ass_req_format { 2865 struct ass_req_format {
2869 __le16 capability; 2866 __le16 capability;
2870 __le16 listen_interval; 2867 __le16 listen_interval;
@@ -2877,10 +2874,10 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2877 u8 rates[4]; 2874 u8 rates[4];
2878 } body; 2875 } body;
2879 2876
2880 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2877 header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2881 (is_reassoc ? IEEE80211_STYPE_REASSOC_REQ : IEEE80211_STYPE_ASSOC_REQ)); 2878 (is_reassoc ? IEEE80211_STYPE_REASSOC_REQ : IEEE80211_STYPE_ASSOC_REQ));
2882 header.duration_id = cpu_to_le16(0x8000); 2879 header.duration_id = cpu_to_le16(0x8000);
2883 header.seq_ctl = 0; 2880 header.seq_ctrl = 0;
2884 2881
2885 memcpy(header.addr1, priv->CurrentBSSID, 6); 2882 memcpy(header.addr1, priv->CurrentBSSID, 6);
2886 memcpy(header.addr2, priv->dev->dev_addr, 6); 2883 memcpy(header.addr2, priv->dev->dev_addr, 6);
@@ -2890,7 +2887,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2890 if (priv->wep_is_on) 2887 if (priv->wep_is_on)
2891 body.capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY); 2888 body.capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
2892 if (priv->preamble == SHORT_PREAMBLE) 2889 if (priv->preamble == SHORT_PREAMBLE)
2893 body.capability |= cpu_to_le16(MFIE_TYPE_POWER_CONSTRAINT); 2890 body.capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
2894 2891
2895 body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period); 2892 body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period);
2896 2893
@@ -2904,10 +2901,10 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2904 bodysize = 12 + priv->SSID_size; 2901 bodysize = 12 + priv->SSID_size;
2905 } 2902 }
2906 2903
2907 ssid_el_p[0] = MFIE_TYPE_SSID; 2904 ssid_el_p[0] = WLAN_EID_SSID;
2908 ssid_el_p[1] = priv->SSID_size; 2905 ssid_el_p[1] = priv->SSID_size;
2909 memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size); 2906 memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size);
2910 ssid_el_p[2 + priv->SSID_size] = MFIE_TYPE_RATES; 2907 ssid_el_p[2 + priv->SSID_size] = WLAN_EID_SUPP_RATES;
2911 ssid_el_p[3 + priv->SSID_size] = 4; /* len of suported rates */ 2908 ssid_el_p[3 + priv->SSID_size] = 4; /* len of suported rates */
2912 memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4); 2909 memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4);
2913 2910
@@ -2915,9 +2912,9 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2915} 2912}
2916 2913
2917static int is_frame_from_current_bss(struct atmel_private *priv, 2914static int is_frame_from_current_bss(struct atmel_private *priv,
2918 struct ieee80211_hdr_4addr *header) 2915 struct ieee80211_hdr *header)
2919{ 2916{
2920 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) 2917 if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
2921 return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0; 2918 return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0;
2922 else 2919 else
2923 return memcmp(header->addr2, priv->CurrentBSSID, 6) == 0; 2920 return memcmp(header->addr2, priv->CurrentBSSID, 6) == 0;
@@ -2965,7 +2962,7 @@ static int retrieve_bss(struct atmel_private *priv)
2965} 2962}
2966 2963
2967static void store_bss_info(struct atmel_private *priv, 2964static void store_bss_info(struct atmel_private *priv,
2968 struct ieee80211_hdr_4addr *header, u16 capability, 2965 struct ieee80211_hdr *header, u16 capability,
2969 u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len, 2966 u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len,
2970 u8 *ssid, int is_beacon) 2967 u8 *ssid, int is_beacon)
2971{ 2968{
@@ -3004,7 +3001,7 @@ static void store_bss_info(struct atmel_private *priv,
3004 else if (capability & WLAN_CAPABILITY_ESS) 3001 else if (capability & WLAN_CAPABILITY_ESS)
3005 priv->BSSinfo[index].BSStype =IW_MODE_INFRA; 3002 priv->BSSinfo[index].BSStype =IW_MODE_INFRA;
3006 3003
3007 priv->BSSinfo[index].preamble = capability & MFIE_TYPE_POWER_CONSTRAINT ? 3004 priv->BSSinfo[index].preamble = capability & WLAN_CAPABILITY_SHORT_PREAMBLE ?
3008 SHORT_PREAMBLE : LONG_PREAMBLE; 3005 SHORT_PREAMBLE : LONG_PREAMBLE;
3009} 3006}
3010 3007
@@ -3040,7 +3037,7 @@ static void authenticate(struct atmel_private *priv, u16 frame_len)
3040 } 3037 }
3041 } else if (system == WLAN_AUTH_SHARED_KEY) { 3038 } else if (system == WLAN_AUTH_SHARED_KEY) {
3042 if (trans_seq_no == 0x0002 && 3039 if (trans_seq_no == 0x0002 &&
3043 auth->el_id == MFIE_TYPE_CHALLENGE) { 3040 auth->el_id == WLAN_EID_CHALLENGE) {
3044 send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len); 3041 send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len);
3045 return; 3042 return;
3046 } else if (trans_seq_no == 0x0004) { 3043 } else if (trans_seq_no == 0x0004) {
@@ -3183,7 +3180,7 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
3183 } 3180 }
3184} 3181}
3185 3182
3186void atmel_join_bss(struct atmel_private *priv, int bss_index) 3183static void atmel_join_bss(struct atmel_private *priv, int bss_index)
3187{ 3184{
3188 struct bss_info *bss = &priv->BSSinfo[bss_index]; 3185 struct bss_info *bss = &priv->BSSinfo[bss_index];
3189 3186
@@ -3291,12 +3288,12 @@ static void atmel_smooth_qual(struct atmel_private *priv)
3291 3288
3292/* deals with incoming managment frames. */ 3289/* deals with incoming managment frames. */
3293static void atmel_management_frame(struct atmel_private *priv, 3290static void atmel_management_frame(struct atmel_private *priv,
3294 struct ieee80211_hdr_4addr *header, 3291 struct ieee80211_hdr *header,
3295 u16 frame_len, u8 rssi) 3292 u16 frame_len, u8 rssi)
3296{ 3293{
3297 u16 subtype; 3294 u16 subtype;
3298 3295
3299 subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE; 3296 subtype = le16_to_cpu(header->frame_control) & IEEE80211_FCTL_STYPE;
3300 switch (subtype) { 3297 switch (subtype) {
3301 case IEEE80211_STYPE_BEACON: 3298 case IEEE80211_STYPE_BEACON:
3302 case IEEE80211_STYPE_PROBE_RESP: 3299 case IEEE80211_STYPE_PROBE_RESP:
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 427b8203e3f9..a53c378e7484 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -718,7 +718,6 @@ struct b43_wldev {
718 718
719 bool bad_frames_preempt; /* Use "Bad Frames Preemption" (default off) */ 719 bool bad_frames_preempt; /* Use "Bad Frames Preemption" (default off) */
720 bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */ 720 bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */
721 bool short_slot; /* TRUE, if short slot timing is enabled. */
722 bool radio_hw_enable; /* saved state of radio hardware enabled state */ 721 bool radio_hw_enable; /* saved state of radio hardware enabled state */
723 bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */ 722 bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */
724 723
diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
index 06a01da80160..e04fc91f569e 100644
--- a/drivers/net/wireless/b43/debugfs.c
+++ b/drivers/net/wireless/b43/debugfs.c
@@ -731,6 +731,7 @@ static void b43_add_dynamic_debug(struct b43_wldev *dev)
731 add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, 0); 731 add_dyn_dbg("debug_pwork_stop", B43_DBG_PWORK_STOP, 0);
732 add_dyn_dbg("debug_lo", B43_DBG_LO, 0); 732 add_dyn_dbg("debug_lo", B43_DBG_LO, 0);
733 add_dyn_dbg("debug_firmware", B43_DBG_FIRMWARE, 0); 733 add_dyn_dbg("debug_firmware", B43_DBG_FIRMWARE, 0);
734 add_dyn_dbg("debug_keys", B43_DBG_KEYS, 0);
734 735
735#undef add_dyn_dbg 736#undef add_dyn_dbg
736} 737}
diff --git a/drivers/net/wireless/b43/debugfs.h b/drivers/net/wireless/b43/debugfs.h
index 22ffd02ba554..7886cbe2d1d1 100644
--- a/drivers/net/wireless/b43/debugfs.h
+++ b/drivers/net/wireless/b43/debugfs.h
@@ -12,6 +12,7 @@ enum b43_dyndbg { /* Dynamic debugging features */
12 B43_DBG_PWORK_STOP, 12 B43_DBG_PWORK_STOP,
13 B43_DBG_LO, 13 B43_DBG_LO,
14 B43_DBG_FIRMWARE, 14 B43_DBG_FIRMWARE,
15 B43_DBG_KEYS,
15 __B43_NR_DYNDBG, 16 __B43_NR_DYNDBG,
16}; 17};
17 18
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 098f886976f6..6d65a02b7052 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1387,13 +1387,11 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1387 1387
1388 info = IEEE80211_SKB_CB(meta->skb); 1388 info = IEEE80211_SKB_CB(meta->skb);
1389 1389
1390 memset(&info->status, 0, sizeof(info->status));
1391
1392 /* 1390 /*
1393 * Call back to inform the ieee80211 subsystem about 1391 * Call back to inform the ieee80211 subsystem about
1394 * the status of the transmission. 1392 * the status of the transmission.
1395 */ 1393 */
1396 frame_succeed = b43_fill_txstatus_report(info, status); 1394 frame_succeed = b43_fill_txstatus_report(dev, info, status);
1397#ifdef CONFIG_B43_DEBUG 1395#ifdef CONFIG_B43_DEBUG
1398 if (frame_succeed) 1396 if (frame_succeed)
1399 ring->nr_succeed_tx_packets++; 1397 ring->nr_succeed_tx_packets++;
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 14c44df584d0..7b31a327b24a 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -703,13 +703,11 @@ static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
703static void b43_short_slot_timing_enable(struct b43_wldev *dev) 703static void b43_short_slot_timing_enable(struct b43_wldev *dev)
704{ 704{
705 b43_set_slot_time(dev, 9); 705 b43_set_slot_time(dev, 9);
706 dev->short_slot = 1;
707} 706}
708 707
709static void b43_short_slot_timing_disable(struct b43_wldev *dev) 708static void b43_short_slot_timing_disable(struct b43_wldev *dev)
710{ 709{
711 b43_set_slot_time(dev, 20); 710 b43_set_slot_time(dev, 20);
712 dev->short_slot = 0;
713} 711}
714 712
715/* Enable a Generic IRQ. "mask" is the mask of which IRQs to enable. 713/* Enable a Generic IRQ. "mask" is the mask of which IRQs to enable.
@@ -994,6 +992,52 @@ static void b43_clear_keys(struct b43_wldev *dev)
994 b43_key_clear(dev, i); 992 b43_key_clear(dev, i);
995} 993}
996 994
995static void b43_dump_keymemory(struct b43_wldev *dev)
996{
997 unsigned int i, index, offset;
998 DECLARE_MAC_BUF(macbuf);
999 u8 mac[ETH_ALEN];
1000 u16 algo;
1001 u32 rcmta0;
1002 u16 rcmta1;
1003 u64 hf;
1004 struct b43_key *key;
1005
1006 if (!b43_debug(dev, B43_DBG_KEYS))
1007 return;
1008
1009 hf = b43_hf_read(dev);
1010 b43dbg(dev->wl, "Hardware key memory dump: USEDEFKEYS=%u\n",
1011 !!(hf & B43_HF_USEDEFKEYS));
1012 for (index = 0; index < dev->max_nr_keys; index++) {
1013 key = &(dev->key[index]);
1014 printk(KERN_DEBUG "Key slot %02u: %s",
1015 index, (key->keyconf == NULL) ? " " : "*");
1016 offset = dev->ktp + (index * B43_SEC_KEYSIZE);
1017 for (i = 0; i < B43_SEC_KEYSIZE; i += 2) {
1018 u16 tmp = b43_shm_read16(dev, B43_SHM_SHARED, offset + i);
1019 printk("%02X%02X", (tmp & 0xFF), ((tmp >> 8) & 0xFF));
1020 }
1021
1022 algo = b43_shm_read16(dev, B43_SHM_SHARED,
1023 B43_SHM_SH_KEYIDXBLOCK + (index * 2));
1024 printk(" Algo: %04X/%02X", algo, key->algorithm);
1025
1026 if (index >= 4) {
1027 rcmta0 = b43_shm_read32(dev, B43_SHM_RCMTA,
1028 ((index - 4) * 2) + 0);
1029 rcmta1 = b43_shm_read16(dev, B43_SHM_RCMTA,
1030 ((index - 4) * 2) + 1);
1031 *((__le32 *)(&mac[0])) = cpu_to_le32(rcmta0);
1032 *((__le16 *)(&mac[4])) = cpu_to_le16(rcmta1);
1033 printk(" MAC: %s",
1034 print_mac(macbuf, mac));
1035 } else
1036 printk(" DEFAULT KEY");
1037 printk("\n");
1038 }
1039}
1040
997void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) 1041void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
998{ 1042{
999 u32 macctl; 1043 u32 macctl;
@@ -1339,25 +1383,6 @@ u8 b43_ieee80211_antenna_sanitize(struct b43_wldev *dev,
1339 return antenna_nr; 1383 return antenna_nr;
1340} 1384}
1341 1385
1342static int b43_antenna_from_ieee80211(struct b43_wldev *dev, u8 antenna)
1343{
1344 antenna = b43_ieee80211_antenna_sanitize(dev, antenna);
1345 switch (antenna) {
1346 case 0: /* default/diversity */
1347 return B43_ANTENNA_DEFAULT;
1348 case 1: /* Antenna 0 */
1349 return B43_ANTENNA0;
1350 case 2: /* Antenna 1 */
1351 return B43_ANTENNA1;
1352 case 3: /* Antenna 2 */
1353 return B43_ANTENNA2;
1354 case 4: /* Antenna 3 */
1355 return B43_ANTENNA3;
1356 default:
1357 return B43_ANTENNA_DEFAULT;
1358 }
1359}
1360
1361/* Convert a b43 antenna number value to the PHY TX control value. */ 1386/* Convert a b43 antenna number value to the PHY TX control value. */
1362static u16 b43_antenna_to_phyctl(int antenna) 1387static u16 b43_antenna_to_phyctl(int antenna)
1363{ 1388{
@@ -1399,7 +1424,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
1399 len, ram_offset, shm_size_offset, rate); 1424 len, ram_offset, shm_size_offset, rate);
1400 1425
1401 /* Write the PHY TX control parameters. */ 1426 /* Write the PHY TX control parameters. */
1402 antenna = b43_antenna_from_ieee80211(dev, info->antenna_sel_tx); 1427 antenna = B43_ANTENNA_DEFAULT;
1403 antenna = b43_antenna_to_phyctl(antenna); 1428 antenna = b43_antenna_to_phyctl(antenna);
1404 ctl = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL); 1429 ctl = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL);
1405 /* We can't send beacons with short preamble. Would get PHY errors. */ 1430 /* We can't send beacons with short preamble. Would get PHY errors. */
@@ -1693,25 +1718,6 @@ static void b43_update_templates(struct b43_wl *wl)
1693 queue_work(wl->hw->workqueue, &wl->beacon_update_trigger); 1718 queue_work(wl->hw->workqueue, &wl->beacon_update_trigger);
1694} 1719}
1695 1720
1696static void b43_set_ssid(struct b43_wldev *dev, const u8 * ssid, u8 ssid_len)
1697{
1698 u32 tmp;
1699 u16 i, len;
1700
1701 len = min((u16) ssid_len, (u16) 0x100);
1702 for (i = 0; i < len; i += sizeof(u32)) {
1703 tmp = (u32) (ssid[i + 0]);
1704 if (i + 1 < len)
1705 tmp |= (u32) (ssid[i + 1]) << 8;
1706 if (i + 2 < len)
1707 tmp |= (u32) (ssid[i + 2]) << 16;
1708 if (i + 3 < len)
1709 tmp |= (u32) (ssid[i + 3]) << 24;
1710 b43_shm_write32(dev, B43_SHM_SHARED, 0x380 + i, tmp);
1711 }
1712 b43_shm_write16(dev, B43_SHM_SHARED, 0x48, len);
1713}
1714
1715static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int) 1721static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int)
1716{ 1722{
1717 b43_time_lock(dev); 1723 b43_time_lock(dev);
@@ -3339,15 +3345,31 @@ init_failure:
3339 return err; 3345 return err;
3340} 3346}
3341 3347
3342static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 3348/* Write the short and long frame retry limit values. */
3349static void b43_set_retry_limits(struct b43_wldev *dev,
3350 unsigned int short_retry,
3351 unsigned int long_retry)
3352{
3353 /* The retry limit is a 4-bit counter. Enforce this to avoid overflowing
3354 * the chip-internal counter. */
3355 short_retry = min(short_retry, (unsigned int)0xF);
3356 long_retry = min(long_retry, (unsigned int)0xF);
3357
3358 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_SRLIMIT,
3359 short_retry);
3360 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_LRLIMIT,
3361 long_retry);
3362}
3363
3364static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3343{ 3365{
3344 struct b43_wl *wl = hw_to_b43_wl(hw); 3366 struct b43_wl *wl = hw_to_b43_wl(hw);
3345 struct b43_wldev *dev; 3367 struct b43_wldev *dev;
3346 struct b43_phy *phy; 3368 struct b43_phy *phy;
3369 struct ieee80211_conf *conf = &hw->conf;
3347 unsigned long flags; 3370 unsigned long flags;
3348 int antenna; 3371 int antenna;
3349 int err = 0; 3372 int err = 0;
3350 u32 savedirqs;
3351 3373
3352 mutex_lock(&wl->mutex); 3374 mutex_lock(&wl->mutex);
3353 3375
@@ -3358,33 +3380,20 @@ static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
3358 dev = wl->current_dev; 3380 dev = wl->current_dev;
3359 phy = &dev->phy; 3381 phy = &dev->phy;
3360 3382
3361 /* Disable IRQs while reconfiguring the device. 3383 b43_mac_suspend(dev);
3362 * This makes it possible to drop the spinlock throughout 3384
3363 * the reconfiguration process. */ 3385 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
3364 spin_lock_irqsave(&wl->irq_lock, flags); 3386 b43_set_retry_limits(dev, conf->short_frame_max_tx_count,
3365 if (b43_status(dev) < B43_STAT_STARTED) { 3387 conf->long_frame_max_tx_count);
3366 spin_unlock_irqrestore(&wl->irq_lock, flags); 3388 changed &= ~IEEE80211_CONF_CHANGE_RETRY_LIMITS;
3367 goto out_unlock_mutex; 3389 if (!changed)
3368 } 3390 goto out_mac_enable;
3369 savedirqs = b43_interrupt_disable(dev, B43_IRQ_ALL);
3370 spin_unlock_irqrestore(&wl->irq_lock, flags);
3371 b43_synchronize_irq(dev);
3372 3391
3373 /* Switch to the requested channel. 3392 /* Switch to the requested channel.
3374 * The firmware takes care of races with the TX handler. */ 3393 * The firmware takes care of races with the TX handler. */
3375 if (conf->channel->hw_value != phy->channel) 3394 if (conf->channel->hw_value != phy->channel)
3376 b43_switch_channel(dev, conf->channel->hw_value); 3395 b43_switch_channel(dev, conf->channel->hw_value);
3377 3396
3378 /* Enable/Disable ShortSlot timing. */
3379 if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) !=
3380 dev->short_slot) {
3381 B43_WARN_ON(phy->type != B43_PHYTYPE_G);
3382 if (conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)
3383 b43_short_slot_timing_enable(dev);
3384 else
3385 b43_short_slot_timing_disable(dev);
3386 }
3387
3388 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_RADIOTAP); 3397 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
3389 3398
3390 /* Adjust the desired TX power level. */ 3399 /* Adjust the desired TX power level. */
@@ -3399,9 +3408,9 @@ static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
3399 } 3408 }
3400 3409
3401 /* Antennas for RX and management frame TX. */ 3410 /* Antennas for RX and management frame TX. */
3402 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_tx); 3411 antenna = B43_ANTENNA_DEFAULT;
3403 b43_mgmtframe_txantenna(dev, antenna); 3412 b43_mgmtframe_txantenna(dev, antenna);
3404 antenna = b43_antenna_from_ieee80211(dev, conf->antenna_sel_rx); 3413 antenna = B43_ANTENNA_DEFAULT;
3405 if (phy->ops->set_rx_antenna) 3414 if (phy->ops->set_rx_antenna)
3406 phy->ops->set_rx_antenna(dev, antenna); 3415 phy->ops->set_rx_antenna(dev, antenna);
3407 3416
@@ -3425,16 +3434,91 @@ static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
3425 } 3434 }
3426 } 3435 }
3427 3436
3428 spin_lock_irqsave(&wl->irq_lock, flags); 3437out_mac_enable:
3429 b43_interrupt_enable(dev, savedirqs); 3438 b43_mac_enable(dev);
3430 mmiowb(); 3439out_unlock_mutex:
3431 spin_unlock_irqrestore(&wl->irq_lock, flags);
3432 out_unlock_mutex:
3433 mutex_unlock(&wl->mutex); 3440 mutex_unlock(&wl->mutex);
3434 3441
3435 return err; 3442 return err;
3436} 3443}
3437 3444
3445static void b43_update_basic_rates(struct b43_wldev *dev, u64 brates)
3446{
3447 struct ieee80211_supported_band *sband =
3448 dev->wl->hw->wiphy->bands[b43_current_band(dev->wl)];
3449 struct ieee80211_rate *rate;
3450 int i;
3451 u16 basic, direct, offset, basic_offset, rateptr;
3452
3453 for (i = 0; i < sband->n_bitrates; i++) {
3454 rate = &sband->bitrates[i];
3455
3456 if (b43_is_cck_rate(rate->hw_value)) {
3457 direct = B43_SHM_SH_CCKDIRECT;
3458 basic = B43_SHM_SH_CCKBASIC;
3459 offset = b43_plcp_get_ratecode_cck(rate->hw_value);
3460 offset &= 0xF;
3461 } else {
3462 direct = B43_SHM_SH_OFDMDIRECT;
3463 basic = B43_SHM_SH_OFDMBASIC;
3464 offset = b43_plcp_get_ratecode_ofdm(rate->hw_value);
3465 offset &= 0xF;
3466 }
3467
3468 rate = ieee80211_get_response_rate(sband, brates, rate->bitrate);
3469
3470 if (b43_is_cck_rate(rate->hw_value)) {
3471 basic_offset = b43_plcp_get_ratecode_cck(rate->hw_value);
3472 basic_offset &= 0xF;
3473 } else {
3474 basic_offset = b43_plcp_get_ratecode_ofdm(rate->hw_value);
3475 basic_offset &= 0xF;
3476 }
3477
3478 /*
3479 * Get the pointer that we need to point to
3480 * from the direct map
3481 */
3482 rateptr = b43_shm_read16(dev, B43_SHM_SHARED,
3483 direct + 2 * basic_offset);
3484 /* and write it to the basic map */
3485 b43_shm_write16(dev, B43_SHM_SHARED, basic + 2 * offset,
3486 rateptr);
3487 }
3488}
3489
3490static void b43_op_bss_info_changed(struct ieee80211_hw *hw,
3491 struct ieee80211_vif *vif,
3492 struct ieee80211_bss_conf *conf,
3493 u32 changed)
3494{
3495 struct b43_wl *wl = hw_to_b43_wl(hw);
3496 struct b43_wldev *dev;
3497
3498 mutex_lock(&wl->mutex);
3499
3500 dev = wl->current_dev;
3501 if (!dev || b43_status(dev) < B43_STAT_STARTED)
3502 goto out_unlock_mutex;
3503 b43_mac_suspend(dev);
3504
3505 if (changed & BSS_CHANGED_BASIC_RATES)
3506 b43_update_basic_rates(dev, conf->basic_rates);
3507
3508 if (changed & BSS_CHANGED_ERP_SLOT) {
3509 if (conf->use_short_slot)
3510 b43_short_slot_timing_enable(dev);
3511 else
3512 b43_short_slot_timing_disable(dev);
3513 }
3514
3515 b43_mac_enable(dev);
3516out_unlock_mutex:
3517 mutex_unlock(&wl->mutex);
3518
3519 return;
3520}
3521
3438static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3522static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3439 const u8 *local_addr, const u8 *addr, 3523 const u8 *local_addr, const u8 *addr,
3440 struct ieee80211_key_conf *key) 3524 struct ieee80211_key_conf *key)
@@ -3445,7 +3529,6 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3445 u8 algorithm; 3529 u8 algorithm;
3446 u8 index; 3530 u8 index;
3447 int err; 3531 int err;
3448 DECLARE_MAC_BUF(mac);
3449 3532
3450 if (modparam_nohwcrypt) 3533 if (modparam_nohwcrypt)
3451 return -ENOSPC; /* User disabled HW-crypto */ 3534 return -ENOSPC; /* User disabled HW-crypto */
@@ -3528,15 +3611,18 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3528 default: 3611 default:
3529 B43_WARN_ON(1); 3612 B43_WARN_ON(1);
3530 } 3613 }
3614
3531out_unlock: 3615out_unlock:
3532 spin_unlock_irqrestore(&wl->irq_lock, flags);
3533 mutex_unlock(&wl->mutex);
3534 if (!err) { 3616 if (!err) {
3535 b43dbg(wl, "%s hardware based encryption for keyidx: %d, " 3617 b43dbg(wl, "%s hardware based encryption for keyidx: %d, "
3536 "mac: %s\n", 3618 "mac: %pM\n",
3537 cmd == SET_KEY ? "Using" : "Disabling", key->keyidx, 3619 cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
3538 print_mac(mac, addr)); 3620 addr);
3621 b43_dump_keymemory(dev);
3539 } 3622 }
3623 spin_unlock_irqrestore(&wl->irq_lock, flags);
3624 mutex_unlock(&wl->mutex);
3625
3540 return err; 3626 return err;
3541} 3627}
3542 3628
@@ -3598,8 +3684,6 @@ static int b43_op_config_interface(struct ieee80211_hw *hw,
3598 if (b43_is_mode(wl, NL80211_IFTYPE_AP) || 3684 if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
3599 b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) { 3685 b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) {
3600 B43_WARN_ON(vif->type != wl->if_type); 3686 B43_WARN_ON(vif->type != wl->if_type);
3601 if (conf->changed & IEEE80211_IFCC_SSID)
3602 b43_set_ssid(dev, conf->ssid, conf->ssid_len);
3603 if (conf->changed & IEEE80211_IFCC_BEACON) 3687 if (conf->changed & IEEE80211_IFCC_BEACON)
3604 b43_update_templates(wl); 3688 b43_update_templates(wl);
3605 } else if (b43_is_mode(wl, NL80211_IFTYPE_ADHOC)) { 3689 } else if (b43_is_mode(wl, NL80211_IFTYPE_ADHOC)) {
@@ -3878,22 +3962,6 @@ static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev)
3878#endif /* CONFIG_SSB_DRIVER_PCICORE */ 3962#endif /* CONFIG_SSB_DRIVER_PCICORE */
3879} 3963}
3880 3964
3881/* Write the short and long frame retry limit values. */
3882static void b43_set_retry_limits(struct b43_wldev *dev,
3883 unsigned int short_retry,
3884 unsigned int long_retry)
3885{
3886 /* The retry limit is a 4-bit counter. Enforce this to avoid overflowing
3887 * the chip-internal counter. */
3888 short_retry = min(short_retry, (unsigned int)0xF);
3889 long_retry = min(long_retry, (unsigned int)0xF);
3890
3891 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_SRLIMIT,
3892 short_retry);
3893 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_LRLIMIT,
3894 long_retry);
3895}
3896
3897static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle) 3965static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle)
3898{ 3966{
3899 u16 pu_delay; 3967 u16 pu_delay;
@@ -4214,26 +4282,6 @@ static void b43_op_stop(struct ieee80211_hw *hw)
4214 cancel_work_sync(&(wl->txpower_adjust_work)); 4282 cancel_work_sync(&(wl->txpower_adjust_work));
4215} 4283}
4216 4284
4217static int b43_op_set_retry_limit(struct ieee80211_hw *hw,
4218 u32 short_retry_limit, u32 long_retry_limit)
4219{
4220 struct b43_wl *wl = hw_to_b43_wl(hw);
4221 struct b43_wldev *dev;
4222 int err = 0;
4223
4224 mutex_lock(&wl->mutex);
4225 dev = wl->current_dev;
4226 if (unlikely(!dev || (b43_status(dev) < B43_STAT_INITIALIZED))) {
4227 err = -ENODEV;
4228 goto out_unlock;
4229 }
4230 b43_set_retry_limits(dev, short_retry_limit, long_retry_limit);
4231out_unlock:
4232 mutex_unlock(&wl->mutex);
4233
4234 return err;
4235}
4236
4237static int b43_op_beacon_set_tim(struct ieee80211_hw *hw, 4285static int b43_op_beacon_set_tim(struct ieee80211_hw *hw,
4238 struct ieee80211_sta *sta, bool set) 4286 struct ieee80211_sta *sta, bool set)
4239{ 4287{
@@ -4263,6 +4311,7 @@ static const struct ieee80211_ops b43_hw_ops = {
4263 .add_interface = b43_op_add_interface, 4311 .add_interface = b43_op_add_interface,
4264 .remove_interface = b43_op_remove_interface, 4312 .remove_interface = b43_op_remove_interface,
4265 .config = b43_op_config, 4313 .config = b43_op_config,
4314 .bss_info_changed = b43_op_bss_info_changed,
4266 .config_interface = b43_op_config_interface, 4315 .config_interface = b43_op_config_interface,
4267 .configure_filter = b43_op_configure_filter, 4316 .configure_filter = b43_op_configure_filter,
4268 .set_key = b43_op_set_key, 4317 .set_key = b43_op_set_key,
@@ -4270,7 +4319,6 @@ static const struct ieee80211_ops b43_hw_ops = {
4270 .get_tx_stats = b43_op_get_tx_stats, 4319 .get_tx_stats = b43_op_get_tx_stats,
4271 .start = b43_op_start, 4320 .start = b43_op_start,
4272 .stop = b43_op_stop, 4321 .stop = b43_op_stop,
4273 .set_retry_limit = b43_op_set_retry_limit,
4274 .set_tim = b43_op_beacon_set_tim, 4322 .set_tim = b43_op_beacon_set_tim,
4275 .sta_notify = b43_op_sta_notify, 4323 .sta_notify = b43_op_sta_notify,
4276}; 4324};
@@ -4588,7 +4636,7 @@ static int b43_wireless_init(struct ssb_device *dev)
4588 BIT(NL80211_IFTYPE_ADHOC); 4636 BIT(NL80211_IFTYPE_ADHOC);
4589 4637
4590 hw->queues = b43_modparam_qos ? 4 : 1; 4638 hw->queues = b43_modparam_qos ? 4 : 1;
4591 hw->max_altrates = 1; 4639 hw->max_rates = 2;
4592 SET_IEEE80211_DEV(hw, dev->dev); 4640 SET_IEEE80211_DEV(hw, dev->dev);
4593 if (is_valid_ether_addr(sprom->et1mac)) 4641 if (is_valid_ether_addr(sprom->et1mac))
4594 SET_IEEE80211_PERM_ADDR(hw, sprom->et1mac); 4642 SET_IEEE80211_PERM_ADDR(hw, sprom->et1mac);
diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c
index 0f1a84c9de61..7fe9d1701624 100644
--- a/drivers/net/wireless/b43/phy_a.c
+++ b/drivers/net/wireless/b43/phy_a.c
@@ -77,7 +77,7 @@ static s8 b43_aphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
77} 77}
78#endif 78#endif
79 79
80void b43_radio_set_tx_iq(struct b43_wldev *dev) 80static void b43_radio_set_tx_iq(struct b43_wldev *dev)
81{ 81{
82 static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 }; 82 static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 };
83 static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A }; 83 static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A };
@@ -147,7 +147,7 @@ static void aphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
147//FIXME b43_phy_xmitpower(dev); 147//FIXME b43_phy_xmitpower(dev);
148} 148}
149 149
150void b43_radio_init2060(struct b43_wldev *dev) 150static void b43_radio_init2060(struct b43_wldev *dev)
151{ 151{
152 b43_radio_write16(dev, 0x0004, 0x00C0); 152 b43_radio_write16(dev, 0x0004, 0x00C0);
153 b43_radio_write16(dev, 0x0005, 0x0008); 153 b43_radio_write16(dev, 0x0005, 0x0008);
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index af37abccccb3..026b61c03fb9 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -178,13 +178,27 @@ void b43_phy_unlock(struct b43_wldev *dev)
178 b43_power_saving_ctl_bits(dev, 0); 178 b43_power_saving_ctl_bits(dev, 0);
179} 179}
180 180
181static inline void assert_mac_suspended(struct b43_wldev *dev)
182{
183 if (!B43_DEBUG)
184 return;
185 if ((b43_status(dev) >= B43_STAT_INITIALIZED) &&
186 (dev->mac_suspended <= 0)) {
187 b43dbg(dev->wl, "PHY/RADIO register access with "
188 "enabled MAC.\n");
189 dump_stack();
190 }
191}
192
181u16 b43_radio_read(struct b43_wldev *dev, u16 reg) 193u16 b43_radio_read(struct b43_wldev *dev, u16 reg)
182{ 194{
195 assert_mac_suspended(dev);
183 return dev->phy.ops->radio_read(dev, reg); 196 return dev->phy.ops->radio_read(dev, reg);
184} 197}
185 198
186void b43_radio_write(struct b43_wldev *dev, u16 reg, u16 value) 199void b43_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
187{ 200{
201 assert_mac_suspended(dev);
188 dev->phy.ops->radio_write(dev, reg, value); 202 dev->phy.ops->radio_write(dev, reg, value);
189} 203}
190 204
@@ -208,11 +222,13 @@ void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
208 222
209u16 b43_phy_read(struct b43_wldev *dev, u16 reg) 223u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
210{ 224{
225 assert_mac_suspended(dev);
211 return dev->phy.ops->phy_read(dev, reg); 226 return dev->phy.ops->phy_read(dev, reg);
212} 227}
213 228
214void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value) 229void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value)
215{ 230{
231 assert_mac_suspended(dev);
216 dev->phy.ops->phy_write(dev, reg, value); 232 dev->phy.ops->phy_write(dev, reg, value);
217} 233}
218 234
@@ -280,8 +296,10 @@ void b43_software_rfkill(struct b43_wldev *dev, enum rfkill_state state)
280 state = RFKILL_STATE_SOFT_BLOCKED; 296 state = RFKILL_STATE_SOFT_BLOCKED;
281 } 297 }
282 298
299 b43_mac_suspend(dev);
283 phy->ops->software_rfkill(dev, state); 300 phy->ops->software_rfkill(dev, state);
284 phy->radio_on = (state == RFKILL_STATE_UNBLOCKED); 301 phy->radio_on = (state == RFKILL_STATE_UNBLOCKED);
302 b43_mac_enable(dev);
285} 303}
286 304
287/** 305/**
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 232181f6333c..caac4a45f0bf 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -54,7 +54,7 @@ static const s8 b43_tssi2dbm_g_table[] = {
54 -20, -20, -20, -20, 54 -20, -20, -20, -20,
55}; 55};
56 56
57const u8 b43_radio_channel_codes_bg[] = { 57static const u8 b43_radio_channel_codes_bg[] = {
58 12, 17, 22, 27, 58 12, 17, 22, 27,
59 32, 37, 42, 47, 59 32, 37, 42, 47,
60 52, 57, 62, 67, 60 52, 57, 62, 67,
@@ -215,9 +215,9 @@ void b43_gphy_set_baseband_attenuation(struct b43_wldev *dev,
215} 215}
216 216
217/* Adjust the transmission power output (G-PHY) */ 217/* Adjust the transmission power output (G-PHY) */
218void b43_set_txpower_g(struct b43_wldev *dev, 218static void b43_set_txpower_g(struct b43_wldev *dev,
219 const struct b43_bbatt *bbatt, 219 const struct b43_bbatt *bbatt,
220 const struct b43_rfatt *rfatt, u8 tx_control) 220 const struct b43_rfatt *rfatt, u8 tx_control)
221{ 221{
222 struct b43_phy *phy = &dev->phy; 222 struct b43_phy *phy = &dev->phy;
223 struct b43_phy_g *gphy = phy->g; 223 struct b43_phy_g *gphy = phy->g;
@@ -383,14 +383,14 @@ static void b43_set_original_gains(struct b43_wldev *dev)
383} 383}
384 384
385/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 385/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
386void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val) 386static void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val)
387{ 387{
388 b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); 388 b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset);
389 b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val); 389 b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val);
390} 390}
391 391
392/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 392/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
393s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset) 393static s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset)
394{ 394{
395 u16 val; 395 u16 val;
396 396
@@ -401,7 +401,7 @@ s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset)
401} 401}
402 402
403/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 403/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
404void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val) 404static void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val)
405{ 405{
406 u16 i; 406 u16 i;
407 s16 tmp; 407 s16 tmp;
@@ -415,7 +415,7 @@ void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val)
415} 415}
416 416
417/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 417/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
418void b43_nrssi_mem_update(struct b43_wldev *dev) 418static void b43_nrssi_mem_update(struct b43_wldev *dev)
419{ 419{
420 struct b43_phy_g *gphy = dev->phy.g; 420 struct b43_phy_g *gphy = dev->phy.g;
421 s16 i, delta; 421 s16 i, delta;
@@ -589,7 +589,7 @@ static void b43_calc_nrssi_offset(struct b43_wldev *dev)
589 b43_phy_write(dev, 0x0811, backup[1]); 589 b43_phy_write(dev, 0x0811, backup[1]);
590} 590}
591 591
592void b43_calc_nrssi_slope(struct b43_wldev *dev) 592static void b43_calc_nrssi_slope(struct b43_wldev *dev)
593{ 593{
594 struct b43_phy *phy = &dev->phy; 594 struct b43_phy *phy = &dev->phy;
595 struct b43_phy_g *gphy = phy->g; 595 struct b43_phy_g *gphy = phy->g;
@@ -1354,7 +1354,7 @@ struct init2050_saved_values {
1354 u16 phy_syncctl; 1354 u16 phy_syncctl;
1355}; 1355};
1356 1356
1357u16 b43_radio_init2050(struct b43_wldev *dev) 1357static u16 b43_radio_init2050(struct b43_wldev *dev)
1358{ 1358{
1359 struct b43_phy *phy = &dev->phy; 1359 struct b43_phy *phy = &dev->phy;
1360 struct init2050_saved_values sav; 1360 struct init2050_saved_values sav;
@@ -3047,6 +3047,8 @@ static void b43_gphy_op_adjust_txpower(struct b43_wldev *dev)
3047 int rfatt, bbatt; 3047 int rfatt, bbatt;
3048 u8 tx_control; 3048 u8 tx_control;
3049 3049
3050 b43_mac_suspend(dev);
3051
3050 spin_lock_irq(&dev->wl->irq_lock); 3052 spin_lock_irq(&dev->wl->irq_lock);
3051 3053
3052 /* Calculate the new attenuation values. */ 3054 /* Calculate the new attenuation values. */
@@ -3103,6 +3105,8 @@ static void b43_gphy_op_adjust_txpower(struct b43_wldev *dev)
3103 gphy->tx_control); 3105 gphy->tx_control);
3104 b43_radio_unlock(dev); 3106 b43_radio_unlock(dev);
3105 b43_phy_unlock(dev); 3107 b43_phy_unlock(dev);
3108
3109 b43_mac_enable(dev);
3106} 3110}
3107 3111
3108static enum b43_txpwr_result b43_gphy_op_recalc_txpower(struct b43_wldev *dev, 3112static enum b43_txpwr_result b43_gphy_op_recalc_txpower(struct b43_wldev *dev,
@@ -3215,9 +3219,9 @@ static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev)
3215 struct b43_phy *phy = &dev->phy; 3219 struct b43_phy *phy = &dev->phy;
3216 struct b43_phy_g *gphy = phy->g; 3220 struct b43_phy_g *gphy = phy->g;
3217 3221
3222 b43_mac_suspend(dev);
3218 //TODO: update_aci_moving_average 3223 //TODO: update_aci_moving_average
3219 if (gphy->aci_enable && gphy->aci_wlan_automatic) { 3224 if (gphy->aci_enable && gphy->aci_wlan_automatic) {
3220 b43_mac_suspend(dev);
3221 if (!gphy->aci_enable && 1 /*TODO: not scanning? */ ) { 3225 if (!gphy->aci_enable && 1 /*TODO: not scanning? */ ) {
3222 if (0 /*TODO: bunch of conditions */ ) { 3226 if (0 /*TODO: bunch of conditions */ ) {
3223 phy->ops->interf_mitigation(dev, 3227 phy->ops->interf_mitigation(dev,
@@ -3227,12 +3231,12 @@ static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev)
3227 if (/*(aci_average > 1000) &&*/ !b43_gphy_aci_scan(dev)) 3231 if (/*(aci_average > 1000) &&*/ !b43_gphy_aci_scan(dev))
3228 phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE); 3232 phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE);
3229 } 3233 }
3230 b43_mac_enable(dev);
3231 } else if (gphy->interfmode == B43_INTERFMODE_NONWLAN && 3234 } else if (gphy->interfmode == B43_INTERFMODE_NONWLAN &&
3232 phy->rev == 1) { 3235 phy->rev == 1) {
3233 //TODO: implement rev1 workaround 3236 //TODO: implement rev1 workaround
3234 } 3237 }
3235 b43_lo_g_maintanance_work(dev); 3238 b43_lo_g_maintanance_work(dev);
3239 b43_mac_enable(dev);
3236} 3240}
3237 3241
3238static void b43_gphy_op_pwork_60sec(struct b43_wldev *dev) 3242static void b43_gphy_op_pwork_60sec(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 401591267592..1036bef8c4cc 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -587,9 +587,8 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
587 spin_lock(&q->lock); /* IRQs are already disabled. */ 587 spin_lock(&q->lock); /* IRQs are already disabled. */
588 588
589 info = IEEE80211_SKB_CB(pack->skb); 589 info = IEEE80211_SKB_CB(pack->skb);
590 memset(&info->status, 0, sizeof(info->status));
591 590
592 b43_fill_txstatus_report(info, status); 591 b43_fill_txstatus_report(dev, info, status);
593 592
594 total_len = pack->skb->len + b43_txhdr_size(dev); 593 total_len = pack->skb->len + b43_txhdr_size(dev);
595 total_len = roundup(total_len, 4); 594 total_len = roundup(total_len, 4);
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 2fabcf8f0474..eae9b8052658 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -46,7 +46,6 @@ static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
46 case 0x6E: 46 case 0x6E:
47 return 3; 47 return 3;
48 } 48 }
49 B43_WARN_ON(1);
50 return -1; 49 return -1;
51} 50}
52 51
@@ -73,7 +72,6 @@ static u8 b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
73 case 0xC: 72 case 0xC:
74 return base + 7; 73 return base + 7;
75 } 74 }
76 B43_WARN_ON(1);
77 return -1; 75 return -1;
78} 76}
79 77
@@ -185,7 +183,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
185 u8 *_txhdr, 183 u8 *_txhdr,
186 const unsigned char *fragment_data, 184 const unsigned char *fragment_data,
187 unsigned int fragment_len, 185 unsigned int fragment_len,
188 const struct ieee80211_tx_info *info, 186 struct ieee80211_tx_info *info,
189 u16 cookie) 187 u16 cookie)
190{ 188{
191 struct b43_txhdr *txhdr = (struct b43_txhdr *)_txhdr; 189 struct b43_txhdr *txhdr = (struct b43_txhdr *)_txhdr;
@@ -202,6 +200,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
202 u16 phy_ctl = 0; 200 u16 phy_ctl = 0;
203 u8 extra_ft = 0; 201 u8 extra_ft = 0;
204 struct ieee80211_rate *txrate; 202 struct ieee80211_rate *txrate;
203 struct ieee80211_tx_rate *rates;
205 204
206 memset(txhdr, 0, sizeof(*txhdr)); 205 memset(txhdr, 0, sizeof(*txhdr));
207 206
@@ -291,7 +290,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
291 phy_ctl |= B43_TXH_PHY_ENC_OFDM; 290 phy_ctl |= B43_TXH_PHY_ENC_OFDM;
292 else 291 else
293 phy_ctl |= B43_TXH_PHY_ENC_CCK; 292 phy_ctl |= B43_TXH_PHY_ENC_CCK;
294 if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) 293 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
295 phy_ctl |= B43_TXH_PHY_SHORTPRMBL; 294 phy_ctl |= B43_TXH_PHY_SHORTPRMBL;
296 295
297 switch (b43_ieee80211_antenna_sanitize(dev, info->antenna_sel_tx)) { 296 switch (b43_ieee80211_antenna_sanitize(dev, info->antenna_sel_tx)) {
@@ -314,6 +313,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
314 B43_WARN_ON(1); 313 B43_WARN_ON(1);
315 } 314 }
316 315
316 rates = info->control.rates;
317 /* MAC control */ 317 /* MAC control */
318 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 318 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
319 mac_ctl |= B43_TXH_MAC_ACK; 319 mac_ctl |= B43_TXH_MAC_ACK;
@@ -324,12 +324,22 @@ int b43_generate_txhdr(struct b43_wldev *dev,
324 mac_ctl |= B43_TXH_MAC_STMSDU; 324 mac_ctl |= B43_TXH_MAC_STMSDU;
325 if (phy->type == B43_PHYTYPE_A) 325 if (phy->type == B43_PHYTYPE_A)
326 mac_ctl |= B43_TXH_MAC_5GHZ; 326 mac_ctl |= B43_TXH_MAC_5GHZ;
327 if (info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT) 327
328 /* Overwrite rates[0].count to make the retry calculation
329 * in the tx status easier. need the actual retry limit to
330 * detect whether the fallback rate was used.
331 */
332 if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
333 (rates[0].count <= dev->wl->hw->conf.long_frame_max_tx_count)) {
334 rates[0].count = dev->wl->hw->conf.long_frame_max_tx_count;
328 mac_ctl |= B43_TXH_MAC_LONGFRAME; 335 mac_ctl |= B43_TXH_MAC_LONGFRAME;
336 } else {
337 rates[0].count = dev->wl->hw->conf.short_frame_max_tx_count;
338 }
329 339
330 /* Generate the RTS or CTS-to-self frame */ 340 /* Generate the RTS or CTS-to-self frame */
331 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) || 341 if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
332 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) { 342 (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) {
333 unsigned int len; 343 unsigned int len;
334 struct ieee80211_hdr *hdr; 344 struct ieee80211_hdr *hdr;
335 int rts_rate, rts_rate_fb; 345 int rts_rate, rts_rate_fb;
@@ -344,7 +354,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
344 rts_rate_fb = b43_calc_fallback_rate(rts_rate); 354 rts_rate_fb = b43_calc_fallback_rate(rts_rate);
345 rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb); 355 rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb);
346 356
347 if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { 357 if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
348 struct ieee80211_cts *cts; 358 struct ieee80211_cts *cts;
349 359
350 if (b43_is_old_txhdr_format(dev)) { 360 if (b43_is_old_txhdr_format(dev)) {
@@ -596,6 +606,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
596 phytype == B43_PHYTYPE_A); 606 phytype == B43_PHYTYPE_A);
597 else 607 else
598 status.rate_idx = b43_plcp_get_bitrate_idx_cck(plcp); 608 status.rate_idx = b43_plcp_get_bitrate_idx_cck(plcp);
609 if (unlikely(status.rate_idx == -1))
610 goto drop;
599 status.antenna = !!(phystat0 & B43_RX_PHYST0_ANT); 611 status.antenna = !!(phystat0 & B43_RX_PHYST0_ANT);
600 612
601 /* 613 /*
@@ -687,10 +699,18 @@ void b43_handle_txstatus(struct b43_wldev *dev,
687/* Fill out the mac80211 TXstatus report based on the b43-specific 699/* Fill out the mac80211 TXstatus report based on the b43-specific
688 * txstatus report data. This returns a boolean whether the frame was 700 * txstatus report data. This returns a boolean whether the frame was
689 * successfully transmitted. */ 701 * successfully transmitted. */
690bool b43_fill_txstatus_report(struct ieee80211_tx_info *report, 702bool b43_fill_txstatus_report(struct b43_wldev *dev,
703 struct ieee80211_tx_info *report,
691 const struct b43_txstatus *status) 704 const struct b43_txstatus *status)
692{ 705{
693 bool frame_success = 1; 706 bool frame_success = 1;
707 int retry_limit;
708
709 /* preserve the confiured retry limit before clearing the status
710 * The xmit function has overwritten the rc's value with the actual
711 * retry limit done by the hardware */
712 retry_limit = report->status.rates[0].count;
713 ieee80211_tx_info_clear_status(report);
694 714
695 if (status->acked) { 715 if (status->acked) {
696 /* The frame was ACKed. */ 716 /* The frame was ACKed. */
@@ -700,14 +720,32 @@ bool b43_fill_txstatus_report(struct ieee80211_tx_info *report,
700 if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) { 720 if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) {
701 /* ...but we expected an ACK. */ 721 /* ...but we expected an ACK. */
702 frame_success = 0; 722 frame_success = 0;
703 report->status.excessive_retries = 1;
704 } 723 }
705 } 724 }
706 if (status->frame_count == 0) { 725 if (status->frame_count == 0) {
707 /* The frame was not transmitted at all. */ 726 /* The frame was not transmitted at all. */
708 report->status.retry_count = 0; 727 report->status.rates[0].count = 0;
709 } else 728 } else if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
710 report->status.retry_count = status->frame_count - 1; 729 /*
730 * If the short retries (RTS, not data frame) have exceeded
731 * the limit, the hw will not have tried the selected rate,
732 * but will have used the fallback rate instead.
733 * Don't let the rate control count attempts for the selected
734 * rate in this case, otherwise the statistics will be off.
735 */
736 report->status.rates[0].count = 0;
737 report->status.rates[1].count = status->frame_count;
738 } else {
739 if (status->frame_count > retry_limit) {
740 report->status.rates[0].count = retry_limit;
741 report->status.rates[1].count = status->frame_count -
742 retry_limit;
743
744 } else {
745 report->status.rates[0].count = status->frame_count;
746 report->status.rates[1].idx = -1;
747 }
748 }
711 749
712 return frame_success; 750 return frame_success;
713} 751}
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index 0215faf47541..4fb2a190f7a7 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -178,7 +178,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
178 u8 * txhdr, 178 u8 * txhdr,
179 const unsigned char *fragment_data, 179 const unsigned char *fragment_data,
180 unsigned int fragment_len, 180 unsigned int fragment_len,
181 const struct ieee80211_tx_info *txctl, u16 cookie); 181 struct ieee80211_tx_info *txctl, u16 cookie);
182 182
183/* Transmit Status */ 183/* Transmit Status */
184struct b43_txstatus { 184struct b43_txstatus {
@@ -294,7 +294,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr);
294 294
295void b43_handle_txstatus(struct b43_wldev *dev, 295void b43_handle_txstatus(struct b43_wldev *dev,
296 const struct b43_txstatus *status); 296 const struct b43_txstatus *status);
297bool b43_fill_txstatus_report(struct ieee80211_tx_info *report, 297bool b43_fill_txstatus_report(struct b43_wldev *dev,
298 struct ieee80211_tx_info *report,
298 const struct b43_txstatus *status); 299 const struct b43_txstatus *status);
299 300
300void b43_tx_suspend(struct b43_wldev *dev); 301void b43_tx_suspend(struct b43_wldev *dev);
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index c40078e1fff9..97b0e06dfe21 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -145,6 +145,10 @@
145#define B43legacy_SHM_SH_PRMAXTIME 0x0074 /* Probe Response max time */ 145#define B43legacy_SHM_SH_PRMAXTIME 0x0074 /* Probe Response max time */
146#define B43legacy_SHM_SH_PRPHYCTL 0x0188 /* Probe Resp PHY TX control */ 146#define B43legacy_SHM_SH_PRPHYCTL 0x0188 /* Probe Resp PHY TX control */
147/* SHM_SHARED rate tables */ 147/* SHM_SHARED rate tables */
148#define B43legacy_SHM_SH_OFDMDIRECT 0x0480 /* Pointer to OFDM direct map */
149#define B43legacy_SHM_SH_OFDMBASIC 0x04A0 /* Pointer to OFDM basic rate map */
150#define B43legacy_SHM_SH_CCKDIRECT 0x04C0 /* Pointer to CCK direct map */
151#define B43legacy_SHM_SH_CCKBASIC 0x04E0 /* Pointer to CCK basic rate map */
148/* SHM_SHARED microcode soft registers */ 152/* SHM_SHARED microcode soft registers */
149#define B43legacy_SHM_SH_UCODEREV 0x0000 /* Microcode revision */ 153#define B43legacy_SHM_SH_UCODEREV 0x0000 /* Microcode revision */
150#define B43legacy_SHM_SH_UCODEPATCH 0x0002 /* Microcode patchlevel */ 154#define B43legacy_SHM_SH_UCODEPATCH 0x0002 /* Microcode patchlevel */
@@ -663,7 +667,6 @@ struct b43legacy_wldev {
663 bool bad_frames_preempt;/* Use "Bad Frames Preemption". */ 667 bool bad_frames_preempt;/* Use "Bad Frames Preemption". */
664 bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM). */ 668 bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM). */
665 bool short_preamble; /* TRUE if using short preamble. */ 669 bool short_preamble; /* TRUE if using short preamble. */
666 bool short_slot; /* TRUE if using short slot timing. */
667 bool radio_hw_enable; /* State of radio hardware enable bit. */ 670 bool radio_hw_enable; /* State of radio hardware enable bit. */
668 671
669 /* PHY/Radio device. */ 672 /* PHY/Radio device. */
diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
index 03ce0821a60e..1f85ac569fec 100644
--- a/drivers/net/wireless/b43legacy/debugfs.c
+++ b/drivers/net/wireless/b43legacy/debugfs.c
@@ -211,7 +211,7 @@ static ssize_t b43legacy_debugfs_read(struct file *file, char __user *userbuf,
211 struct b43legacy_dfs_file *dfile; 211 struct b43legacy_dfs_file *dfile;
212 ssize_t uninitialized_var(ret); 212 ssize_t uninitialized_var(ret);
213 char *buf; 213 char *buf;
214 const size_t bufsize = 1024 * 128; 214 const size_t bufsize = 1024 * 16; /* 16 KiB buffer */
215 const size_t buforder = get_order(bufsize); 215 const size_t buforder = get_order(bufsize);
216 int err = 0; 216 int err = 0;
217 217
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index fb6819e40f38..3649fc367098 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -919,7 +919,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
919 if (!ring->txhdr_cache) 919 if (!ring->txhdr_cache)
920 goto err_kfree_meta; 920 goto err_kfree_meta;
921 921
922 dma_test = ssb_dma_map_single(dev->dev, 922 dma_test = ssb_dma_map_single(dev->dev,
923 ring->txhdr_cache, 923 ring->txhdr_cache,
924 sizeof(struct b43legacy_txhdr_fw3), 924 sizeof(struct b43legacy_txhdr_fw3),
925 DMA_TO_DEVICE); 925 DMA_TO_DEVICE);
@@ -1411,6 +1411,7 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
1411 struct b43legacy_dmaring *ring; 1411 struct b43legacy_dmaring *ring;
1412 struct b43legacy_dmadesc_generic *desc; 1412 struct b43legacy_dmadesc_generic *desc;
1413 struct b43legacy_dmadesc_meta *meta; 1413 struct b43legacy_dmadesc_meta *meta;
1414 int retry_limit;
1414 int slot; 1415 int slot;
1415 1416
1416 ring = parse_cookie(dev, status->cookie, &slot); 1417 ring = parse_cookie(dev, status->cookie, &slot);
@@ -1437,25 +1438,42 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
1437 struct ieee80211_tx_info *info; 1438 struct ieee80211_tx_info *info;
1438 BUG_ON(!meta->skb); 1439 BUG_ON(!meta->skb);
1439 info = IEEE80211_SKB_CB(meta->skb); 1440 info = IEEE80211_SKB_CB(meta->skb);
1440 /* Call back to inform the ieee80211 subsystem about the
1441 * status of the transmission.
1442 * Some fields of txstat are already filled in dma_tx().
1443 */
1444 1441
1445 memset(&info->status, 0, sizeof(info->status)); 1442 /* preserve the confiured retry limit before clearing the status
1443 * The xmit function has overwritten the rc's value with the actual
1444 * retry limit done by the hardware */
1445 retry_limit = info->status.rates[0].count;
1446 ieee80211_tx_info_clear_status(info);
1446 1447
1447 if (status->acked) { 1448 if (status->acked)
1448 info->flags |= IEEE80211_TX_STAT_ACK; 1449 info->flags |= IEEE80211_TX_STAT_ACK;
1450
1451 if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
1452 /*
1453 * If the short retries (RTS, not data frame) have exceeded
1454 * the limit, the hw will not have tried the selected rate,
1455 * but will have used the fallback rate instead.
1456 * Don't let the rate control count attempts for the selected
1457 * rate in this case, otherwise the statistics will be off.
1458 */
1459 info->status.rates[0].count = 0;
1460 info->status.rates[1].count = status->frame_count;
1449 } else { 1461 } else {
1450 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 1462 if (status->frame_count > retry_limit) {
1451 info->status.excessive_retries = 1; 1463 info->status.rates[0].count = retry_limit;
1464 info->status.rates[1].count = status->frame_count -
1465 retry_limit;
1466
1467 } else {
1468 info->status.rates[0].count = status->frame_count;
1469 info->status.rates[1].idx = -1;
1470 }
1452 } 1471 }
1453 if (status->frame_count == 0) { 1472
1454 /* The frame was not transmitted at all. */ 1473 /* Call back to inform the ieee80211 subsystem about the
1455 info->status.retry_count = 0; 1474 * status of the transmission.
1456 } else 1475 * Some fields of txstat are already filled in dma_tx().
1457 info->status.retry_count = status->frame_count 1476 */
1458 - 1;
1459 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb); 1477 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
1460 /* skb is freed by ieee80211_tx_status_irqsafe() */ 1478 /* skb is freed by ieee80211_tx_status_irqsafe() */
1461 meta->skb = NULL; 1479 meta->skb = NULL;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index c66d57560e7c..c1324e31d2f6 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -576,13 +576,11 @@ static void b43legacy_set_slot_time(struct b43legacy_wldev *dev,
576static void b43legacy_short_slot_timing_enable(struct b43legacy_wldev *dev) 576static void b43legacy_short_slot_timing_enable(struct b43legacy_wldev *dev)
577{ 577{
578 b43legacy_set_slot_time(dev, 9); 578 b43legacy_set_slot_time(dev, 9);
579 dev->short_slot = 1;
580} 579}
581 580
582static void b43legacy_short_slot_timing_disable(struct b43legacy_wldev *dev) 581static void b43legacy_short_slot_timing_disable(struct b43legacy_wldev *dev)
583{ 582{
584 b43legacy_set_slot_time(dev, 20); 583 b43legacy_set_slot_time(dev, 20);
585 dev->short_slot = 0;
586} 584}
587 585
588/* Enable a Generic IRQ. "mask" is the mask of which IRQs to enable. 586/* Enable a Generic IRQ. "mask" is the mask of which IRQs to enable.
@@ -1160,29 +1158,6 @@ static void b43legacy_update_templates(struct b43legacy_wl *wl)
1160 wl->beacon1_uploaded = 0; 1158 wl->beacon1_uploaded = 0;
1161} 1159}
1162 1160
1163static void b43legacy_set_ssid(struct b43legacy_wldev *dev,
1164 const u8 *ssid, u8 ssid_len)
1165{
1166 u32 tmp;
1167 u16 i;
1168 u16 len;
1169
1170 len = min((u16)ssid_len, (u16)0x100);
1171 for (i = 0; i < len; i += sizeof(u32)) {
1172 tmp = (u32)(ssid[i + 0]);
1173 if (i + 1 < len)
1174 tmp |= (u32)(ssid[i + 1]) << 8;
1175 if (i + 2 < len)
1176 tmp |= (u32)(ssid[i + 2]) << 16;
1177 if (i + 3 < len)
1178 tmp |= (u32)(ssid[i + 3]) << 24;
1179 b43legacy_shm_write32(dev, B43legacy_SHM_SHARED,
1180 0x380 + i, tmp);
1181 }
1182 b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
1183 0x48, len);
1184}
1185
1186static void b43legacy_set_beacon_int(struct b43legacy_wldev *dev, 1161static void b43legacy_set_beacon_int(struct b43legacy_wldev *dev,
1187 u16 beacon_int) 1162 u16 beacon_int)
1188{ 1163{
@@ -2556,26 +2531,27 @@ init_failure:
2556 return err; 2531 return err;
2557} 2532}
2558 2533
2559static int b43legacy_antenna_from_ieee80211(u8 antenna) 2534/* Write the short and long frame retry limit values. */
2535static void b43legacy_set_retry_limits(struct b43legacy_wldev *dev,
2536 unsigned int short_retry,
2537 unsigned int long_retry)
2560{ 2538{
2561 switch (antenna) { 2539 /* The retry limit is a 4-bit counter. Enforce this to avoid overflowing
2562 case 0: /* default/diversity */ 2540 * the chip-internal counter. */
2563 return B43legacy_ANTENNA_DEFAULT; 2541 short_retry = min(short_retry, (unsigned int)0xF);
2564 case 1: /* Antenna 0 */ 2542 long_retry = min(long_retry, (unsigned int)0xF);
2565 return B43legacy_ANTENNA0; 2543
2566 case 2: /* Antenna 1 */ 2544 b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, 0x0006, short_retry);
2567 return B43legacy_ANTENNA1; 2545 b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, 0x0007, long_retry);
2568 default:
2569 return B43legacy_ANTENNA_DEFAULT;
2570 }
2571} 2546}
2572 2547
2573static int b43legacy_op_dev_config(struct ieee80211_hw *hw, 2548static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2574 struct ieee80211_conf *conf) 2549 u32 changed)
2575{ 2550{
2576 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); 2551 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
2577 struct b43legacy_wldev *dev; 2552 struct b43legacy_wldev *dev;
2578 struct b43legacy_phy *phy; 2553 struct b43legacy_phy *phy;
2554 struct ieee80211_conf *conf = &hw->conf;
2579 unsigned long flags; 2555 unsigned long flags;
2580 unsigned int new_phymode = 0xFFFF; 2556 unsigned int new_phymode = 0xFFFF;
2581 int antenna_tx; 2557 int antenna_tx;
@@ -2583,13 +2559,21 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2583 int err = 0; 2559 int err = 0;
2584 u32 savedirqs; 2560 u32 savedirqs;
2585 2561
2586 antenna_tx = b43legacy_antenna_from_ieee80211(conf->antenna_sel_tx); 2562 antenna_tx = B43legacy_ANTENNA_DEFAULT;
2587 antenna_rx = b43legacy_antenna_from_ieee80211(conf->antenna_sel_rx); 2563 antenna_rx = B43legacy_ANTENNA_DEFAULT;
2588 2564
2589 mutex_lock(&wl->mutex); 2565 mutex_lock(&wl->mutex);
2590 dev = wl->current_dev; 2566 dev = wl->current_dev;
2591 phy = &dev->phy; 2567 phy = &dev->phy;
2592 2568
2569 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
2570 b43legacy_set_retry_limits(dev,
2571 conf->short_frame_max_tx_count,
2572 conf->long_frame_max_tx_count);
2573 changed &= ~IEEE80211_CONF_CHANGE_RETRY_LIMITS;
2574 if (!changed)
2575 goto out_unlock_mutex;
2576
2593 /* Switch the PHY mode (if necessary). */ 2577 /* Switch the PHY mode (if necessary). */
2594 switch (conf->channel->band) { 2578 switch (conf->channel->band) {
2595 case IEEE80211_BAND_2GHZ: 2579 case IEEE80211_BAND_2GHZ:
@@ -2622,16 +2606,6 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2622 if (conf->channel->hw_value != phy->channel) 2606 if (conf->channel->hw_value != phy->channel)
2623 b43legacy_radio_selectchannel(dev, conf->channel->hw_value, 0); 2607 b43legacy_radio_selectchannel(dev, conf->channel->hw_value, 0);
2624 2608
2625 /* Enable/Disable ShortSlot timing. */
2626 if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME))
2627 != dev->short_slot) {
2628 B43legacy_WARN_ON(phy->type != B43legacy_PHYTYPE_G);
2629 if (conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)
2630 b43legacy_short_slot_timing_enable(dev);
2631 else
2632 b43legacy_short_slot_timing_disable(dev);
2633 }
2634
2635 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_RADIOTAP); 2609 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
2636 2610
2637 /* Adjust the desired TX power level. */ 2611 /* Adjust the desired TX power level. */
@@ -2676,6 +2650,104 @@ out_unlock_mutex:
2676 return err; 2650 return err;
2677} 2651}
2678 2652
2653static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u64 brates)
2654{
2655 struct ieee80211_supported_band *sband =
2656 dev->wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
2657 struct ieee80211_rate *rate;
2658 int i;
2659 u16 basic, direct, offset, basic_offset, rateptr;
2660
2661 for (i = 0; i < sband->n_bitrates; i++) {
2662 rate = &sband->bitrates[i];
2663
2664 if (b43legacy_is_cck_rate(rate->hw_value)) {
2665 direct = B43legacy_SHM_SH_CCKDIRECT;
2666 basic = B43legacy_SHM_SH_CCKBASIC;
2667 offset = b43legacy_plcp_get_ratecode_cck(rate->hw_value);
2668 offset &= 0xF;
2669 } else {
2670 direct = B43legacy_SHM_SH_OFDMDIRECT;
2671 basic = B43legacy_SHM_SH_OFDMBASIC;
2672 offset = b43legacy_plcp_get_ratecode_ofdm(rate->hw_value);
2673 offset &= 0xF;
2674 }
2675
2676 rate = ieee80211_get_response_rate(sband, brates, rate->bitrate);
2677
2678 if (b43legacy_is_cck_rate(rate->hw_value)) {
2679 basic_offset = b43legacy_plcp_get_ratecode_cck(rate->hw_value);
2680 basic_offset &= 0xF;
2681 } else {
2682 basic_offset = b43legacy_plcp_get_ratecode_ofdm(rate->hw_value);
2683 basic_offset &= 0xF;
2684 }
2685
2686 /*
2687 * Get the pointer that we need to point to
2688 * from the direct map
2689 */
2690 rateptr = b43legacy_shm_read16(dev, B43legacy_SHM_SHARED,
2691 direct + 2 * basic_offset);
2692 /* and write it to the basic map */
2693 b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
2694 basic + 2 * offset, rateptr);
2695 }
2696}
2697
2698static void b43legacy_op_bss_info_changed(struct ieee80211_hw *hw,
2699 struct ieee80211_vif *vif,
2700 struct ieee80211_bss_conf *conf,
2701 u32 changed)
2702{
2703 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
2704 struct b43legacy_wldev *dev;
2705 struct b43legacy_phy *phy;
2706 unsigned long flags;
2707 u32 savedirqs;
2708
2709 mutex_lock(&wl->mutex);
2710
2711 dev = wl->current_dev;
2712 phy = &dev->phy;
2713
2714 /* Disable IRQs while reconfiguring the device.
2715 * This makes it possible to drop the spinlock throughout
2716 * the reconfiguration process. */
2717 spin_lock_irqsave(&wl->irq_lock, flags);
2718 if (b43legacy_status(dev) < B43legacy_STAT_STARTED) {
2719 spin_unlock_irqrestore(&wl->irq_lock, flags);
2720 goto out_unlock_mutex;
2721 }
2722 savedirqs = b43legacy_interrupt_disable(dev, B43legacy_IRQ_ALL);
2723 spin_unlock_irqrestore(&wl->irq_lock, flags);
2724 b43legacy_synchronize_irq(dev);
2725
2726 b43legacy_mac_suspend(dev);
2727
2728 if (changed & BSS_CHANGED_BASIC_RATES)
2729 b43legacy_update_basic_rates(dev, conf->basic_rates);
2730
2731 if (changed & BSS_CHANGED_ERP_SLOT) {
2732 if (conf->use_short_slot)
2733 b43legacy_short_slot_timing_enable(dev);
2734 else
2735 b43legacy_short_slot_timing_disable(dev);
2736 }
2737
2738 b43legacy_mac_enable(dev);
2739
2740 spin_lock_irqsave(&wl->irq_lock, flags);
2741 b43legacy_interrupt_enable(dev, savedirqs);
2742 /* XXX: why? */
2743 mmiowb();
2744 spin_unlock_irqrestore(&wl->irq_lock, flags);
2745 out_unlock_mutex:
2746 mutex_unlock(&wl->mutex);
2747
2748 return;
2749}
2750
2679static void b43legacy_op_configure_filter(struct ieee80211_hw *hw, 2751static void b43legacy_op_configure_filter(struct ieee80211_hw *hw,
2680 unsigned int changed, 2752 unsigned int changed,
2681 unsigned int *fflags, 2753 unsigned int *fflags,
@@ -2735,7 +2807,6 @@ static int b43legacy_op_config_interface(struct ieee80211_hw *hw,
2735 if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) { 2807 if (b43legacy_status(dev) >= B43legacy_STAT_INITIALIZED) {
2736 if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP)) { 2808 if (b43legacy_is_mode(wl, NL80211_IFTYPE_AP)) {
2737 B43legacy_WARN_ON(vif->type != NL80211_IFTYPE_AP); 2809 B43legacy_WARN_ON(vif->type != NL80211_IFTYPE_AP);
2738 b43legacy_set_ssid(dev, conf->ssid, conf->ssid_len);
2739 if (conf->changed & IEEE80211_IFCC_BEACON) 2810 if (conf->changed & IEEE80211_IFCC_BEACON)
2740 b43legacy_update_templates(wl); 2811 b43legacy_update_templates(wl);
2741 } else if (b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC)) { 2812 } else if (b43legacy_is_mode(wl, NL80211_IFTYPE_ADHOC)) {
@@ -3002,20 +3073,6 @@ static void b43legacy_imcfglo_timeouts_workaround(struct b43legacy_wldev *dev)
3002#endif /* CONFIG_SSB_DRIVER_PCICORE */ 3073#endif /* CONFIG_SSB_DRIVER_PCICORE */
3003} 3074}
3004 3075
3005/* Write the short and long frame retry limit values. */
3006static void b43legacy_set_retry_limits(struct b43legacy_wldev *dev,
3007 unsigned int short_retry,
3008 unsigned int long_retry)
3009{
3010 /* The retry limit is a 4-bit counter. Enforce this to avoid overflowing
3011 * the chip-internal counter. */
3012 short_retry = min(short_retry, (unsigned int)0xF);
3013 long_retry = min(long_retry, (unsigned int)0xF);
3014
3015 b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, 0x0006, short_retry);
3016 b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, 0x0007, long_retry);
3017}
3018
3019static void b43legacy_set_synth_pu_delay(struct b43legacy_wldev *dev, 3076static void b43legacy_set_synth_pu_delay(struct b43legacy_wldev *dev,
3020 bool idle) { 3077 bool idle) {
3021 u16 pu_delay = 1050; 3078 u16 pu_delay = 1050;
@@ -3380,28 +3437,6 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
3380 mutex_unlock(&wl->mutex); 3437 mutex_unlock(&wl->mutex);
3381} 3438}
3382 3439
3383static int b43legacy_op_set_retry_limit(struct ieee80211_hw *hw,
3384 u32 short_retry_limit,
3385 u32 long_retry_limit)
3386{
3387 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3388 struct b43legacy_wldev *dev;
3389 int err = 0;
3390
3391 mutex_lock(&wl->mutex);
3392 dev = wl->current_dev;
3393 if (unlikely(!dev ||
3394 (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED))) {
3395 err = -ENODEV;
3396 goto out_unlock;
3397 }
3398 b43legacy_set_retry_limits(dev, short_retry_limit, long_retry_limit);
3399out_unlock:
3400 mutex_unlock(&wl->mutex);
3401
3402 return err;
3403}
3404
3405static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw, 3440static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
3406 struct ieee80211_sta *sta, bool set) 3441 struct ieee80211_sta *sta, bool set)
3407{ 3442{
@@ -3421,13 +3456,13 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
3421 .add_interface = b43legacy_op_add_interface, 3456 .add_interface = b43legacy_op_add_interface,
3422 .remove_interface = b43legacy_op_remove_interface, 3457 .remove_interface = b43legacy_op_remove_interface,
3423 .config = b43legacy_op_dev_config, 3458 .config = b43legacy_op_dev_config,
3459 .bss_info_changed = b43legacy_op_bss_info_changed,
3424 .config_interface = b43legacy_op_config_interface, 3460 .config_interface = b43legacy_op_config_interface,
3425 .configure_filter = b43legacy_op_configure_filter, 3461 .configure_filter = b43legacy_op_configure_filter,
3426 .get_stats = b43legacy_op_get_stats, 3462 .get_stats = b43legacy_op_get_stats,
3427 .get_tx_stats = b43legacy_op_get_tx_stats, 3463 .get_tx_stats = b43legacy_op_get_tx_stats,
3428 .start = b43legacy_op_start, 3464 .start = b43legacy_op_start,
3429 .stop = b43legacy_op_stop, 3465 .stop = b43legacy_op_stop,
3430 .set_retry_limit = b43legacy_op_set_retry_limit,
3431 .set_tim = b43legacy_op_beacon_set_tim, 3466 .set_tim = b43legacy_op_beacon_set_tim,
3432}; 3467};
3433 3468
@@ -3710,7 +3745,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
3710 BIT(NL80211_IFTYPE_WDS) | 3745 BIT(NL80211_IFTYPE_WDS) |
3711 BIT(NL80211_IFTYPE_ADHOC); 3746 BIT(NL80211_IFTYPE_ADHOC);
3712 hw->queues = 1; /* FIXME: hardware has more queues */ 3747 hw->queues = 1; /* FIXME: hardware has more queues */
3713 hw->max_altrates = 1; 3748 hw->max_rates = 2;
3714 SET_IEEE80211_DEV(hw, dev->dev); 3749 SET_IEEE80211_DEV(hw, dev->dev);
3715 if (is_valid_ether_addr(sprom->et1mac)) 3750 if (is_valid_ether_addr(sprom->et1mac))
3716 SET_IEEE80211_PERM_ADDR(hw, sprom->et1mac); 3751 SET_IEEE80211_PERM_ADDR(hw, sprom->et1mac);
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 4c9442b16f3f..11319ec2d64a 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -1296,12 +1296,10 @@ void b43legacy_lo_write(struct b43legacy_wldev *dev,
1296 /* Sanity check. */ 1296 /* Sanity check. */
1297 if (pair->low < -8 || pair->low > 8 || 1297 if (pair->low < -8 || pair->low > 8 ||
1298 pair->high < -8 || pair->high > 8) { 1298 pair->high < -8 || pair->high > 8) {
1299 struct b43legacy_phy *phy = &dev->phy;
1300 b43legacydbg(dev->wl, 1299 b43legacydbg(dev->wl,
1301 "WARNING: Writing invalid LOpair " 1300 "WARNING: Writing invalid LOpair "
1302 "(low: %d, high: %d, index: %lu)\n", 1301 "(low: %d, high: %d)\n",
1303 pair->low, pair->high, 1302 pair->low, pair->high);
1304 (unsigned long)(pair - phy->_lo_pairs));
1305 dump_stack(); 1303 dump_stack();
1306 } 1304 }
1307#endif 1305#endif
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index a86c7647fa2d..746d5361bba0 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -491,6 +491,7 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
491 struct b43legacy_pioqueue *queue; 491 struct b43legacy_pioqueue *queue;
492 struct b43legacy_pio_txpacket *packet; 492 struct b43legacy_pio_txpacket *packet;
493 struct ieee80211_tx_info *info; 493 struct ieee80211_tx_info *info;
494 int retry_limit;
494 495
495 queue = parse_cookie(dev, status->cookie, &packet); 496 queue = parse_cookie(dev, status->cookie, &packet);
496 B43legacy_WARN_ON(!queue); 497 B43legacy_WARN_ON(!queue);
@@ -503,11 +504,37 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
503 sizeof(struct b43legacy_txhdr_fw3)); 504 sizeof(struct b43legacy_txhdr_fw3));
504 505
505 info = IEEE80211_SKB_CB(packet->skb); 506 info = IEEE80211_SKB_CB(packet->skb);
506 memset(&info->status, 0, sizeof(info->status)); 507
508 /* preserve the confiured retry limit before clearing the status
509 * The xmit function has overwritten the rc's value with the actual
510 * retry limit done by the hardware */
511 retry_limit = info->status.rates[0].count;
512 ieee80211_tx_info_clear_status(info);
507 513
508 if (status->acked) 514 if (status->acked)
509 info->flags |= IEEE80211_TX_STAT_ACK; 515 info->flags |= IEEE80211_TX_STAT_ACK;
510 info->status.retry_count = status->frame_count - 1; 516
517 if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
518 /*
519 * If the short retries (RTS, not data frame) have exceeded
520 * the limit, the hw will not have tried the selected rate,
521 * but will have used the fallback rate instead.
522 * Don't let the rate control count attempts for the selected
523 * rate in this case, otherwise the statistics will be off.
524 */
525 info->status.rates[0].count = 0;
526 info->status.rates[1].count = status->frame_count;
527 } else {
528 if (status->frame_count > retry_limit) {
529 info->status.rates[0].count = retry_limit;
530 info->status.rates[1].count = status->frame_count -
531 retry_limit;
532
533 } else {
534 info->status.rates[0].count = status->frame_count;
535 info->status.rates[1].idx = -1;
536 }
537 }
511 ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb); 538 ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb);
512 packet->skb = NULL; 539 packet->skb = NULL;
513 540
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 65e833781608..12fca99f7578 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -188,7 +188,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
188 struct b43legacy_txhdr_fw3 *txhdr, 188 struct b43legacy_txhdr_fw3 *txhdr,
189 const unsigned char *fragment_data, 189 const unsigned char *fragment_data,
190 unsigned int fragment_len, 190 unsigned int fragment_len,
191 const struct ieee80211_tx_info *info, 191 struct ieee80211_tx_info *info,
192 u16 cookie) 192 u16 cookie)
193{ 193{
194 const struct ieee80211_hdr *wlhdr; 194 const struct ieee80211_hdr *wlhdr;
@@ -201,6 +201,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
201 u32 mac_ctl = 0; 201 u32 mac_ctl = 0;
202 u16 phy_ctl = 0; 202 u16 phy_ctl = 0;
203 struct ieee80211_rate *tx_rate; 203 struct ieee80211_rate *tx_rate;
204 struct ieee80211_tx_rate *rates;
204 205
205 wlhdr = (const struct ieee80211_hdr *)fragment_data; 206 wlhdr = (const struct ieee80211_hdr *)fragment_data;
206 207
@@ -274,7 +275,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
274 /* PHY TX Control word */ 275 /* PHY TX Control word */
275 if (rate_ofdm) 276 if (rate_ofdm)
276 phy_ctl |= B43legacy_TX4_PHY_OFDM; 277 phy_ctl |= B43legacy_TX4_PHY_OFDM;
277 if (dev->short_preamble) 278 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
278 phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL; 279 phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL;
279 switch (info->antenna_sel_tx) { 280 switch (info->antenna_sel_tx) {
280 case 0: 281 case 0:
@@ -291,6 +292,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
291 } 292 }
292 293
293 /* MAC control */ 294 /* MAC control */
295 rates = info->control.rates;
294 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 296 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
295 mac_ctl |= B43legacy_TX4_MAC_ACK; 297 mac_ctl |= B43legacy_TX4_MAC_ACK;
296 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) 298 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
@@ -299,12 +301,22 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
299 mac_ctl |= B43legacy_TX4_MAC_STMSDU; 301 mac_ctl |= B43legacy_TX4_MAC_STMSDU;
300 if (rate_fb_ofdm) 302 if (rate_fb_ofdm)
301 mac_ctl |= B43legacy_TX4_MAC_FALLBACKOFDM; 303 mac_ctl |= B43legacy_TX4_MAC_FALLBACKOFDM;
302 if (info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT) 304
305 /* Overwrite rates[0].count to make the retry calculation
306 * in the tx status easier. need the actual retry limit to
307 * detect whether the fallback rate was used.
308 */
309 if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
310 (rates[0].count <= dev->wl->hw->conf.long_frame_max_tx_count)) {
311 rates[0].count = dev->wl->hw->conf.long_frame_max_tx_count;
303 mac_ctl |= B43legacy_TX4_MAC_LONGFRAME; 312 mac_ctl |= B43legacy_TX4_MAC_LONGFRAME;
313 } else {
314 rates[0].count = dev->wl->hw->conf.short_frame_max_tx_count;
315 }
304 316
305 /* Generate the RTS or CTS-to-self frame */ 317 /* Generate the RTS or CTS-to-self frame */
306 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) || 318 if ((rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
307 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) { 319 (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) {
308 unsigned int len; 320 unsigned int len;
309 struct ieee80211_hdr *hdr; 321 struct ieee80211_hdr *hdr;
310 int rts_rate; 322 int rts_rate;
@@ -319,7 +331,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
319 if (rts_rate_fb_ofdm) 331 if (rts_rate_fb_ofdm)
320 mac_ctl |= B43legacy_TX4_MAC_CTSFALLBACKOFDM; 332 mac_ctl |= B43legacy_TX4_MAC_CTSFALLBACKOFDM;
321 333
322 if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { 334 if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
323 ieee80211_ctstoself_get(dev->wl->hw, 335 ieee80211_ctstoself_get(dev->wl->hw,
324 info->control.vif, 336 info->control.vif,
325 fragment_data, 337 fragment_data,
@@ -362,7 +374,7 @@ int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
362 u8 *txhdr, 374 u8 *txhdr,
363 const unsigned char *fragment_data, 375 const unsigned char *fragment_data,
364 unsigned int fragment_len, 376 unsigned int fragment_len,
365 const struct ieee80211_tx_info *info, 377 struct ieee80211_tx_info *info,
366 u16 cookie) 378 u16 cookie)
367{ 379{
368 return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr, 380 return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr,
diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h
index e56777e0feab..62e09d02788f 100644
--- a/drivers/net/wireless/b43legacy/xmit.h
+++ b/drivers/net/wireless/b43legacy/xmit.h
@@ -80,7 +80,7 @@ int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
80 u8 *txhdr, 80 u8 *txhdr,
81 const unsigned char *fragment_data, 81 const unsigned char *fragment_data,
82 unsigned int fragment_len, 82 unsigned int fragment_len,
83 const struct ieee80211_tx_info *info, 83 struct ieee80211_tx_info *info,
84 u16 cookie); 84 u16 cookie);
85 85
86 86
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index 1fef33169fdd..932d207bce23 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -2,8 +2,17 @@ config HOSTAP
2 tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)" 2 tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)"
3 depends on WLAN_80211 3 depends on WLAN_80211
4 select WIRELESS_EXT 4 select WIRELESS_EXT
5 select IEEE80211 5 select CRYPTO
6 select IEEE80211_CRYPT_WEP 6 select CRYPTO_ARC4
7 select CRYPTO_ECB
8 select CRYPTO_AES
9 select CRYPTO_MICHAEL_MIC
10 select CRYPTO_ECB
11 select CRC32
12 select LIB80211
13 select LIB80211_CRYPT_WEP
14 select LIB80211_CRYPT_TKIP
15 select LIB80211_CRYPT_CCMP
7 ---help--- 16 ---help---
8 Shared driver code for IEEE 802.11b wireless cards based on 17 Shared driver code for IEEE 802.11b wireless cards based on
9 Intersil Prism2/2.5/3 chipset. This driver supports so called 18 Intersil Prism2/2.5/3 chipset. This driver supports so called
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h
index 3a386a636cca..2453deaa3e00 100644
--- a/drivers/net/wireless/hostap/hostap.h
+++ b/drivers/net/wireless/hostap/hostap.h
@@ -63,7 +63,7 @@ void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
63int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac); 63int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac);
64void ap_control_kickall(struct ap_data *ap); 64void ap_control_kickall(struct ap_data *ap);
65void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent, 65void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
66 struct ieee80211_crypt_data ***crypt); 66 struct lib80211_crypt_data ***crypt);
67int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[], 67int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
68 struct iw_quality qual[], int buf_size, 68 struct iw_quality qual[], int buf_size,
69 int aplist); 69 int aplist);
diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h
index 3694b1eba521..3a9474d9a907 100644
--- a/drivers/net/wireless/hostap/hostap_80211.h
+++ b/drivers/net/wireless/hostap/hostap_80211.h
@@ -2,7 +2,7 @@
2#define HOSTAP_80211_H 2#define HOSTAP_80211_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <net/ieee80211_crypt.h> 5#include <net/ieee80211.h>
6 6
7struct hostap_ieee80211_mgmt { 7struct hostap_ieee80211_mgmt {
8 __le16 frame_control; 8 __le16 frame_control;
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index f106bc1585a4..19b1bf0478bd 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -1,5 +1,5 @@
1#include <linux/etherdevice.h> 1#include <linux/etherdevice.h>
2#include <net/ieee80211_crypt.h> 2#include <net/lib80211.h>
3 3
4#include "hostap_80211.h" 4#include "hostap_80211.h"
5#include "hostap.h" 5#include "hostap.h"
@@ -19,7 +19,6 @@ void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
19{ 19{
20 struct ieee80211_hdr_4addr *hdr; 20 struct ieee80211_hdr_4addr *hdr;
21 u16 fc; 21 u16 fc;
22 DECLARE_MAC_BUF(mac);
23 22
24 hdr = (struct ieee80211_hdr_4addr *) skb->data; 23 hdr = (struct ieee80211_hdr_4addr *) skb->data;
25 24
@@ -45,11 +44,11 @@ void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
45 printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id), 44 printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id),
46 le16_to_cpu(hdr->seq_ctl)); 45 le16_to_cpu(hdr->seq_ctl));
47 46
48 printk(KERN_DEBUG " A1=%s", print_mac(mac, hdr->addr1)); 47 printk(KERN_DEBUG " A1=%pM", hdr->addr1);
49 printk(" A2=%s", print_mac(mac, hdr->addr2)); 48 printk(" A2=%pM", hdr->addr2);
50 printk(" A3=%s", print_mac(mac, hdr->addr3)); 49 printk(" A3=%pM", hdr->addr3);
51 if (skb->len >= 30) 50 if (skb->len >= 30)
52 printk(" A4=%s", print_mac(mac, hdr->addr4)); 51 printk(" A4=%pM", hdr->addr4);
53 printk("\n"); 52 printk("\n");
54} 53}
55 54
@@ -68,7 +67,6 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
68 67
69 iface = netdev_priv(dev); 68 iface = netdev_priv(dev);
70 local = iface->local; 69 local = iface->local;
71 dev->last_rx = jiffies;
72 70
73 if (dev->type == ARPHRD_IEEE80211_PRISM) { 71 if (dev->type == ARPHRD_IEEE80211_PRISM) {
74 if (local->monitor_type == PRISM2_MONITOR_PRISM) { 72 if (local->monitor_type == PRISM2_MONITOR_PRISM) {
@@ -557,7 +555,6 @@ static int
557hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr, 555hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr,
558 u16 fc, struct net_device **wds) 556 u16 fc, struct net_device **wds)
559{ 557{
560 DECLARE_MAC_BUF(mac);
561 /* FIX: is this really supposed to accept WDS frames only in Master 558 /* FIX: is this really supposed to accept WDS frames only in Master
562 * mode? What about Repeater or Managed with WDS frames? */ 559 * mode? What about Repeater or Managed with WDS frames? */
563 if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) != 560 if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) !=
@@ -573,10 +570,10 @@ hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr,
573 hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) { 570 hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) {
574 /* RA (or BSSID) is not ours - drop */ 571 /* RA (or BSSID) is not ours - drop */
575 PDEBUG(DEBUG_EXTRA2, "%s: received WDS frame with " 572 PDEBUG(DEBUG_EXTRA2, "%s: received WDS frame with "
576 "not own or broadcast %s=%s\n", 573 "not own or broadcast %s=%pM\n",
577 local->dev->name, 574 local->dev->name,
578 fc & IEEE80211_FCTL_FROMDS ? "RA" : "BSSID", 575 fc & IEEE80211_FCTL_FROMDS ? "RA" : "BSSID",
579 print_mac(mac, hdr->addr1)); 576 hdr->addr1);
580 return -1; 577 return -1;
581 } 578 }
582 579
@@ -589,8 +586,8 @@ hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr,
589 /* require that WDS link has been registered with TA or the 586 /* require that WDS link has been registered with TA or the
590 * frame is from current AP when using 'AP client mode' */ 587 * frame is from current AP when using 'AP client mode' */
591 PDEBUG(DEBUG_EXTRA, "%s: received WDS[4 addr] frame " 588 PDEBUG(DEBUG_EXTRA, "%s: received WDS[4 addr] frame "
592 "from unknown TA=%s\n", 589 "from unknown TA=%pM\n",
593 local->dev->name, print_mac(mac, hdr->addr2)); 590 local->dev->name, hdr->addr2);
594 if (local->ap && local->ap->autom_ap_wds) 591 if (local->ap && local->ap->autom_ap_wds)
595 hostap_wds_link_oper(local, hdr->addr2, WDS_ADD); 592 hostap_wds_link_oper(local, hdr->addr2, WDS_ADD);
596 return -1; 593 return -1;
@@ -652,7 +649,7 @@ static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
652/* Called only as a tasklet (software IRQ) */ 649/* Called only as a tasklet (software IRQ) */
653static int 650static int
654hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, 651hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
655 struct ieee80211_crypt_data *crypt) 652 struct lib80211_crypt_data *crypt)
656{ 653{
657 struct ieee80211_hdr_4addr *hdr; 654 struct ieee80211_hdr_4addr *hdr;
658 int res, hdrlen; 655 int res, hdrlen;
@@ -667,10 +664,8 @@ hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
667 strcmp(crypt->ops->name, "TKIP") == 0) { 664 strcmp(crypt->ops->name, "TKIP") == 0) {
668 if (net_ratelimit()) { 665 if (net_ratelimit()) {
669 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " 666 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
670 "received packet from " MAC_FMT "\n", 667 "received packet from %pM\n",
671 local->dev->name, 668 local->dev->name, hdr->addr2);
672 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
673 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5]);
674 } 669 }
675 return -1; 670 return -1;
676 } 671 }
@@ -679,12 +674,8 @@ hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
679 res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); 674 res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
680 atomic_dec(&crypt->refcnt); 675 atomic_dec(&crypt->refcnt);
681 if (res < 0) { 676 if (res < 0) {
682 printk(KERN_DEBUG "%s: decryption failed (SA=" MAC_FMT 677 printk(KERN_DEBUG "%s: decryption failed (SA=%pM) res=%d\n",
683 ") res=%d\n", 678 local->dev->name, hdr->addr2, res);
684 local->dev->name,
685 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
686 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5],
687 res);
688 local->comm_tallies.rx_discards_wep_undecryptable++; 679 local->comm_tallies.rx_discards_wep_undecryptable++;
689 return -1; 680 return -1;
690 } 681 }
@@ -696,11 +687,10 @@ hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
696/* Called only as a tasklet (software IRQ) */ 687/* Called only as a tasklet (software IRQ) */
697static int 688static int
698hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb, 689hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb,
699 int keyidx, struct ieee80211_crypt_data *crypt) 690 int keyidx, struct lib80211_crypt_data *crypt)
700{ 691{
701 struct ieee80211_hdr_4addr *hdr; 692 struct ieee80211_hdr_4addr *hdr;
702 int res, hdrlen; 693 int res, hdrlen;
703 DECLARE_MAC_BUF(mac);
704 694
705 if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) 695 if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
706 return 0; 696 return 0;
@@ -713,8 +703,8 @@ hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb,
713 atomic_dec(&crypt->refcnt); 703 atomic_dec(&crypt->refcnt);
714 if (res < 0) { 704 if (res < 0) {
715 printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed" 705 printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
716 " (SA=%s keyidx=%d)\n", 706 " (SA=%pM keyidx=%d)\n",
717 local->dev->name, print_mac(mac, hdr->addr2), keyidx); 707 local->dev->name, hdr->addr2, keyidx);
718 return -1; 708 return -1;
719 } 709 }
720 710
@@ -743,7 +733,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
743 int from_assoc_ap = 0; 733 int from_assoc_ap = 0;
744 u8 dst[ETH_ALEN]; 734 u8 dst[ETH_ALEN];
745 u8 src[ETH_ALEN]; 735 u8 src[ETH_ALEN];
746 struct ieee80211_crypt_data *crypt = NULL; 736 struct lib80211_crypt_data *crypt = NULL;
747 void *sta = NULL; 737 void *sta = NULL;
748 int keyidx = 0; 738 int keyidx = 0;
749 739
@@ -795,7 +785,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
795 int idx = 0; 785 int idx = 0;
796 if (skb->len >= hdrlen + 3) 786 if (skb->len >= hdrlen + 3)
797 idx = skb->data[hdrlen + 3] >> 6; 787 idx = skb->data[hdrlen + 3] >> 6;
798 crypt = local->crypt[idx]; 788 crypt = local->crypt_info.crypt[idx];
799 sta = NULL; 789 sta = NULL;
800 790
801 /* Use station specific key to override default keys if the 791 /* Use station specific key to override default keys if the
@@ -822,10 +812,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
822 * frames silently instead of filling system log with 812 * frames silently instead of filling system log with
823 * these reports. */ 813 * these reports. */
824 printk(KERN_DEBUG "%s: WEP decryption failed (not set)" 814 printk(KERN_DEBUG "%s: WEP decryption failed (not set)"
825 " (SA=" MAC_FMT ")\n", 815 " (SA=%pM)\n",
826 local->dev->name, 816 local->dev->name, hdr->addr2);
827 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
828 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5]);
829#endif 817#endif
830 local->comm_tallies.rx_discards_wep_undecryptable++; 818 local->comm_tallies.rx_discards_wep_undecryptable++;
831 goto rx_dropped; 819 goto rx_dropped;
@@ -839,9 +827,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
839 (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0) 827 (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
840 { 828 {
841 printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth " 829 printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth "
842 "from " MAC_FMT "\n", dev->name, 830 "from %pM\n", dev->name, hdr->addr2);
843 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
844 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5]);
845 /* TODO: could inform hostapd about this so that it 831 /* TODO: could inform hostapd about this so that it
846 * could send auth failure report */ 832 * could send auth failure report */
847 goto rx_dropped; 833 goto rx_dropped;
@@ -896,8 +882,6 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
896 from_assoc_ap = 1; 882 from_assoc_ap = 1;
897 } 883 }
898 884
899 dev->last_rx = jiffies;
900
901 if ((local->iw_mode == IW_MODE_MASTER || 885 if ((local->iw_mode == IW_MODE_MASTER ||
902 local->iw_mode == IW_MODE_REPEAT) && 886 local->iw_mode == IW_MODE_REPEAT) &&
903 !from_assoc_ap) { 887 !from_assoc_ap) {
@@ -1009,10 +993,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
1009 "unencrypted EAPOL frame\n", local->dev->name); 993 "unencrypted EAPOL frame\n", local->dev->name);
1010 } else { 994 } else {
1011 printk(KERN_DEBUG "%s: encryption configured, but RX " 995 printk(KERN_DEBUG "%s: encryption configured, but RX "
1012 "frame not encrypted (SA=" MAC_FMT ")\n", 996 "frame not encrypted (SA=%pM)\n",
1013 local->dev->name, 997 local->dev->name, hdr->addr2);
1014 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
1015 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5]);
1016 goto rx_dropped; 998 goto rx_dropped;
1017 } 999 }
1018 } 1000 }
@@ -1021,10 +1003,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
1021 !hostap_is_eapol_frame(local, skb)) { 1003 !hostap_is_eapol_frame(local, skb)) {
1022 if (net_ratelimit()) { 1004 if (net_ratelimit()) {
1023 printk(KERN_DEBUG "%s: dropped unencrypted RX data " 1005 printk(KERN_DEBUG "%s: dropped unencrypted RX data "
1024 "frame from " MAC_FMT " (drop_unencrypted=1)\n", 1006 "frame from %pM (drop_unencrypted=1)\n",
1025 dev->name, 1007 dev->name, hdr->addr2);
1026 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
1027 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5]);
1028 } 1008 }
1029 goto rx_dropped; 1009 goto rx_dropped;
1030 } 1010 }
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 921c984416f8..078a010f39a0 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -17,7 +17,6 @@ void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
17{ 17{
18 struct ieee80211_hdr_4addr *hdr; 18 struct ieee80211_hdr_4addr *hdr;
19 u16 fc; 19 u16 fc;
20 DECLARE_MAC_BUF(mac);
21 20
22 hdr = (struct ieee80211_hdr_4addr *) skb->data; 21 hdr = (struct ieee80211_hdr_4addr *) skb->data;
23 22
@@ -41,11 +40,11 @@ void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
41 printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id), 40 printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id),
42 le16_to_cpu(hdr->seq_ctl)); 41 le16_to_cpu(hdr->seq_ctl));
43 42
44 printk(KERN_DEBUG " A1=%s", print_mac(mac, hdr->addr1)); 43 printk(KERN_DEBUG " A1=%pM", hdr->addr1);
45 printk(" A2=%s", print_mac(mac, hdr->addr2)); 44 printk(" A2=%pM", hdr->addr2);
46 printk(" A3=%s", print_mac(mac, hdr->addr3)); 45 printk(" A3=%pM", hdr->addr3);
47 if (skb->len >= 30) 46 if (skb->len >= 30)
48 printk(" A4=%s", print_mac(mac, hdr->addr4)); 47 printk(" A4=%pM", hdr->addr4);
49 printk("\n"); 48 printk("\n");
50} 49}
51 50
@@ -307,7 +306,7 @@ int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev)
307 306
308/* Called only from software IRQ */ 307/* Called only from software IRQ */
309static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb, 308static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
310 struct ieee80211_crypt_data *crypt) 309 struct lib80211_crypt_data *crypt)
311{ 310{
312 struct hostap_interface *iface; 311 struct hostap_interface *iface;
313 local_info_t *local; 312 local_info_t *local;
@@ -328,10 +327,8 @@ static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
328 hdr = (struct ieee80211_hdr_4addr *) skb->data; 327 hdr = (struct ieee80211_hdr_4addr *) skb->data;
329 if (net_ratelimit()) { 328 if (net_ratelimit()) {
330 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " 329 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
331 "TX packet to " MAC_FMT "\n", 330 "TX packet to %pM\n",
332 local->dev->name, 331 local->dev->name, hdr->addr1);
333 hdr->addr1[0], hdr->addr1[1], hdr->addr1[2],
334 hdr->addr1[3], hdr->addr1[4], hdr->addr1[5]);
335 } 332 }
336 kfree_skb(skb); 333 kfree_skb(skb);
337 return NULL; 334 return NULL;
@@ -408,7 +405,7 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
408 if (local->host_encrypt) { 405 if (local->host_encrypt) {
409 /* Set crypt to default algorithm and key; will be replaced in 406 /* Set crypt to default algorithm and key; will be replaced in
410 * AP code if STA has own alg/key */ 407 * AP code if STA has own alg/key */
411 tx.crypt = local->crypt[local->tx_keyidx]; 408 tx.crypt = local->crypt_info.crypt[local->crypt_info.tx_keyidx];
412 tx.host_encrypt = 1; 409 tx.host_encrypt = 1;
413 } else { 410 } else {
414 tx.crypt = NULL; 411 tx.crypt = NULL;
@@ -490,7 +487,9 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
490 487
491 if (tx.crypt && (!tx.crypt->ops || !tx.crypt->ops->encrypt_mpdu)) 488 if (tx.crypt && (!tx.crypt->ops || !tx.crypt->ops->encrypt_mpdu))
492 tx.crypt = NULL; 489 tx.crypt = NULL;
493 else if ((tx.crypt || local->crypt[local->tx_keyidx]) && !no_encrypt) { 490 else if ((tx.crypt ||
491 local->crypt_info.crypt[local->crypt_info.tx_keyidx]) &&
492 !no_encrypt) {
494 /* Add ISWEP flag both for firmware and host based encryption 493 /* Add ISWEP flag both for firmware and host based encryption
495 */ 494 */
496 fc |= IEEE80211_FCTL_PROTECTED; 495 fc |= IEEE80211_FCTL_PROTECTED;
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index af3d4ef2a80b..0903db786d5f 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -94,7 +94,6 @@ static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta)
94static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta) 94static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta)
95{ 95{
96 struct sta_info *s; 96 struct sta_info *s;
97 DECLARE_MAC_BUF(mac);
98 97
99 s = ap->sta_hash[STA_HASH(sta->addr)]; 98 s = ap->sta_hash[STA_HASH(sta->addr)];
100 if (s == NULL) return; 99 if (s == NULL) return;
@@ -109,20 +108,18 @@ static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta)
109 if (s->hnext != NULL) 108 if (s->hnext != NULL)
110 s->hnext = s->hnext->hnext; 109 s->hnext = s->hnext->hnext;
111 else 110 else
112 printk("AP: could not remove STA %s" 111 printk("AP: could not remove STA %pM from hash table\n",
113 " from hash table\n", 112 sta->addr);
114 print_mac(mac, sta->addr));
115} 113}
116 114
117static void ap_free_sta(struct ap_data *ap, struct sta_info *sta) 115static void ap_free_sta(struct ap_data *ap, struct sta_info *sta)
118{ 116{
119 DECLARE_MAC_BUF(mac);
120 if (sta->ap && sta->local) 117 if (sta->ap && sta->local)
121 hostap_event_expired_sta(sta->local->dev, sta); 118 hostap_event_expired_sta(sta->local->dev, sta);
122 119
123 if (ap->proc != NULL) { 120 if (ap->proc != NULL) {
124 char name[20]; 121 char name[20];
125 sprintf(name, "%s", print_mac(mac, sta->addr)); 122 sprintf(name, "%pM", sta->addr);
126 remove_proc_entry(name, ap->proc); 123 remove_proc_entry(name, ap->proc);
127 } 124 }
128 125
@@ -185,7 +182,6 @@ static void ap_handle_timer(unsigned long data)
185 struct ap_data *ap; 182 struct ap_data *ap;
186 unsigned long next_time = 0; 183 unsigned long next_time = 0;
187 int was_assoc; 184 int was_assoc;
188 DECLARE_MAC_BUF(mac);
189 185
190 if (sta == NULL || sta->local == NULL || sta->local->ap == NULL) { 186 if (sta == NULL || sta->local == NULL || sta->local->ap == NULL) {
191 PDEBUG(DEBUG_AP, "ap_handle_timer() called with NULL data\n"); 187 PDEBUG(DEBUG_AP, "ap_handle_timer() called with NULL data\n");
@@ -242,8 +238,8 @@ static void ap_handle_timer(unsigned long data)
242 if (sta->ap) { 238 if (sta->ap) {
243 if (ap->autom_ap_wds) { 239 if (ap->autom_ap_wds) {
244 PDEBUG(DEBUG_AP, "%s: removing automatic WDS " 240 PDEBUG(DEBUG_AP, "%s: removing automatic WDS "
245 "connection to AP %s\n", 241 "connection to AP %pM\n",
246 local->dev->name, print_mac(mac, sta->addr)); 242 local->dev->name, sta->addr);
247 hostap_wds_link_oper(local, sta->addr, WDS_DEL); 243 hostap_wds_link_oper(local, sta->addr, WDS_DEL);
248 } 244 }
249 } else if (sta->timeout_next == STA_NULLFUNC) { 245 } else if (sta->timeout_next == STA_NULLFUNC) {
@@ -259,11 +255,11 @@ static void ap_handle_timer(unsigned long data)
259 } else { 255 } else {
260 int deauth = sta->timeout_next == STA_DEAUTH; 256 int deauth = sta->timeout_next == STA_DEAUTH;
261 __le16 resp; 257 __le16 resp;
262 PDEBUG(DEBUG_AP, "%s: sending %s info to STA %s" 258 PDEBUG(DEBUG_AP, "%s: sending %s info to STA %pM"
263 "(last=%lu, jiffies=%lu)\n", 259 "(last=%lu, jiffies=%lu)\n",
264 local->dev->name, 260 local->dev->name,
265 deauth ? "deauthentication" : "disassociation", 261 deauth ? "deauthentication" : "disassociation",
266 print_mac(mac, sta->addr), sta->last_rx, jiffies); 262 sta->addr, sta->last_rx, jiffies);
267 263
268 resp = cpu_to_le16(deauth ? WLAN_REASON_PREV_AUTH_NOT_VALID : 264 resp = cpu_to_le16(deauth ? WLAN_REASON_PREV_AUTH_NOT_VALID :
269 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); 265 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
@@ -275,10 +271,10 @@ static void ap_handle_timer(unsigned long data)
275 271
276 if (sta->timeout_next == STA_DEAUTH) { 272 if (sta->timeout_next == STA_DEAUTH) {
277 if (sta->flags & WLAN_STA_PERM) { 273 if (sta->flags & WLAN_STA_PERM) {
278 PDEBUG(DEBUG_AP, "%s: STA %s" 274 PDEBUG(DEBUG_AP, "%s: STA %pM"
279 " would have been removed, " 275 " would have been removed, "
280 "but it has 'perm' flag\n", 276 "but it has 'perm' flag\n",
281 local->dev->name, print_mac(mac, sta->addr)); 277 local->dev->name, sta->addr);
282 } else 278 } else
283 ap_free_sta(ap, sta); 279 ap_free_sta(ap, sta);
284 return; 280 return;
@@ -332,7 +328,6 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
332 struct ap_data *ap = (struct ap_data *) data; 328 struct ap_data *ap = (struct ap_data *) data;
333 char *policy_txt; 329 char *policy_txt;
334 struct mac_entry *entry; 330 struct mac_entry *entry;
335 DECLARE_MAC_BUF(mac);
336 331
337 if (off != 0) { 332 if (off != 0) {
338 *eof = 1; 333 *eof = 1;
@@ -363,7 +358,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
363 break; 358 break;
364 } 359 }
365 360
366 p += sprintf(p, "%s\n", print_mac(mac, entry->addr)); 361 p += sprintf(p, "%pM\n", entry->addr);
367 } 362 }
368 spin_unlock_bh(&ap->mac_restrictions.lock); 363 spin_unlock_bh(&ap->mac_restrictions.lock);
369 364
@@ -520,7 +515,6 @@ static int prism2_ap_proc_read(char *page, char **start, off_t off,
520 struct ap_data *ap = (struct ap_data *) data; 515 struct ap_data *ap = (struct ap_data *) data;
521 struct sta_info *sta; 516 struct sta_info *sta;
522 int i; 517 int i;
523 DECLARE_MAC_BUF(mac);
524 518
525 if (off > PROC_LIMIT) { 519 if (off > PROC_LIMIT) {
526 *eof = 1; 520 *eof = 1;
@@ -533,8 +527,8 @@ static int prism2_ap_proc_read(char *page, char **start, off_t off,
533 if (!sta->ap) 527 if (!sta->ap)
534 continue; 528 continue;
535 529
536 p += sprintf(p, "%s %d %d %d %d '", 530 p += sprintf(p, "%pM %d %d %d %d '",
537 print_mac(mac, sta->addr), 531 sta->addr,
538 sta->u.ap.channel, sta->last_rx_signal, 532 sta->u.ap.channel, sta->last_rx_signal,
539 sta->last_rx_silence, sta->last_rx_rate); 533 sta->last_rx_silence, sta->last_rx_rate);
540 for (i = 0; i < sta->u.ap.ssid_len; i++) 534 for (i = 0; i < sta->u.ap.ssid_len; i++)
@@ -683,11 +677,9 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data)
683 if (sta) 677 if (sta)
684 atomic_dec(&sta->users); 678 atomic_dec(&sta->users);
685 if (txt) { 679 if (txt) {
686 PDEBUG(DEBUG_AP, "%s: " MAC_FMT " auth_cb - alg=%d " 680 PDEBUG(DEBUG_AP, "%s: %pM auth_cb - alg=%d "
687 "trans#=%d status=%d - %s\n", 681 "trans#=%d status=%d - %s\n",
688 dev->name, 682 dev->name, hdr->addr1,
689 hdr->addr1[0], hdr->addr1[1], hdr->addr1[2],
690 hdr->addr1[3], hdr->addr1[4], hdr->addr1[5],
691 auth_alg, auth_transaction, status, txt); 683 auth_alg, auth_transaction, status, txt);
692 } 684 }
693 dev_kfree_skb(skb); 685 dev_kfree_skb(skb);
@@ -754,11 +746,8 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
754 if (sta) 746 if (sta)
755 atomic_dec(&sta->users); 747 atomic_dec(&sta->users);
756 if (txt) { 748 if (txt) {
757 PDEBUG(DEBUG_AP, "%s: " MAC_FMT " assoc_cb - %s\n", 749 PDEBUG(DEBUG_AP, "%s: %pM assoc_cb - %s\n",
758 dev->name, 750 dev->name, hdr->addr1, txt);
759 hdr->addr1[0], hdr->addr1[1], hdr->addr1[2],
760 hdr->addr1[3], hdr->addr1[4], hdr->addr1[5],
761 txt);
762 } 751 }
763 dev_kfree_skb(skb); 752 dev_kfree_skb(skb);
764} 753}
@@ -781,11 +770,9 @@ static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data)
781 sta->flags &= ~WLAN_STA_PENDING_POLL; 770 sta->flags &= ~WLAN_STA_PENDING_POLL;
782 spin_unlock(&ap->sta_table_lock); 771 spin_unlock(&ap->sta_table_lock);
783 } else { 772 } else {
784 PDEBUG(DEBUG_AP, "%s: STA " MAC_FMT 773 PDEBUG(DEBUG_AP,
785 " did not ACK activity poll frame\n", 774 "%s: STA %pM did not ACK activity poll frame\n",
786 ap->local->dev->name, 775 ap->local->dev->name, hdr->addr1);
787 hdr->addr1[0], hdr->addr1[1], hdr->addr1[2],
788 hdr->addr1[3], hdr->addr1[4], hdr->addr1[5]);
789 } 776 }
790 777
791 fail: 778 fail:
@@ -1002,7 +989,6 @@ static int prism2_sta_proc_read(char *page, char **start, off_t off,
1002 char *p = page; 989 char *p = page;
1003 struct sta_info *sta = (struct sta_info *) data; 990 struct sta_info *sta = (struct sta_info *) data;
1004 int i; 991 int i;
1005 DECLARE_MAC_BUF(mac);
1006 992
1007 /* FIX: possible race condition.. the STA data could have just expired, 993 /* FIX: possible race condition.. the STA data could have just expired,
1008 * but proc entry was still here so that the read could have started; 994 * but proc entry was still here so that the read could have started;
@@ -1013,11 +999,11 @@ static int prism2_sta_proc_read(char *page, char **start, off_t off,
1013 return 0; 999 return 0;
1014 } 1000 }
1015 1001
1016 p += sprintf(p, "%s=%s\nusers=%d\naid=%d\n" 1002 p += sprintf(p, "%s=%pM\nusers=%d\naid=%d\n"
1017 "flags=0x%04x%s%s%s%s%s%s%s\n" 1003 "flags=0x%04x%s%s%s%s%s%s%s\n"
1018 "capability=0x%02x\nlisten_interval=%d\nsupported_rates=", 1004 "capability=0x%02x\nlisten_interval=%d\nsupported_rates=",
1019 sta->ap ? "AP" : "STA", 1005 sta->ap ? "AP" : "STA",
1020 print_mac(mac, sta->addr), atomic_read(&sta->users), sta->aid, 1006 sta->addr, atomic_read(&sta->users), sta->aid,
1021 sta->flags, 1007 sta->flags,
1022 sta->flags & WLAN_STA_AUTH ? " AUTH" : "", 1008 sta->flags & WLAN_STA_AUTH ? " AUTH" : "",
1023 sta->flags & WLAN_STA_ASSOC ? " ASSOC" : "", 1009 sta->flags & WLAN_STA_ASSOC ? " ASSOC" : "",
@@ -1078,7 +1064,6 @@ static void handle_add_proc_queue(struct work_struct *work)
1078 struct sta_info *sta; 1064 struct sta_info *sta;
1079 char name[20]; 1065 char name[20];
1080 struct add_sta_proc_data *entry, *prev; 1066 struct add_sta_proc_data *entry, *prev;
1081 DECLARE_MAC_BUF(mac);
1082 1067
1083 entry = ap->add_sta_proc_entries; 1068 entry = ap->add_sta_proc_entries;
1084 ap->add_sta_proc_entries = NULL; 1069 ap->add_sta_proc_entries = NULL;
@@ -1091,7 +1076,7 @@ static void handle_add_proc_queue(struct work_struct *work)
1091 spin_unlock_bh(&ap->sta_table_lock); 1076 spin_unlock_bh(&ap->sta_table_lock);
1092 1077
1093 if (sta) { 1078 if (sta) {
1094 sprintf(name, "%s", print_mac(mac, sta->addr)); 1079 sprintf(name, "%pM", sta->addr);
1095 sta->proc = create_proc_read_entry( 1080 sta->proc = create_proc_read_entry(
1096 name, 0, ap->proc, 1081 name, 0, ap->proc,
1097 prism2_sta_proc_read, sta); 1082 prism2_sta_proc_read, sta);
@@ -1221,7 +1206,7 @@ static void prism2_check_tx_rates(struct sta_info *sta)
1221 1206
1222static void ap_crypt_init(struct ap_data *ap) 1207static void ap_crypt_init(struct ap_data *ap)
1223{ 1208{
1224 ap->crypt = ieee80211_get_crypto_ops("WEP"); 1209 ap->crypt = lib80211_get_crypto_ops("WEP");
1225 1210
1226 if (ap->crypt) { 1211 if (ap->crypt) {
1227 if (ap->crypt->init) { 1212 if (ap->crypt->init) {
@@ -1239,7 +1224,7 @@ static void ap_crypt_init(struct ap_data *ap)
1239 1224
1240 if (ap->crypt == NULL) { 1225 if (ap->crypt == NULL) {
1241 printk(KERN_WARNING "AP could not initialize WEP: load module " 1226 printk(KERN_WARNING "AP could not initialize WEP: load module "
1242 "ieee80211_crypt_wep.ko\n"); 1227 "lib80211_crypt_wep.ko\n");
1243 } 1228 }
1244} 1229}
1245 1230
@@ -1308,7 +1293,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb,
1308 __le16 *pos; 1293 __le16 *pos;
1309 u16 resp = WLAN_STATUS_SUCCESS, fc; 1294 u16 resp = WLAN_STATUS_SUCCESS, fc;
1310 struct sta_info *sta = NULL; 1295 struct sta_info *sta = NULL;
1311 struct ieee80211_crypt_data *crypt; 1296 struct lib80211_crypt_data *crypt;
1312 char *txt = ""; 1297 char *txt = "";
1313 1298
1314 len = skb->len - IEEE80211_MGMT_HDR_LEN; 1299 len = skb->len - IEEE80211_MGMT_HDR_LEN;
@@ -1318,9 +1303,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb,
1318 1303
1319 if (len < 6) { 1304 if (len < 6) {
1320 PDEBUG(DEBUG_AP, "%s: handle_authen - too short payload " 1305 PDEBUG(DEBUG_AP, "%s: handle_authen - too short payload "
1321 "(len=%d) from " MAC_FMT "\n", dev->name, len, 1306 "(len=%d) from %pM\n", dev->name, len, hdr->addr2);
1322 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
1323 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5]);
1324 return; 1307 return;
1325 } 1308 }
1326 1309
@@ -1336,7 +1319,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb,
1336 int idx = 0; 1319 int idx = 0;
1337 if (skb->len >= hdrlen + 3) 1320 if (skb->len >= hdrlen + 3)
1338 idx = skb->data[hdrlen + 3] >> 6; 1321 idx = skb->data[hdrlen + 3] >> 6;
1339 crypt = local->crypt[idx]; 1322 crypt = local->crypt_info.crypt[idx];
1340 } 1323 }
1341 1324
1342 pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN); 1325 pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
@@ -1385,10 +1368,8 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb,
1385 if (time_after(jiffies, sta->u.ap.last_beacon + 1368 if (time_after(jiffies, sta->u.ap.last_beacon +
1386 (10 * sta->listen_interval * HZ) / 1024)) { 1369 (10 * sta->listen_interval * HZ) / 1024)) {
1387 PDEBUG(DEBUG_AP, "%s: no beacons received for a while," 1370 PDEBUG(DEBUG_AP, "%s: no beacons received for a while,"
1388 " assuming AP " MAC_FMT " is now STA\n", 1371 " assuming AP %pM is now STA\n",
1389 dev->name, 1372 dev->name, sta->addr);
1390 sta->addr[0], sta->addr[1], sta->addr[2],
1391 sta->addr[3], sta->addr[4], sta->addr[5]);
1392 sta->ap = 0; 1373 sta->ap = 0;
1393 sta->flags = 0; 1374 sta->flags = 0;
1394 sta->u.sta.challenge = NULL; 1375 sta->u.sta.challenge = NULL;
@@ -1503,11 +1484,9 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb,
1503 } 1484 }
1504 1485
1505 if (resp) { 1486 if (resp) {
1506 PDEBUG(DEBUG_AP, "%s: " MAC_FMT " auth (alg=%d " 1487 PDEBUG(DEBUG_AP, "%s: %pM auth (alg=%d "
1507 "trans#=%d stat=%d len=%d fc=%04x) ==> %d (%s)\n", 1488 "trans#=%d stat=%d len=%d fc=%04x) ==> %d (%s)\n",
1508 dev->name, 1489 dev->name, hdr->addr2,
1509 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
1510 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5],
1511 auth_alg, auth_transaction, status_code, len, 1490 auth_alg, auth_transaction, status_code, len,
1512 fc, resp, txt); 1491 fc, resp, txt);
1513 } 1492 }
@@ -1533,10 +1512,8 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb,
1533 1512
1534 if (len < (reassoc ? 10 : 4)) { 1513 if (len < (reassoc ? 10 : 4)) {
1535 PDEBUG(DEBUG_AP, "%s: handle_assoc - too short payload " 1514 PDEBUG(DEBUG_AP, "%s: handle_assoc - too short payload "
1536 "(len=%d, reassoc=%d) from " MAC_FMT "\n", 1515 "(len=%d, reassoc=%d) from %pM\n",
1537 dev->name, len, reassoc, 1516 dev->name, len, reassoc, hdr->addr2);
1538 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
1539 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5]);
1540 return; 1517 return;
1541 } 1518 }
1542 1519
@@ -1613,12 +1590,9 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb,
1613 } 1590 }
1614 1591
1615 if (left > 0) { 1592 if (left > 0) {
1616 PDEBUG(DEBUG_AP, "%s: assoc from " MAC_FMT 1593 PDEBUG(DEBUG_AP, "%s: assoc from %pM"
1617 " with extra data (%d bytes) [", 1594 " with extra data (%d bytes) [",
1618 dev->name, 1595 dev->name, hdr->addr2, left);
1619 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
1620 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5],
1621 left);
1622 while (left > 0) { 1596 while (left > 0) {
1623 PDEBUG2(DEBUG_AP, "<%02x>", *u); 1597 PDEBUG2(DEBUG_AP, "<%02x>", *u);
1624 u++; left--; 1598 u++; left--;
@@ -1717,14 +1691,12 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb,
1717 } 1691 }
1718 1692
1719#if 0 1693#if 0
1720 PDEBUG(DEBUG_AP, "%s: " MAC_FMT" %sassoc (len=%d " 1694 PDEBUG(DEBUG_AP, "%s: %pM %sassoc (len=%d "
1721 "prev_ap=" MAC_FMT") => %d(%d) (%s)\n", 1695 "prev_ap=%pM) => %d(%d) (%s)\n",
1722 dev->name, 1696 dev->name,
1723 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2], 1697 hdr->addr2,
1724 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5],
1725 reassoc ? "re" : "", len, 1698 reassoc ? "re" : "", len,
1726 prev_ap[0], prev_ap[1], prev_ap[2], 1699 prev_ap,
1727 prev_ap[3], prev_ap[4], prev_ap[5],
1728 resp, send_deauth, txt); 1700 resp, send_deauth, txt);
1729#endif 1701#endif
1730} 1702}
@@ -1741,7 +1713,6 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb,
1741 u16 reason_code; 1713 u16 reason_code;
1742 __le16 *pos; 1714 __le16 *pos;
1743 struct sta_info *sta = NULL; 1715 struct sta_info *sta = NULL;
1744 DECLARE_MAC_BUF(mac);
1745 1716
1746 len = skb->len - IEEE80211_MGMT_HDR_LEN; 1717 len = skb->len - IEEE80211_MGMT_HDR_LEN;
1747 1718
@@ -1753,10 +1724,8 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb,
1753 pos = (__le16 *) body; 1724 pos = (__le16 *) body;
1754 reason_code = le16_to_cpu(*pos); 1725 reason_code = le16_to_cpu(*pos);
1755 1726
1756 PDEBUG(DEBUG_AP, "%s: deauthentication: " MAC_FMT " len=%d, " 1727 PDEBUG(DEBUG_AP, "%s: deauthentication: %pM len=%d, "
1757 "reason_code=%d\n", dev->name, 1728 "reason_code=%d\n", dev->name, hdr->addr2,
1758 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
1759 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5],
1760 len, reason_code); 1729 len, reason_code);
1761 1730
1762 spin_lock_bh(&local->ap->sta_table_lock); 1731 spin_lock_bh(&local->ap->sta_table_lock);
@@ -1768,11 +1737,9 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb,
1768 } 1737 }
1769 spin_unlock_bh(&local->ap->sta_table_lock); 1738 spin_unlock_bh(&local->ap->sta_table_lock);
1770 if (sta == NULL) { 1739 if (sta == NULL) {
1771 printk("%s: deauthentication from " MAC_FMT ", " 1740 printk("%s: deauthentication from %pM, "
1772 "reason_code=%d, but STA not authenticated\n", dev->name, 1741 "reason_code=%d, but STA not authenticated\n", dev->name,
1773 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2], 1742 hdr->addr2, reason_code);
1774 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5],
1775 reason_code);
1776 } 1743 }
1777} 1744}
1778 1745
@@ -1799,10 +1766,8 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
1799 pos = (__le16 *) body; 1766 pos = (__le16 *) body;
1800 reason_code = le16_to_cpu(*pos); 1767 reason_code = le16_to_cpu(*pos);
1801 1768
1802 PDEBUG(DEBUG_AP, "%s: disassociation: " MAC_FMT " len=%d, " 1769 PDEBUG(DEBUG_AP, "%s: disassociation: %pM len=%d, "
1803 "reason_code=%d\n", dev->name, 1770 "reason_code=%d\n", dev->name, hdr->addr2,
1804 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
1805 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5],
1806 len, reason_code); 1771 len, reason_code);
1807 1772
1808 spin_lock_bh(&local->ap->sta_table_lock); 1773 spin_lock_bh(&local->ap->sta_table_lock);
@@ -1814,12 +1779,9 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
1814 } 1779 }
1815 spin_unlock_bh(&local->ap->sta_table_lock); 1780 spin_unlock_bh(&local->ap->sta_table_lock);
1816 if (sta == NULL) { 1781 if (sta == NULL) {
1817 printk("%s: disassociation from " MAC_FMT ", " 1782 printk("%s: disassociation from %pM, "
1818 "reason_code=%d, but STA not authenticated\n", 1783 "reason_code=%d, but STA not authenticated\n",
1819 dev->name, 1784 dev->name, hdr->addr2, reason_code);
1820 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
1821 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5],
1822 reason_code);
1823 } 1785 }
1824} 1786}
1825 1787
@@ -1909,19 +1871,14 @@ static void handle_pspoll(local_info_t *local,
1909 u16 aid; 1871 u16 aid;
1910 struct sk_buff *skb; 1872 struct sk_buff *skb;
1911 1873
1912 PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=" MAC_FMT 1874 PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=%pM, TA=%pM PWRMGT=%d\n",
1913 ", TA=" MAC_FMT " PWRMGT=%d\n", 1875 hdr->addr1, hdr->addr2,
1914 hdr->addr1[0], hdr->addr1[1], hdr->addr1[2],
1915 hdr->addr1[3], hdr->addr1[4], hdr->addr1[5],
1916 hdr->addr2[0], hdr->addr2[1], hdr->addr2[2],
1917 hdr->addr2[3], hdr->addr2[4], hdr->addr2[5],
1918 !!(le16_to_cpu(hdr->frame_ctl) & IEEE80211_FCTL_PM)); 1876 !!(le16_to_cpu(hdr->frame_ctl) & IEEE80211_FCTL_PM));
1919 1877
1920 if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) { 1878 if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
1921 PDEBUG(DEBUG_AP, "handle_pspoll - addr1(BSSID)=" MAC_FMT 1879 PDEBUG(DEBUG_AP,
1922 " not own MAC\n", 1880 "handle_pspoll - addr1(BSSID)=%pM not own MAC\n",
1923 hdr->addr1[0], hdr->addr1[1], hdr->addr1[2], 1881 hdr->addr1);
1924 hdr->addr1[3], hdr->addr1[4], hdr->addr1[5]);
1925 return; 1882 return;
1926 } 1883 }
1927 1884
@@ -2007,11 +1964,10 @@ static void handle_wds_oper_queue(struct work_struct *work)
2007 1964
2008 while (entry) { 1965 while (entry) {
2009 PDEBUG(DEBUG_AP, "%s: %s automatic WDS connection " 1966 PDEBUG(DEBUG_AP, "%s: %s automatic WDS connection "
2010 "to AP " MAC_FMT "\n", 1967 "to AP %pM\n",
2011 local->dev->name, 1968 local->dev->name,
2012 entry->type == WDS_ADD ? "adding" : "removing", 1969 entry->type == WDS_ADD ? "adding" : "removing",
2013 entry->addr[0], entry->addr[1], entry->addr[2], 1970 entry->addr);
2014 entry->addr[3], entry->addr[4], entry->addr[5]);
2015 if (entry->type == WDS_ADD) 1971 if (entry->type == WDS_ADD)
2016 prism2_wds_add(local, entry->addr, 0); 1972 prism2_wds_add(local, entry->addr, 0);
2017 else if (entry->type == WDS_DEL) 1973 else if (entry->type == WDS_DEL)
@@ -2215,10 +2171,8 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
2215 } 2171 }
2216 2172
2217 if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) { 2173 if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
2218 PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)=" 2174 PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)=%pM"
2219 MAC_FMT " not own MAC\n", 2175 " not own MAC\n", hdr->addr1);
2220 hdr->addr1[0], hdr->addr1[1], hdr->addr1[2],
2221 hdr->addr1[3], hdr->addr1[4], hdr->addr1[5]);
2222 goto done; 2176 goto done;
2223 } 2177 }
2224 2178
@@ -2254,18 +2208,14 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
2254 } 2208 }
2255 2209
2256 if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) { 2210 if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
2257 PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=" MAC_FMT 2211 PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=%pM"
2258 " not own MAC\n", 2212 " not own MAC\n", hdr->addr1);
2259 hdr->addr1[0], hdr->addr1[1], hdr->addr1[2],
2260 hdr->addr1[3], hdr->addr1[4], hdr->addr1[5]);
2261 goto done; 2213 goto done;
2262 } 2214 }
2263 2215
2264 if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN)) { 2216 if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN)) {
2265 PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=" MAC_FMT 2217 PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=%pM"
2266 " not own MAC\n", 2218 " not own MAC\n", hdr->addr3);
2267 hdr->addr3[0], hdr->addr3[1], hdr->addr3[2],
2268 hdr->addr3[3], hdr->addr3[4], hdr->addr3[5]);
2269 goto done; 2219 goto done;
2270 } 2220 }
2271 2221
@@ -2366,10 +2316,9 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
2366 memcpy(hdr->addr2, sta->addr, ETH_ALEN); 2316 memcpy(hdr->addr2, sta->addr, ETH_ALEN);
2367 hdr->duration_id = cpu_to_le16(sta->aid | BIT(15) | BIT(14)); 2317 hdr->duration_id = cpu_to_le16(sta->aid | BIT(15) | BIT(14));
2368 2318
2369 PDEBUG(DEBUG_PS2, "%s: Scheduling buffered packet delivery for STA " 2319 PDEBUG(DEBUG_PS2,
2370 MAC_FMT "\n", local->dev->name, 2320 "%s: Scheduling buffered packet delivery for STA %pM\n",
2371 sta->addr[0], sta->addr[1], sta->addr[2], 2321 local->dev->name, sta->addr);
2372 sta->addr[3], sta->addr[4], sta->addr[5]);
2373 2322
2374 skb->dev = local->dev; 2323 skb->dev = local->dev;
2375 2324
@@ -2723,12 +2672,8 @@ static int ap_update_sta_tx_rate(struct sta_info *sta, struct net_device *dev)
2723 case 3: sta->tx_rate = 110; break; 2672 case 3: sta->tx_rate = 110; break;
2724 default: sta->tx_rate = 0; break; 2673 default: sta->tx_rate = 0; break;
2725 } 2674 }
2726 PDEBUG(DEBUG_AP, "%s: STA " MAC_FMT 2675 PDEBUG(DEBUG_AP, "%s: STA %pM TX rate raised to %d\n",
2727 " TX rate raised to %d\n", 2676 dev->name, sta->addr, sta->tx_rate);
2728 dev->name,
2729 sta->addr[0], sta->addr[1], sta->addr[2],
2730 sta->addr[3], sta->addr[4], sta->addr[5],
2731 sta->tx_rate);
2732 } 2677 }
2733 sta->tx_since_last_failure = 0; 2678 sta->tx_since_last_failure = 0;
2734 } 2679 }
@@ -2781,9 +2726,7 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
2781 * print out any errors here. */ 2726 * print out any errors here. */
2782 if (net_ratelimit()) { 2727 if (net_ratelimit()) {
2783 printk(KERN_DEBUG "AP: drop packet to non-associated " 2728 printk(KERN_DEBUG "AP: drop packet to non-associated "
2784 "STA " MAC_FMT "\n", 2729 "STA %pM\n", hdr->addr1);
2785 hdr->addr1[0], hdr->addr1[1], hdr->addr1[2],
2786 hdr->addr1[3], hdr->addr1[4], hdr->addr1[5]);
2787 } 2730 }
2788#endif 2731#endif
2789 local->ap->tx_drop_nonassoc++; 2732 local->ap->tx_drop_nonassoc++;
@@ -2821,11 +2764,9 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
2821 } 2764 }
2822 2765
2823 if (skb_queue_len(&sta->tx_buf) >= STA_MAX_TX_BUFFER) { 2766 if (skb_queue_len(&sta->tx_buf) >= STA_MAX_TX_BUFFER) {
2824 PDEBUG(DEBUG_PS, "%s: No more space in STA (" MAC_FMT 2767 PDEBUG(DEBUG_PS, "%s: No more space in STA (%pM)'s"
2825 ")'s PS mode buffer\n", 2768 "PS mode buffer\n",
2826 local->dev->name, 2769 local->dev->name, sta->addr);
2827 sta->addr[0], sta->addr[1], sta->addr[2],
2828 sta->addr[3], sta->addr[4], sta->addr[5]);
2829 /* Make sure that TIM is set for the station (it might not be 2770 /* Make sure that TIM is set for the station (it might not be
2830 * after AP wlan hw reset). */ 2771 * after AP wlan hw reset). */
2831 /* FIX: should fix hw reset to restore bits based on STA 2772 /* FIX: should fix hw reset to restore bits based on STA
@@ -2897,12 +2838,9 @@ void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb)
2897 sta = ap_get_sta(local->ap, hdr->addr1); 2838 sta = ap_get_sta(local->ap, hdr->addr1);
2898 if (!sta) { 2839 if (!sta) {
2899 spin_unlock(&local->ap->sta_table_lock); 2840 spin_unlock(&local->ap->sta_table_lock);
2900 PDEBUG(DEBUG_AP, "%s: Could not find STA " MAC_FMT 2841 PDEBUG(DEBUG_AP, "%s: Could not find STA %pM"
2901 " for this TX error (@%lu)\n", 2842 " for this TX error (@%lu)\n",
2902 local->dev->name, 2843 local->dev->name, hdr->addr1, jiffies);
2903 hdr->addr1[0], hdr->addr1[1], hdr->addr1[2],
2904 hdr->addr1[3], hdr->addr1[4], hdr->addr1[5],
2905 jiffies);
2906 return; 2844 return;
2907 } 2845 }
2908 2846
@@ -2929,12 +2867,9 @@ void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb)
2929 case 3: sta->tx_rate = 110; break; 2867 case 3: sta->tx_rate = 110; break;
2930 default: sta->tx_rate = 0; break; 2868 default: sta->tx_rate = 0; break;
2931 } 2869 }
2932 PDEBUG(DEBUG_AP, "%s: STA " MAC_FMT 2870 PDEBUG(DEBUG_AP,
2933 " TX rate lowered to %d\n", 2871 "%s: STA %pM TX rate lowered to %d\n",
2934 local->dev->name, 2872 local->dev->name, sta->addr, sta->tx_rate);
2935 sta->addr[0], sta->addr[1], sta->addr[2],
2936 sta->addr[3], sta->addr[4], sta->addr[5],
2937 sta->tx_rate);
2938 } 2873 }
2939 sta->tx_consecutive_exc = 0; 2874 sta->tx_consecutive_exc = 0;
2940 } 2875 }
@@ -2945,17 +2880,16 @@ void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb)
2945static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta, 2880static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta,
2946 int pwrmgt, int type, int stype) 2881 int pwrmgt, int type, int stype)
2947{ 2882{
2948 DECLARE_MAC_BUF(mac);
2949 if (pwrmgt && !(sta->flags & WLAN_STA_PS)) { 2883 if (pwrmgt && !(sta->flags & WLAN_STA_PS)) {
2950 sta->flags |= WLAN_STA_PS; 2884 sta->flags |= WLAN_STA_PS;
2951 PDEBUG(DEBUG_PS2, "STA %s changed to use PS " 2885 PDEBUG(DEBUG_PS2, "STA %pM changed to use PS "
2952 "mode (type=0x%02X, stype=0x%02X)\n", 2886 "mode (type=0x%02X, stype=0x%02X)\n",
2953 print_mac(mac, sta->addr), type >> 2, stype >> 4); 2887 sta->addr, type >> 2, stype >> 4);
2954 } else if (!pwrmgt && (sta->flags & WLAN_STA_PS)) { 2888 } else if (!pwrmgt && (sta->flags & WLAN_STA_PS)) {
2955 sta->flags &= ~WLAN_STA_PS; 2889 sta->flags &= ~WLAN_STA_PS;
2956 PDEBUG(DEBUG_PS2, "STA %s changed to not use " 2890 PDEBUG(DEBUG_PS2, "STA %pM changed to not use "
2957 "PS mode (type=0x%02X, stype=0x%02X)\n", 2891 "PS mode (type=0x%02X, stype=0x%02X)\n",
2958 print_mac(mac, sta->addr), type >> 2, stype >> 4); 2892 sta->addr, type >> 2, stype >> 4);
2959 if (type != IEEE80211_FTYPE_CTL || 2893 if (type != IEEE80211_FTYPE_CTL ||
2960 stype != IEEE80211_STYPE_PSPOLL) 2894 stype != IEEE80211_STYPE_PSPOLL)
2961 schedule_packet_send(local, sta); 2895 schedule_packet_send(local, sta);
@@ -3029,13 +2963,9 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
3029#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT 2963#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
3030 } else { 2964 } else {
3031 printk(KERN_DEBUG "%s: dropped received packet" 2965 printk(KERN_DEBUG "%s: dropped received packet"
3032 " from non-associated STA " 2966 " from non-associated STA %pM"
3033 MAC_FMT
3034 " (type=0x%02x, subtype=0x%02x)\n", 2967 " (type=0x%02x, subtype=0x%02x)\n",
3035 dev->name, 2968 dev->name, hdr->addr2,
3036 hdr->addr2[0], hdr->addr2[1],
3037 hdr->addr2[2], hdr->addr2[3],
3038 hdr->addr2[4], hdr->addr2[5],
3039 type >> 2, stype >> 4); 2969 type >> 2, stype >> 4);
3040 hostap_rx(dev, skb, rx_stats); 2970 hostap_rx(dev, skb, rx_stats);
3041#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 2971#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
@@ -3068,13 +2998,9 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
3068 * after being unavailable for some time. Speed up 2998 * after being unavailable for some time. Speed up
3069 * re-association by informing the station about it not 2999 * re-association by informing the station about it not
3070 * being associated. */ 3000 * being associated. */
3071 printk(KERN_DEBUG "%s: rejected received nullfunc " 3001 printk(KERN_DEBUG "%s: rejected received nullfunc frame"
3072 "frame without ToDS from not associated STA " 3002 " without ToDS from not associated STA %pM\n",
3073 MAC_FMT "\n", 3003 dev->name, hdr->addr2);
3074 dev->name,
3075 hdr->addr2[0], hdr->addr2[1],
3076 hdr->addr2[2], hdr->addr2[3],
3077 hdr->addr2[4], hdr->addr2[5]);
3078 hostap_rx(dev, skb, rx_stats); 3004 hostap_rx(dev, skb, rx_stats);
3079#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 3005#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
3080 } 3006 }
@@ -3090,13 +3016,10 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
3090 * broadcast frame from an IBSS network. Drop it silently. 3016 * broadcast frame from an IBSS network. Drop it silently.
3091 * If BSSID is own, report the dropping of this frame. */ 3017 * If BSSID is own, report the dropping of this frame. */
3092 if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) { 3018 if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
3093 printk(KERN_DEBUG "%s: dropped received packet from " 3019 printk(KERN_DEBUG "%s: dropped received packet from %pM"
3094 MAC_FMT " with no ToDS flag " 3020 " with no ToDS flag "
3095 "(type=0x%02x, subtype=0x%02x)\n", dev->name, 3021 "(type=0x%02x, subtype=0x%02x)\n", dev->name,
3096 hdr->addr2[0], hdr->addr2[1], 3022 hdr->addr2, type >> 2, stype >> 4);
3097 hdr->addr2[2], hdr->addr2[3],
3098 hdr->addr2[4], hdr->addr2[5],
3099 type >> 2, stype >> 4);
3100 hostap_dump_rx_80211(dev->name, skb, rx_stats); 3023 hostap_dump_rx_80211(dev->name, skb, rx_stats);
3101 } 3024 }
3102 ret = AP_RX_DROP; 3025 ret = AP_RX_DROP;
@@ -3142,7 +3065,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
3142/* Called only as a tasklet (software IRQ) */ 3065/* Called only as a tasklet (software IRQ) */
3143int hostap_handle_sta_crypto(local_info_t *local, 3066int hostap_handle_sta_crypto(local_info_t *local,
3144 struct ieee80211_hdr_4addr *hdr, 3067 struct ieee80211_hdr_4addr *hdr,
3145 struct ieee80211_crypt_data **crypt, 3068 struct lib80211_crypt_data **crypt,
3146 void **sta_ptr) 3069 void **sta_ptr)
3147{ 3070{
3148 struct sta_info *sta; 3071 struct sta_info *sta;
@@ -3290,7 +3213,7 @@ void hostap_update_rates(local_info_t *local)
3290 3213
3291 3214
3292void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent, 3215void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
3293 struct ieee80211_crypt_data ***crypt) 3216 struct lib80211_crypt_data ***crypt)
3294{ 3217{
3295 struct sta_info *sta; 3218 struct sta_info *sta;
3296 3219
diff --git a/drivers/net/wireless/hostap/hostap_ap.h b/drivers/net/wireless/hostap/hostap_ap.h
index 2fa2452b6b07..d36e4b175336 100644
--- a/drivers/net/wireless/hostap/hostap_ap.h
+++ b/drivers/net/wireless/hostap/hostap_ap.h
@@ -74,7 +74,7 @@ struct sta_info {
74 u32 tx_since_last_failure; 74 u32 tx_since_last_failure;
75 u32 tx_consecutive_exc; 75 u32 tx_consecutive_exc;
76 76
77 struct ieee80211_crypt_data *crypt; 77 struct lib80211_crypt_data *crypt;
78 78
79 int ap; /* whether this station is an AP */ 79 int ap; /* whether this station is an AP */
80 80
@@ -209,7 +209,7 @@ struct ap_data {
209 209
210 /* WEP operations for generating challenges to be used with shared key 210 /* WEP operations for generating challenges to be used with shared key
211 * authentication */ 211 * authentication */
212 struct ieee80211_crypto_ops *crypt; 212 struct lib80211_crypto_ops *crypt;
213 void *crypt_priv; 213 void *crypt_priv;
214#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 214#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
215}; 215};
@@ -229,7 +229,7 @@ typedef enum {
229struct hostap_tx_data { 229struct hostap_tx_data {
230 struct sk_buff *skb; 230 struct sk_buff *skb;
231 int host_encrypt; 231 int host_encrypt;
232 struct ieee80211_crypt_data *crypt; 232 struct lib80211_crypt_data *crypt;
233 void *sta_ptr; 233 void *sta_ptr;
234}; 234};
235ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx); 235ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx);
@@ -244,7 +244,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
244 struct hostap_80211_rx_status *rx_stats, 244 struct hostap_80211_rx_status *rx_stats,
245 int wds); 245 int wds);
246int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr_4addr *hdr, 246int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr_4addr *hdr,
247 struct ieee80211_crypt_data **crypt, 247 struct lib80211_crypt_data **crypt,
248 void **sta_ptr); 248 void **sta_ptr);
249int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr); 249int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr);
250int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr); 250int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr);
diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h
index b470c743c2d1..90b64b092007 100644
--- a/drivers/net/wireless/hostap/hostap_common.h
+++ b/drivers/net/wireless/hostap/hostap_common.h
@@ -6,19 +6,6 @@
6 6
7/* IEEE 802.11 defines */ 7/* IEEE 802.11 defines */
8 8
9/* Information Element IDs */
10#define WLAN_EID_SSID 0
11#define WLAN_EID_SUPP_RATES 1
12#define WLAN_EID_FH_PARAMS 2
13#define WLAN_EID_DS_PARAMS 3
14#define WLAN_EID_CF_PARAMS 4
15#define WLAN_EID_TIM 5
16#define WLAN_EID_IBSS_PARAMS 6
17#define WLAN_EID_CHALLENGE 16
18#define WLAN_EID_RSN 48
19#define WLAN_EID_GENERIC 221
20
21
22/* HFA384X Configuration RIDs */ 9/* HFA384X Configuration RIDs */
23#define HFA384X_RID_CNFPORTTYPE 0xFC00 10#define HFA384X_RID_CNFPORTTYPE 0xFC00
24#define HFA384X_RID_CNFOWNMACADDR 0xFC01 11#define HFA384X_RID_CNFOWNMACADDR 0xFC01
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 3153fe9d7ce0..0f27059bbe85 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -47,7 +47,7 @@
47#include <linux/wireless.h> 47#include <linux/wireless.h>
48#include <net/iw_handler.h> 48#include <net/iw_handler.h>
49#include <net/ieee80211.h> 49#include <net/ieee80211.h>
50#include <net/ieee80211_crypt.h> 50#include <net/lib80211.h>
51#include <asm/irq.h> 51#include <asm/irq.h>
52 52
53#include "hostap_80211.h" 53#include "hostap_80211.h"
@@ -2335,10 +2335,6 @@ static void prism2_txexc(local_info_t *local)
2335 int show_dump, res; 2335 int show_dump, res;
2336 char *payload = NULL; 2336 char *payload = NULL;
2337 struct hfa384x_tx_frame txdesc; 2337 struct hfa384x_tx_frame txdesc;
2338 DECLARE_MAC_BUF(mac);
2339 DECLARE_MAC_BUF(mac2);
2340 DECLARE_MAC_BUF(mac3);
2341 DECLARE_MAC_BUF(mac4);
2342 2338
2343 show_dump = local->frame_dump & PRISM2_DUMP_TXEXC_HDR; 2339 show_dump = local->frame_dump & PRISM2_DUMP_TXEXC_HDR;
2344 local->stats.tx_errors++; 2340 local->stats.tx_errors++;
@@ -2404,9 +2400,9 @@ static void prism2_txexc(local_info_t *local)
2404 WLAN_FC_GET_STYPE(fc) >> 4, 2400 WLAN_FC_GET_STYPE(fc) >> 4,
2405 fc & IEEE80211_FCTL_TODS ? " ToDS" : "", 2401 fc & IEEE80211_FCTL_TODS ? " ToDS" : "",
2406 fc & IEEE80211_FCTL_FROMDS ? " FromDS" : ""); 2402 fc & IEEE80211_FCTL_FROMDS ? " FromDS" : "");
2407 PDEBUG(DEBUG_EXTRA, " A1=%s A2=%s A3=%s A4=%s\n", 2403 PDEBUG(DEBUG_EXTRA, " A1=%pM A2=%pM A3=%pM A4=%pM\n",
2408 print_mac(mac, txdesc.addr1), print_mac(mac2, txdesc.addr2), 2404 txdesc.addr1, txdesc.addr2,
2409 print_mac(mac3, txdesc.addr3), print_mac(mac4, txdesc.addr4)); 2405 txdesc.addr3, txdesc.addr4);
2410} 2406}
2411 2407
2412 2408
@@ -2792,45 +2788,6 @@ static void prism2_check_sta_fw_version(local_info_t *local)
2792} 2788}
2793 2789
2794 2790
2795static void prism2_crypt_deinit_entries(local_info_t *local, int force)
2796{
2797 struct list_head *ptr, *n;
2798 struct ieee80211_crypt_data *entry;
2799
2800 for (ptr = local->crypt_deinit_list.next, n = ptr->next;
2801 ptr != &local->crypt_deinit_list; ptr = n, n = ptr->next) {
2802 entry = list_entry(ptr, struct ieee80211_crypt_data, list);
2803
2804 if (atomic_read(&entry->refcnt) != 0 && !force)
2805 continue;
2806
2807 list_del(ptr);
2808
2809 if (entry->ops)
2810 entry->ops->deinit(entry->priv);
2811 kfree(entry);
2812 }
2813}
2814
2815
2816static void prism2_crypt_deinit_handler(unsigned long data)
2817{
2818 local_info_t *local = (local_info_t *) data;
2819 unsigned long flags;
2820
2821 spin_lock_irqsave(&local->lock, flags);
2822 prism2_crypt_deinit_entries(local, 0);
2823 if (!list_empty(&local->crypt_deinit_list)) {
2824 printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
2825 "deletion list\n", local->dev->name);
2826 local->crypt_deinit_timer.expires = jiffies + HZ;
2827 add_timer(&local->crypt_deinit_timer);
2828 }
2829 spin_unlock_irqrestore(&local->lock, flags);
2830
2831}
2832
2833
2834static void hostap_passive_scan(unsigned long data) 2791static void hostap_passive_scan(unsigned long data)
2835{ 2792{
2836 local_info_t *local = (local_info_t *) data; 2793 local_info_t *local = (local_info_t *) data;
@@ -3254,10 +3211,8 @@ while (0)
3254 3211
3255 INIT_LIST_HEAD(&local->cmd_queue); 3212 INIT_LIST_HEAD(&local->cmd_queue);
3256 init_waitqueue_head(&local->hostscan_wq); 3213 init_waitqueue_head(&local->hostscan_wq);
3257 INIT_LIST_HEAD(&local->crypt_deinit_list); 3214
3258 init_timer(&local->crypt_deinit_timer); 3215 lib80211_crypt_info_init(&local->crypt_info, dev->name, &local->lock);
3259 local->crypt_deinit_timer.data = (unsigned long) local;
3260 local->crypt_deinit_timer.function = prism2_crypt_deinit_handler;
3261 3216
3262 init_timer(&local->passive_scan_timer); 3217 init_timer(&local->passive_scan_timer);
3263 local->passive_scan_timer.data = (unsigned long) local; 3218 local->passive_scan_timer.data = (unsigned long) local;
@@ -3358,9 +3313,7 @@ static void prism2_free_local_data(struct net_device *dev)
3358 3313
3359 flush_scheduled_work(); 3314 flush_scheduled_work();
3360 3315
3361 if (timer_pending(&local->crypt_deinit_timer)) 3316 lib80211_crypt_info_free(&local->crypt_info);
3362 del_timer(&local->crypt_deinit_timer);
3363 prism2_crypt_deinit_entries(local, 1);
3364 3317
3365 if (timer_pending(&local->passive_scan_timer)) 3318 if (timer_pending(&local->passive_scan_timer))
3366 del_timer(&local->passive_scan_timer); 3319 del_timer(&local->passive_scan_timer);
@@ -3377,16 +3330,6 @@ static void prism2_free_local_data(struct net_device *dev)
3377 if (local->dev_enabled) 3330 if (local->dev_enabled)
3378 prism2_callback(local, PRISM2_CALLBACK_DISABLE); 3331 prism2_callback(local, PRISM2_CALLBACK_DISABLE);
3379 3332
3380 for (i = 0; i < WEP_KEYS; i++) {
3381 struct ieee80211_crypt_data *crypt = local->crypt[i];
3382 if (crypt) {
3383 if (crypt->ops)
3384 crypt->ops->deinit(crypt->priv);
3385 kfree(crypt);
3386 local->crypt[i] = NULL;
3387 }
3388 }
3389
3390 if (local->ap != NULL) 3333 if (local->ap != NULL)
3391 hostap_free_data(local->ap); 3334 hostap_free_data(local->ap);
3392 3335
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
index 7cd3fb79230e..99b4cf41edf2 100644
--- a/drivers/net/wireless/hostap/hostap_info.c
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -166,7 +166,6 @@ static void prism2_host_roaming(local_info_t *local)
166 struct hfa384x_hostscan_result *selected, *entry; 166 struct hfa384x_hostscan_result *selected, *entry;
167 int i; 167 int i;
168 unsigned long flags; 168 unsigned long flags;
169 DECLARE_MAC_BUF(mac);
170 169
171 if (local->last_join_time && 170 if (local->last_join_time &&
172 time_before(jiffies, local->last_join_time + 10 * HZ)) { 171 time_before(jiffies, local->last_join_time + 10 * HZ)) {
@@ -199,9 +198,8 @@ static void prism2_host_roaming(local_info_t *local)
199 local->preferred_ap[2] || local->preferred_ap[3] || 198 local->preferred_ap[2] || local->preferred_ap[3] ||
200 local->preferred_ap[4] || local->preferred_ap[5]) { 199 local->preferred_ap[4] || local->preferred_ap[5]) {
201 /* Try to find preferred AP */ 200 /* Try to find preferred AP */
202 PDEBUG(DEBUG_EXTRA, "%s: Preferred AP BSSID " 201 PDEBUG(DEBUG_EXTRA, "%s: Preferred AP BSSID %pM\n",
203 "%s\n", 202 dev->name, local->preferred_ap);
204 dev->name, print_mac(mac, local->preferred_ap));
205 for (i = 0; i < local->last_scan_results_count; i++) { 203 for (i = 0; i < local->last_scan_results_count; i++) {
206 entry = &local->last_scan_results[i]; 204 entry = &local->last_scan_results[i];
207 if (memcmp(local->preferred_ap, entry->bssid, 6) == 0) 205 if (memcmp(local->preferred_ap, entry->bssid, 6) == 0)
@@ -218,9 +216,9 @@ static void prism2_host_roaming(local_info_t *local)
218 req.channel = selected->chid; 216 req.channel = selected->chid;
219 spin_unlock_irqrestore(&local->lock, flags); 217 spin_unlock_irqrestore(&local->lock, flags);
220 218
221 PDEBUG(DEBUG_EXTRA, "%s: JoinRequest: BSSID=%s" 219 PDEBUG(DEBUG_EXTRA, "%s: JoinRequest: BSSID=%pM"
222 " channel=%d\n", 220 " channel=%d\n",
223 dev->name, print_mac(mac, req.bssid), le16_to_cpu(req.channel)); 221 dev->name, req.bssid, le16_to_cpu(req.channel));
224 if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req, 222 if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req,
225 sizeof(req))) { 223 sizeof(req))) {
226 printk(KERN_DEBUG "%s: JoinRequest failed\n", dev->name); 224 printk(KERN_DEBUG "%s: JoinRequest failed\n", dev->name);
@@ -413,7 +411,6 @@ static void handle_info_queue_linkstatus(local_info_t *local)
413 int val = local->prev_link_status; 411 int val = local->prev_link_status;
414 int connected; 412 int connected;
415 union iwreq_data wrqu; 413 union iwreq_data wrqu;
416 DECLARE_MAC_BUF(mac);
417 414
418 connected = 415 connected =
419 val == HFA384X_LINKSTATUS_CONNECTED || 416 val == HFA384X_LINKSTATUS_CONNECTED ||
@@ -425,10 +422,9 @@ static void handle_info_queue_linkstatus(local_info_t *local)
425 printk(KERN_DEBUG "%s: could not read CURRENTBSSID after " 422 printk(KERN_DEBUG "%s: could not read CURRENTBSSID after "
426 "LinkStatus event\n", local->dev->name); 423 "LinkStatus event\n", local->dev->name);
427 } else { 424 } else {
428 PDEBUG(DEBUG_EXTRA, "%s: LinkStatus: BSSID=" 425 PDEBUG(DEBUG_EXTRA, "%s: LinkStatus: BSSID=%pM\n",
429 "%s\n",
430 local->dev->name, 426 local->dev->name,
431 print_mac(mac, (unsigned char *) local->bssid)); 427 (unsigned char *) local->bssid);
432 if (local->wds_type & HOSTAP_WDS_AP_CLIENT) 428 if (local->wds_type & HOSTAP_WDS_AP_CLIENT)
433 hostap_add_sta(local->ap, local->bssid); 429 hostap_add_sta(local->ap, local->bssid);
434 } 430 }
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 3f8b1d7036e5..c40fdf4c79de 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -2,7 +2,7 @@
2 2
3#include <linux/types.h> 3#include <linux/types.h>
4#include <linux/ethtool.h> 4#include <linux/ethtool.h>
5#include <net/ieee80211_crypt.h> 5#include <net/lib80211.h>
6 6
7#include "hostap_wlan.h" 7#include "hostap_wlan.h"
8#include "hostap.h" 8#include "hostap.h"
@@ -116,32 +116,6 @@ static int prism2_get_name(struct net_device *dev,
116} 116}
117 117
118 118
119static void prism2_crypt_delayed_deinit(local_info_t *local,
120 struct ieee80211_crypt_data **crypt)
121{
122 struct ieee80211_crypt_data *tmp;
123 unsigned long flags;
124
125 tmp = *crypt;
126 *crypt = NULL;
127
128 if (tmp == NULL)
129 return;
130
131 /* must not run ops->deinit() while there may be pending encrypt or
132 * decrypt operations. Use a list of delayed deinits to avoid needing
133 * locking. */
134
135 spin_lock_irqsave(&local->lock, flags);
136 list_add(&tmp->list, &local->crypt_deinit_list);
137 if (!timer_pending(&local->crypt_deinit_timer)) {
138 local->crypt_deinit_timer.expires = jiffies + HZ;
139 add_timer(&local->crypt_deinit_timer);
140 }
141 spin_unlock_irqrestore(&local->lock, flags);
142}
143
144
145static int prism2_ioctl_siwencode(struct net_device *dev, 119static int prism2_ioctl_siwencode(struct net_device *dev,
146 struct iw_request_info *info, 120 struct iw_request_info *info,
147 struct iw_point *erq, char *keybuf) 121 struct iw_point *erq, char *keybuf)
@@ -149,47 +123,47 @@ static int prism2_ioctl_siwencode(struct net_device *dev,
149 struct hostap_interface *iface; 123 struct hostap_interface *iface;
150 local_info_t *local; 124 local_info_t *local;
151 int i; 125 int i;
152 struct ieee80211_crypt_data **crypt; 126 struct lib80211_crypt_data **crypt;
153 127
154 iface = netdev_priv(dev); 128 iface = netdev_priv(dev);
155 local = iface->local; 129 local = iface->local;
156 130
157 i = erq->flags & IW_ENCODE_INDEX; 131 i = erq->flags & IW_ENCODE_INDEX;
158 if (i < 1 || i > 4) 132 if (i < 1 || i > 4)
159 i = local->tx_keyidx; 133 i = local->crypt_info.tx_keyidx;
160 else 134 else
161 i--; 135 i--;
162 if (i < 0 || i >= WEP_KEYS) 136 if (i < 0 || i >= WEP_KEYS)
163 return -EINVAL; 137 return -EINVAL;
164 138
165 crypt = &local->crypt[i]; 139 crypt = &local->crypt_info.crypt[i];
166 140
167 if (erq->flags & IW_ENCODE_DISABLED) { 141 if (erq->flags & IW_ENCODE_DISABLED) {
168 if (*crypt) 142 if (*crypt)
169 prism2_crypt_delayed_deinit(local, crypt); 143 lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
170 goto done; 144 goto done;
171 } 145 }
172 146
173 if (*crypt != NULL && (*crypt)->ops != NULL && 147 if (*crypt != NULL && (*crypt)->ops != NULL &&
174 strcmp((*crypt)->ops->name, "WEP") != 0) { 148 strcmp((*crypt)->ops->name, "WEP") != 0) {
175 /* changing to use WEP; deinit previously used algorithm */ 149 /* changing to use WEP; deinit previously used algorithm */
176 prism2_crypt_delayed_deinit(local, crypt); 150 lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
177 } 151 }
178 152
179 if (*crypt == NULL) { 153 if (*crypt == NULL) {
180 struct ieee80211_crypt_data *new_crypt; 154 struct lib80211_crypt_data *new_crypt;
181 155
182 /* take WEP into use */ 156 /* take WEP into use */
183 new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), 157 new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
184 GFP_KERNEL); 158 GFP_KERNEL);
185 if (new_crypt == NULL) 159 if (new_crypt == NULL)
186 return -ENOMEM; 160 return -ENOMEM;
187 new_crypt->ops = ieee80211_get_crypto_ops("WEP"); 161 new_crypt->ops = lib80211_get_crypto_ops("WEP");
188 if (!new_crypt->ops) { 162 if (!new_crypt->ops) {
189 request_module("ieee80211_crypt_wep"); 163 request_module("lib80211_crypt_wep");
190 new_crypt->ops = ieee80211_get_crypto_ops("WEP"); 164 new_crypt->ops = lib80211_get_crypto_ops("WEP");
191 } 165 }
192 if (new_crypt->ops) 166 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
193 new_crypt->priv = new_crypt->ops->init(i); 167 new_crypt->priv = new_crypt->ops->init(i);
194 if (!new_crypt->ops || !new_crypt->priv) { 168 if (!new_crypt->ops || !new_crypt->priv) {
195 kfree(new_crypt); 169 kfree(new_crypt);
@@ -210,16 +184,16 @@ static int prism2_ioctl_siwencode(struct net_device *dev,
210 memset(keybuf + erq->length, 0, len - erq->length); 184 memset(keybuf + erq->length, 0, len - erq->length);
211 (*crypt)->ops->set_key(keybuf, len, NULL, (*crypt)->priv); 185 (*crypt)->ops->set_key(keybuf, len, NULL, (*crypt)->priv);
212 for (j = 0; j < WEP_KEYS; j++) { 186 for (j = 0; j < WEP_KEYS; j++) {
213 if (j != i && local->crypt[j]) { 187 if (j != i && local->crypt_info.crypt[j]) {
214 first = 0; 188 first = 0;
215 break; 189 break;
216 } 190 }
217 } 191 }
218 if (first) 192 if (first)
219 local->tx_keyidx = i; 193 local->crypt_info.tx_keyidx = i;
220 } else { 194 } else {
221 /* No key data - just set the default TX key index */ 195 /* No key data - just set the default TX key index */
222 local->tx_keyidx = i; 196 local->crypt_info.tx_keyidx = i;
223 } 197 }
224 198
225 done: 199 done:
@@ -252,20 +226,20 @@ static int prism2_ioctl_giwencode(struct net_device *dev,
252 local_info_t *local; 226 local_info_t *local;
253 int i, len; 227 int i, len;
254 u16 val; 228 u16 val;
255 struct ieee80211_crypt_data *crypt; 229 struct lib80211_crypt_data *crypt;
256 230
257 iface = netdev_priv(dev); 231 iface = netdev_priv(dev);
258 local = iface->local; 232 local = iface->local;
259 233
260 i = erq->flags & IW_ENCODE_INDEX; 234 i = erq->flags & IW_ENCODE_INDEX;
261 if (i < 1 || i > 4) 235 if (i < 1 || i > 4)
262 i = local->tx_keyidx; 236 i = local->crypt_info.tx_keyidx;
263 else 237 else
264 i--; 238 i--;
265 if (i < 0 || i >= WEP_KEYS) 239 if (i < 0 || i >= WEP_KEYS)
266 return -EINVAL; 240 return -EINVAL;
267 241
268 crypt = local->crypt[i]; 242 crypt = local->crypt_info.crypt[i];
269 erq->flags = i + 1; 243 erq->flags = i + 1;
270 244
271 if (crypt == NULL || crypt->ops == NULL) { 245 if (crypt == NULL || crypt->ops == NULL) {
@@ -664,7 +638,6 @@ static int hostap_join_ap(struct net_device *dev)
664 unsigned long flags; 638 unsigned long flags;
665 int i; 639 int i;
666 struct hfa384x_hostscan_result *entry; 640 struct hfa384x_hostscan_result *entry;
667 DECLARE_MAC_BUF(mac);
668 641
669 iface = netdev_priv(dev); 642 iface = netdev_priv(dev);
670 local = iface->local; 643 local = iface->local;
@@ -686,14 +659,13 @@ static int hostap_join_ap(struct net_device *dev)
686 659
687 if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req, 660 if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req,
688 sizeof(req))) { 661 sizeof(req))) {
689 printk(KERN_DEBUG "%s: JoinRequest %s" 662 printk(KERN_DEBUG "%s: JoinRequest %pM failed\n",
690 " failed\n", 663 dev->name, local->preferred_ap);
691 dev->name, print_mac(mac, local->preferred_ap));
692 return -1; 664 return -1;
693 } 665 }
694 666
695 printk(KERN_DEBUG "%s: Trying to join BSSID %s\n", 667 printk(KERN_DEBUG "%s: Trying to join BSSID %pM\n",
696 dev->name, print_mac(mac, local->preferred_ap)); 668 dev->name, local->preferred_ap);
697 669
698 return 0; 670 return 0;
699} 671}
@@ -3229,8 +3201,8 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
3229 local_info_t *local = iface->local; 3201 local_info_t *local = iface->local;
3230 struct iw_encode_ext *ext = (struct iw_encode_ext *) extra; 3202 struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
3231 int i, ret = 0; 3203 int i, ret = 0;
3232 struct ieee80211_crypto_ops *ops; 3204 struct lib80211_crypto_ops *ops;
3233 struct ieee80211_crypt_data **crypt; 3205 struct lib80211_crypt_data **crypt;
3234 void *sta_ptr; 3206 void *sta_ptr;
3235 u8 *addr; 3207 u8 *addr;
3236 const char *alg, *module; 3208 const char *alg, *module;
@@ -3239,7 +3211,7 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
3239 if (i > WEP_KEYS) 3211 if (i > WEP_KEYS)
3240 return -EINVAL; 3212 return -EINVAL;
3241 if (i < 1 || i > WEP_KEYS) 3213 if (i < 1 || i > WEP_KEYS)
3242 i = local->tx_keyidx; 3214 i = local->crypt_info.tx_keyidx;
3243 else 3215 else
3244 i--; 3216 i--;
3245 if (i < 0 || i >= WEP_KEYS) 3217 if (i < 0 || i >= WEP_KEYS)
@@ -3249,7 +3221,7 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
3249 if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff && 3221 if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff &&
3250 addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) { 3222 addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
3251 sta_ptr = NULL; 3223 sta_ptr = NULL;
3252 crypt = &local->crypt[i]; 3224 crypt = &local->crypt_info.crypt[i];
3253 } else { 3225 } else {
3254 if (i != 0) 3226 if (i != 0)
3255 return -EINVAL; 3227 return -EINVAL;
@@ -3262,7 +3234,7 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
3262 * is emulated by using default key idx 0. 3234 * is emulated by using default key idx 0.
3263 */ 3235 */
3264 i = 0; 3236 i = 0;
3265 crypt = &local->crypt[i]; 3237 crypt = &local->crypt_info.crypt[i];
3266 } else 3238 } else
3267 return -EINVAL; 3239 return -EINVAL;
3268 } 3240 }
@@ -3271,22 +3243,22 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
3271 if ((erq->flags & IW_ENCODE_DISABLED) || 3243 if ((erq->flags & IW_ENCODE_DISABLED) ||
3272 ext->alg == IW_ENCODE_ALG_NONE) { 3244 ext->alg == IW_ENCODE_ALG_NONE) {
3273 if (*crypt) 3245 if (*crypt)
3274 prism2_crypt_delayed_deinit(local, crypt); 3246 lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
3275 goto done; 3247 goto done;
3276 } 3248 }
3277 3249
3278 switch (ext->alg) { 3250 switch (ext->alg) {
3279 case IW_ENCODE_ALG_WEP: 3251 case IW_ENCODE_ALG_WEP:
3280 alg = "WEP"; 3252 alg = "WEP";
3281 module = "ieee80211_crypt_wep"; 3253 module = "lib80211_crypt_wep";
3282 break; 3254 break;
3283 case IW_ENCODE_ALG_TKIP: 3255 case IW_ENCODE_ALG_TKIP:
3284 alg = "TKIP"; 3256 alg = "TKIP";
3285 module = "ieee80211_crypt_tkip"; 3257 module = "lib80211_crypt_tkip";
3286 break; 3258 break;
3287 case IW_ENCODE_ALG_CCMP: 3259 case IW_ENCODE_ALG_CCMP:
3288 alg = "CCMP"; 3260 alg = "CCMP";
3289 module = "ieee80211_crypt_ccmp"; 3261 module = "lib80211_crypt_ccmp";
3290 break; 3262 break;
3291 default: 3263 default:
3292 printk(KERN_DEBUG "%s: unsupported algorithm %d\n", 3264 printk(KERN_DEBUG "%s: unsupported algorithm %d\n",
@@ -3295,10 +3267,10 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
3295 goto done; 3267 goto done;
3296 } 3268 }
3297 3269
3298 ops = ieee80211_get_crypto_ops(alg); 3270 ops = lib80211_get_crypto_ops(alg);
3299 if (ops == NULL) { 3271 if (ops == NULL) {
3300 request_module(module); 3272 request_module(module);
3301 ops = ieee80211_get_crypto_ops(alg); 3273 ops = lib80211_get_crypto_ops(alg);
3302 } 3274 }
3303 if (ops == NULL) { 3275 if (ops == NULL) {
3304 printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n", 3276 printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n",
@@ -3317,18 +3289,19 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
3317 } 3289 }
3318 3290
3319 if (*crypt == NULL || (*crypt)->ops != ops) { 3291 if (*crypt == NULL || (*crypt)->ops != ops) {
3320 struct ieee80211_crypt_data *new_crypt; 3292 struct lib80211_crypt_data *new_crypt;
3321 3293
3322 prism2_crypt_delayed_deinit(local, crypt); 3294 lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
3323 3295
3324 new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), 3296 new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
3325 GFP_KERNEL); 3297 GFP_KERNEL);
3326 if (new_crypt == NULL) { 3298 if (new_crypt == NULL) {
3327 ret = -ENOMEM; 3299 ret = -ENOMEM;
3328 goto done; 3300 goto done;
3329 } 3301 }
3330 new_crypt->ops = ops; 3302 new_crypt->ops = ops;
3331 new_crypt->priv = new_crypt->ops->init(i); 3303 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
3304 new_crypt->priv = new_crypt->ops->init(i);
3332 if (new_crypt->priv == NULL) { 3305 if (new_crypt->priv == NULL) {
3333 kfree(new_crypt); 3306 kfree(new_crypt);
3334 ret = -EINVAL; 3307 ret = -EINVAL;
@@ -3356,20 +3329,20 @@ static int prism2_ioctl_siwencodeext(struct net_device *dev,
3356 3329
3357 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { 3330 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
3358 if (!sta_ptr) 3331 if (!sta_ptr)
3359 local->tx_keyidx = i; 3332 local->crypt_info.tx_keyidx = i;
3360 } 3333 }
3361 3334
3362 3335
3363 if (sta_ptr == NULL && ext->key_len > 0) { 3336 if (sta_ptr == NULL && ext->key_len > 0) {
3364 int first = 1, j; 3337 int first = 1, j;
3365 for (j = 0; j < WEP_KEYS; j++) { 3338 for (j = 0; j < WEP_KEYS; j++) {
3366 if (j != i && local->crypt[j]) { 3339 if (j != i && local->crypt_info.crypt[j]) {
3367 first = 0; 3340 first = 0;
3368 break; 3341 break;
3369 } 3342 }
3370 } 3343 }
3371 if (first) 3344 if (first)
3372 local->tx_keyidx = i; 3345 local->crypt_info.tx_keyidx = i;
3373 } 3346 }
3374 3347
3375 done: 3348 done:
@@ -3401,7 +3374,7 @@ static int prism2_ioctl_giwencodeext(struct net_device *dev,
3401{ 3374{
3402 struct hostap_interface *iface = netdev_priv(dev); 3375 struct hostap_interface *iface = netdev_priv(dev);
3403 local_info_t *local = iface->local; 3376 local_info_t *local = iface->local;
3404 struct ieee80211_crypt_data **crypt; 3377 struct lib80211_crypt_data **crypt;
3405 void *sta_ptr; 3378 void *sta_ptr;
3406 int max_key_len, i; 3379 int max_key_len, i;
3407 struct iw_encode_ext *ext = (struct iw_encode_ext *) extra; 3380 struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
@@ -3413,7 +3386,7 @@ static int prism2_ioctl_giwencodeext(struct net_device *dev,
3413 3386
3414 i = erq->flags & IW_ENCODE_INDEX; 3387 i = erq->flags & IW_ENCODE_INDEX;
3415 if (i < 1 || i > WEP_KEYS) 3388 if (i < 1 || i > WEP_KEYS)
3416 i = local->tx_keyidx; 3389 i = local->crypt_info.tx_keyidx;
3417 else 3390 else
3418 i--; 3391 i--;
3419 3392
@@ -3421,7 +3394,7 @@ static int prism2_ioctl_giwencodeext(struct net_device *dev,
3421 if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff && 3394 if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff &&
3422 addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) { 3395 addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
3423 sta_ptr = NULL; 3396 sta_ptr = NULL;
3424 crypt = &local->crypt[i]; 3397 crypt = &local->crypt_info.crypt[i];
3425 } else { 3398 } else {
3426 i = 0; 3399 i = 0;
3427 sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt); 3400 sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt);
@@ -3470,8 +3443,8 @@ static int prism2_ioctl_set_encryption(local_info_t *local,
3470 int param_len) 3443 int param_len)
3471{ 3444{
3472 int ret = 0; 3445 int ret = 0;
3473 struct ieee80211_crypto_ops *ops; 3446 struct lib80211_crypto_ops *ops;
3474 struct ieee80211_crypt_data **crypt; 3447 struct lib80211_crypt_data **crypt;
3475 void *sta_ptr; 3448 void *sta_ptr;
3476 3449
3477 param->u.crypt.err = 0; 3450 param->u.crypt.err = 0;
@@ -3488,7 +3461,7 @@ static int prism2_ioctl_set_encryption(local_info_t *local,
3488 if (param->u.crypt.idx >= WEP_KEYS) 3461 if (param->u.crypt.idx >= WEP_KEYS)
3489 return -EINVAL; 3462 return -EINVAL;
3490 sta_ptr = NULL; 3463 sta_ptr = NULL;
3491 crypt = &local->crypt[param->u.crypt.idx]; 3464 crypt = &local->crypt_info.crypt[param->u.crypt.idx];
3492 } else { 3465 } else {
3493 if (param->u.crypt.idx) 3466 if (param->u.crypt.idx)
3494 return -EINVAL; 3467 return -EINVAL;
@@ -3505,20 +3478,20 @@ static int prism2_ioctl_set_encryption(local_info_t *local,
3505 3478
3506 if (strcmp(param->u.crypt.alg, "none") == 0) { 3479 if (strcmp(param->u.crypt.alg, "none") == 0) {
3507 if (crypt) 3480 if (crypt)
3508 prism2_crypt_delayed_deinit(local, crypt); 3481 lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
3509 goto done; 3482 goto done;
3510 } 3483 }
3511 3484
3512 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 3485 ops = lib80211_get_crypto_ops(param->u.crypt.alg);
3513 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) { 3486 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
3514 request_module("ieee80211_crypt_wep"); 3487 request_module("lib80211_crypt_wep");
3515 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 3488 ops = lib80211_get_crypto_ops(param->u.crypt.alg);
3516 } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) { 3489 } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
3517 request_module("ieee80211_crypt_tkip"); 3490 request_module("lib80211_crypt_tkip");
3518 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 3491 ops = lib80211_get_crypto_ops(param->u.crypt.alg);
3519 } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) { 3492 } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
3520 request_module("ieee80211_crypt_ccmp"); 3493 request_module("lib80211_crypt_ccmp");
3521 ops = ieee80211_get_crypto_ops(param->u.crypt.alg); 3494 ops = lib80211_get_crypto_ops(param->u.crypt.alg);
3522 } 3495 }
3523 if (ops == NULL) { 3496 if (ops == NULL) {
3524 printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n", 3497 printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n",
@@ -3533,11 +3506,11 @@ static int prism2_ioctl_set_encryption(local_info_t *local,
3533 local->host_decrypt = local->host_encrypt = 1; 3506 local->host_decrypt = local->host_encrypt = 1;
3534 3507
3535 if (*crypt == NULL || (*crypt)->ops != ops) { 3508 if (*crypt == NULL || (*crypt)->ops != ops) {
3536 struct ieee80211_crypt_data *new_crypt; 3509 struct lib80211_crypt_data *new_crypt;
3537 3510
3538 prism2_crypt_delayed_deinit(local, crypt); 3511 lib80211_crypt_delayed_deinit(&local->crypt_info, crypt);
3539 3512
3540 new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), 3513 new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
3541 GFP_KERNEL); 3514 GFP_KERNEL);
3542 if (new_crypt == NULL) { 3515 if (new_crypt == NULL) {
3543 ret = -ENOMEM; 3516 ret = -ENOMEM;
@@ -3570,7 +3543,7 @@ static int prism2_ioctl_set_encryption(local_info_t *local,
3570 3543
3571 if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) { 3544 if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) {
3572 if (!sta_ptr) 3545 if (!sta_ptr)
3573 local->tx_keyidx = param->u.crypt.idx; 3546 local->crypt_info.tx_keyidx = param->u.crypt.idx;
3574 else if (param->u.crypt.idx) { 3547 else if (param->u.crypt.idx) {
3575 printk(KERN_DEBUG "%s: TX key idx setting failed\n", 3548 printk(KERN_DEBUG "%s: TX key idx setting failed\n",
3576 local->dev->name); 3549 local->dev->name);
@@ -3606,7 +3579,7 @@ static int prism2_ioctl_get_encryption(local_info_t *local,
3606 struct prism2_hostapd_param *param, 3579 struct prism2_hostapd_param *param,
3607 int param_len) 3580 int param_len)
3608{ 3581{
3609 struct ieee80211_crypt_data **crypt; 3582 struct lib80211_crypt_data **crypt;
3610 void *sta_ptr; 3583 void *sta_ptr;
3611 int max_key_len; 3584 int max_key_len;
3612 3585
@@ -3622,8 +3595,8 @@ static int prism2_ioctl_get_encryption(local_info_t *local,
3622 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) { 3595 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
3623 sta_ptr = NULL; 3596 sta_ptr = NULL;
3624 if (param->u.crypt.idx >= WEP_KEYS) 3597 if (param->u.crypt.idx >= WEP_KEYS)
3625 param->u.crypt.idx = local->tx_keyidx; 3598 param->u.crypt.idx = local->crypt_info.tx_keyidx;
3626 crypt = &local->crypt[param->u.crypt.idx]; 3599 crypt = &local->crypt_info.crypt[param->u.crypt.idx];
3627 } else { 3600 } else {
3628 param->u.crypt.idx = 0; 3601 param->u.crypt.idx = 0;
3629 sta_ptr = ap_crypt_get_ptrs(local->ap, param->sta_addr, 0, 3602 sta_ptr = ap_crypt_get_ptrs(local->ap, param->sta_addr, 0,
@@ -3701,10 +3674,8 @@ static int prism2_ioctl_set_assoc_ap_addr(local_info_t *local,
3701 struct prism2_hostapd_param *param, 3674 struct prism2_hostapd_param *param,
3702 int param_len) 3675 int param_len)
3703{ 3676{
3704 DECLARE_MAC_BUF(mac); 3677 printk(KERN_DEBUG "%ssta: associated as client with AP %pM\n",
3705 printk(KERN_DEBUG "%ssta: associated as client with AP " 3678 local->dev->name, param->sta_addr);
3706 "%s\n",
3707 local->dev->name, print_mac(mac, param->sta_addr));
3708 memcpy(local->assoc_ap_addr, param->sta_addr, ETH_ALEN); 3679 memcpy(local->assoc_ap_addr, param->sta_addr, ETH_ALEN);
3709 return 0; 3680 return 0;
3710} 3681}
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 756ab56c1f40..02a312ca8607 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -27,7 +27,7 @@
27#include <net/net_namespace.h> 27#include <net/net_namespace.h>
28#include <net/iw_handler.h> 28#include <net/iw_handler.h>
29#include <net/ieee80211.h> 29#include <net/ieee80211.h>
30#include <net/ieee80211_crypt.h> 30#include <net/lib80211.h>
31#include <asm/uaccess.h> 31#include <asm/uaccess.h>
32 32
33#include "hostap_wlan.h" 33#include "hostap_wlan.h"
@@ -343,10 +343,11 @@ int hostap_set_encryption(local_info_t *local)
343 char keybuf[WEP_KEY_LEN + 1]; 343 char keybuf[WEP_KEY_LEN + 1];
344 enum { NONE, WEP, OTHER } encrypt_type; 344 enum { NONE, WEP, OTHER } encrypt_type;
345 345
346 idx = local->tx_keyidx; 346 idx = local->crypt_info.tx_keyidx;
347 if (local->crypt[idx] == NULL || local->crypt[idx]->ops == NULL) 347 if (local->crypt_info.crypt[idx] == NULL ||
348 local->crypt_info.crypt[idx]->ops == NULL)
348 encrypt_type = NONE; 349 encrypt_type = NONE;
349 else if (strcmp(local->crypt[idx]->ops->name, "WEP") == 0) 350 else if (strcmp(local->crypt_info.crypt[idx]->ops->name, "WEP") == 0)
350 encrypt_type = WEP; 351 encrypt_type = WEP;
351 else 352 else
352 encrypt_type = OTHER; 353 encrypt_type = OTHER;
@@ -394,17 +395,17 @@ int hostap_set_encryption(local_info_t *local)
394 /* 104-bit support seems to require that all the keys are set to the 395 /* 104-bit support seems to require that all the keys are set to the
395 * same keylen */ 396 * same keylen */
396 keylen = 6; /* first 5 octets */ 397 keylen = 6; /* first 5 octets */
397 len = local->crypt[idx]->ops->get_key(keybuf, sizeof(keybuf), 398 len = local->crypt_info.crypt[idx]->ops->get_key(keybuf, sizeof(keybuf), NULL,
398 NULL, local->crypt[idx]->priv); 399 local->crypt_info.crypt[idx]->priv);
399 if (idx >= 0 && idx < WEP_KEYS && len > 5) 400 if (idx >= 0 && idx < WEP_KEYS && len > 5)
400 keylen = WEP_KEY_LEN + 1; /* first 13 octets */ 401 keylen = WEP_KEY_LEN + 1; /* first 13 octets */
401 402
402 for (i = 0; i < WEP_KEYS; i++) { 403 for (i = 0; i < WEP_KEYS; i++) {
403 memset(keybuf, 0, sizeof(keybuf)); 404 memset(keybuf, 0, sizeof(keybuf));
404 if (local->crypt[i]) { 405 if (local->crypt_info.crypt[i]) {
405 (void) local->crypt[i]->ops->get_key( 406 (void) local->crypt_info.crypt[i]->ops->get_key(
406 keybuf, sizeof(keybuf), 407 keybuf, sizeof(keybuf),
407 NULL, local->crypt[i]->priv); 408 NULL, local->crypt_info.crypt[i]->priv);
408 } 409 }
409 if (local->func->set_rid(local->dev, 410 if (local->func->set_rid(local->dev,
410 HFA384X_RID_CNFDEFAULTKEY0 + i, 411 HFA384X_RID_CNFDEFAULTKEY0 + i,
@@ -530,10 +531,6 @@ int hostap_set_auth_algs(local_info_t *local)
530void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx) 531void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx)
531{ 532{
532 u16 status, fc; 533 u16 status, fc;
533 DECLARE_MAC_BUF(mac);
534 DECLARE_MAC_BUF(mac2);
535 DECLARE_MAC_BUF(mac3);
536 DECLARE_MAC_BUF(mac4);
537 534
538 status = __le16_to_cpu(rx->status); 535 status = __le16_to_cpu(rx->status);
539 536
@@ -552,12 +549,11 @@ void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx)
552 fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "", 549 fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
553 fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : ""); 550 fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
554 551
555 printk(KERN_DEBUG " A1=%s A2=%s A3=%s A4=%s\n", 552 printk(KERN_DEBUG " A1=%pM A2=%pM A3=%pM A4=%pM\n",
556 print_mac(mac, rx->addr1), print_mac(mac2, rx->addr2), 553 rx->addr1, rx->addr2, rx->addr3, rx->addr4);
557 print_mac(mac3, rx->addr3), print_mac(mac4, rx->addr4));
558 554
559 printk(KERN_DEBUG " dst=%s src=%s len=%d\n", 555 printk(KERN_DEBUG " dst=%pM src=%pM len=%d\n",
560 print_mac(mac, rx->dst_addr), print_mac(mac2, rx->src_addr), 556 rx->dst_addr, rx->src_addr,
561 __be16_to_cpu(rx->len)); 557 __be16_to_cpu(rx->len));
562} 558}
563 559
@@ -565,10 +561,6 @@ void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx)
565void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx) 561void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx)
566{ 562{
567 u16 fc; 563 u16 fc;
568 DECLARE_MAC_BUF(mac);
569 DECLARE_MAC_BUF(mac2);
570 DECLARE_MAC_BUF(mac3);
571 DECLARE_MAC_BUF(mac4);
572 564
573 printk(KERN_DEBUG "%s: TX status=0x%04x retry_count=%d tx_rate=%d " 565 printk(KERN_DEBUG "%s: TX status=0x%04x retry_count=%d tx_rate=%d "
574 "tx_control=0x%04x; jiffies=%ld\n", 566 "tx_control=0x%04x; jiffies=%ld\n",
@@ -584,12 +576,11 @@ void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx)
584 fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "", 576 fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
585 fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : ""); 577 fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
586 578
587 printk(KERN_DEBUG " A1=%s A2=%s A3=%s A4=%s\n", 579 printk(KERN_DEBUG " A1=%pM A2=%pM A3=%pM A4=%pM\n",
588 print_mac(mac, tx->addr1), print_mac(mac2, tx->addr2), 580 tx->addr1, tx->addr2, tx->addr3, tx->addr4);
589 print_mac(mac3, tx->addr3), print_mac(mac4, tx->addr4));
590 581
591 printk(KERN_DEBUG " dst=%s src=%s len=%d\n", 582 printk(KERN_DEBUG " dst=%pM src=%pM len=%d\n",
592 print_mac(mac, tx->dst_addr), print_mac(mac2, tx->src_addr), 583 tx->dst_addr, tx->src_addr,
593 __be16_to_cpu(tx->len)); 584 __be16_to_cpu(tx->len));
594} 585}
595 586
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 3a874fc621d3..8fdd41f4b4f2 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -312,7 +312,7 @@ static int prism2_pci_probe(struct pci_dev *pdev,
312 goto err_out_disable; 312 goto err_out_disable;
313 } 313 }
314 314
315 mem = ioremap(phymem, pci_resource_len(pdev, 0)); 315 mem = pci_ioremap_bar(pdev, 0);
316 if (mem == NULL) { 316 if (mem == NULL) {
317 printk(KERN_ERR "prism2: Cannot remap PCI memory region\n") ; 317 printk(KERN_ERR "prism2: Cannot remap PCI memory region\n") ;
318 goto fail; 318 goto fail;
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index b03536008ad9..005ff25a405f 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -2,7 +2,7 @@
2 2
3#include <linux/types.h> 3#include <linux/types.h>
4#include <linux/proc_fs.h> 4#include <linux/proc_fs.h>
5#include <net/ieee80211_crypt.h> 5#include <net/lib80211.h>
6 6
7#include "hostap_wlan.h" 7#include "hostap_wlan.h"
8#include "hostap.h" 8#include "hostap.h"
@@ -36,9 +36,10 @@ static int prism2_debug_proc_read(char *page, char **start, off_t off,
36 p += sprintf(p, "dev_enabled=%d\n", local->dev_enabled); 36 p += sprintf(p, "dev_enabled=%d\n", local->dev_enabled);
37 p += sprintf(p, "sw_tick_stuck=%d\n", local->sw_tick_stuck); 37 p += sprintf(p, "sw_tick_stuck=%d\n", local->sw_tick_stuck);
38 for (i = 0; i < WEP_KEYS; i++) { 38 for (i = 0; i < WEP_KEYS; i++) {
39 if (local->crypt[i] && local->crypt[i]->ops) { 39 if (local->crypt_info.crypt[i] &&
40 p += sprintf(p, "crypt[%d]=%s\n", 40 local->crypt_info.crypt[i]->ops) {
41 i, local->crypt[i]->ops->name); 41 p += sprintf(p, "crypt[%d]=%s\n", i,
42 local->crypt_info.crypt[i]->ops->name);
42 } 43 }
43 } 44 }
44 p += sprintf(p, "pri_only=%d\n", local->pri_only); 45 p += sprintf(p, "pri_only=%d\n", local->pri_only);
@@ -106,7 +107,6 @@ static int prism2_wds_proc_read(char *page, char **start, off_t off,
106 local_info_t *local = (local_info_t *) data; 107 local_info_t *local = (local_info_t *) data;
107 struct list_head *ptr; 108 struct list_head *ptr;
108 struct hostap_interface *iface; 109 struct hostap_interface *iface;
109 DECLARE_MAC_BUF(mac);
110 110
111 if (off > PROC_LIMIT) { 111 if (off > PROC_LIMIT) {
112 *eof = 1; 112 *eof = 1;
@@ -118,9 +118,9 @@ static int prism2_wds_proc_read(char *page, char **start, off_t off,
118 iface = list_entry(ptr, struct hostap_interface, list); 118 iface = list_entry(ptr, struct hostap_interface, list);
119 if (iface->type != HOSTAP_INTERFACE_WDS) 119 if (iface->type != HOSTAP_INTERFACE_WDS)
120 continue; 120 continue;
121 p += sprintf(p, "%s\t%s\n", 121 p += sprintf(p, "%s\t%pM\n",
122 iface->dev->name, 122 iface->dev->name,
123 print_mac(mac, iface->u.wds.remote_addr)); 123 iface->u.wds.remote_addr);
124 if ((p - page) > PROC_LIMIT) { 124 if ((p - page) > PROC_LIMIT) {
125 printk(KERN_DEBUG "%s: wds proc did not fit\n", 125 printk(KERN_DEBUG "%s: wds proc did not fit\n",
126 local->dev->name); 126 local->dev->name);
@@ -148,7 +148,6 @@ static int prism2_bss_list_proc_read(char *page, char **start, off_t off,
148 struct list_head *ptr; 148 struct list_head *ptr;
149 struct hostap_bss_info *bss; 149 struct hostap_bss_info *bss;
150 int i; 150 int i;
151 DECLARE_MAC_BUF(mac);
152 151
153 if (off > PROC_LIMIT) { 152 if (off > PROC_LIMIT) {
154 *eof = 1; 153 *eof = 1;
@@ -160,8 +159,8 @@ static int prism2_bss_list_proc_read(char *page, char **start, off_t off,
160 spin_lock_bh(&local->lock); 159 spin_lock_bh(&local->lock);
161 list_for_each(ptr, &local->bss_list) { 160 list_for_each(ptr, &local->bss_list) {
162 bss = list_entry(ptr, struct hostap_bss_info, list); 161 bss = list_entry(ptr, struct hostap_bss_info, list);
163 p += sprintf(p, "%s\t%lu\t%u\t0x%x\t", 162 p += sprintf(p, "%pM\t%lu\t%u\t0x%x\t",
164 print_mac(mac, bss->bssid), bss->last_update, 163 bss->bssid, bss->last_update,
165 bss->count, bss->capab_info); 164 bss->count, bss->capab_info);
166 for (i = 0; i < bss->ssid_len; i++) { 165 for (i = 0; i < bss->ssid_len; i++) {
167 p += sprintf(p, "%c", 166 p += sprintf(p, "%c",
@@ -208,12 +207,13 @@ static int prism2_crypt_proc_read(char *page, char **start, off_t off,
208 return 0; 207 return 0;
209 } 208 }
210 209
211 p += sprintf(p, "tx_keyidx=%d\n", local->tx_keyidx); 210 p += sprintf(p, "tx_keyidx=%d\n", local->crypt_info.tx_keyidx);
212 for (i = 0; i < WEP_KEYS; i++) { 211 for (i = 0; i < WEP_KEYS; i++) {
213 if (local->crypt[i] && local->crypt[i]->ops && 212 if (local->crypt_info.crypt[i] &&
214 local->crypt[i]->ops->print_stats) { 213 local->crypt_info.crypt[i]->ops &&
215 p = local->crypt[i]->ops->print_stats( 214 local->crypt_info.crypt[i]->ops->print_stats) {
216 p, local->crypt[i]->priv); 215 p = local->crypt_info.crypt[i]->ops->print_stats(
216 p, local->crypt_info.crypt[i]->priv);
217 } 217 }
218 } 218 }
219 219
@@ -314,7 +314,6 @@ static int prism2_scan_results_proc_read(char *page, char **start, off_t off,
314 int entry, i, len, total = 0; 314 int entry, i, len, total = 0;
315 struct hfa384x_hostscan_result *scanres; 315 struct hfa384x_hostscan_result *scanres;
316 u8 *pos; 316 u8 *pos;
317 DECLARE_MAC_BUF(mac);
318 317
319 p += sprintf(p, "CHID ANL SL BcnInt Capab Rate BSSID ATIM SupRates " 318 p += sprintf(p, "CHID ANL SL BcnInt Capab Rate BSSID ATIM SupRates "
320 "SSID\n"); 319 "SSID\n");
@@ -332,14 +331,14 @@ static int prism2_scan_results_proc_read(char *page, char **start, off_t off,
332 if ((p - page) > (PAGE_SIZE - 200)) 331 if ((p - page) > (PAGE_SIZE - 200))
333 break; 332 break;
334 333
335 p += sprintf(p, "%d %d %d %d 0x%02x %d %s %d ", 334 p += sprintf(p, "%d %d %d %d 0x%02x %d %pM %d ",
336 le16_to_cpu(scanres->chid), 335 le16_to_cpu(scanres->chid),
337 (s16) le16_to_cpu(scanres->anl), 336 (s16) le16_to_cpu(scanres->anl),
338 (s16) le16_to_cpu(scanres->sl), 337 (s16) le16_to_cpu(scanres->sl),
339 le16_to_cpu(scanres->beacon_interval), 338 le16_to_cpu(scanres->beacon_interval),
340 le16_to_cpu(scanres->capability), 339 le16_to_cpu(scanres->capability),
341 le16_to_cpu(scanres->rate), 340 le16_to_cpu(scanres->rate),
342 print_mac(mac, scanres->bssid), 341 scanres->bssid,
343 le16_to_cpu(scanres->atim)); 342 le16_to_cpu(scanres->atim));
344 343
345 pos = scanres->sup_rates; 344 pos = scanres->sup_rates;
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index a68f97c39359..4d8d51a353cd 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -6,6 +6,7 @@
6#include <linux/mutex.h> 6#include <linux/mutex.h>
7#include <net/iw_handler.h> 7#include <net/iw_handler.h>
8#include <net/ieee80211_radiotap.h> 8#include <net/ieee80211_radiotap.h>
9#include <net/lib80211.h>
9 10
10#include "hostap_config.h" 11#include "hostap_config.h"
11#include "hostap_common.h" 12#include "hostap_common.h"
@@ -763,10 +764,7 @@ struct local_info {
763 764
764#define WEP_KEYS 4 765#define WEP_KEYS 4
765#define WEP_KEY_LEN 13 766#define WEP_KEY_LEN 13
766 struct ieee80211_crypt_data *crypt[WEP_KEYS]; 767 struct lib80211_crypt_info crypt_info;
767 int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
768 struct timer_list crypt_deinit_timer;
769 struct list_head crypt_deinit_list;
770 768
771 int open_wep; /* allow unencrypted frames */ 769 int open_wep; /* allow unencrypted frames */
772 int host_encrypt; 770 int host_encrypt;
@@ -822,7 +820,7 @@ struct local_info {
822 int last_scan_results_count; 820 int last_scan_results_count;
823 enum { PRISM2_SCAN, PRISM2_HOSTSCAN } last_scan_type; 821 enum { PRISM2_SCAN, PRISM2_HOSTSCAN } last_scan_type;
824 struct work_struct info_queue; 822 struct work_struct info_queue;
825 long pending_info; /* bit field of pending info_queue items */ 823 unsigned long pending_info; /* bit field of pending info_queue items */
826#define PRISM2_INFO_PENDING_LINKSTATUS 0 824#define PRISM2_INFO_PENDING_LINKSTATUS 0
827#define PRISM2_INFO_PENDING_SCANRESULTS 1 825#define PRISM2_INFO_PENDING_SCANRESULTS 1
828 int prev_link_status; /* previous received LinkStatus info */ 826 int prev_link_status; /* previous received LinkStatus info */
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
new file mode 100644
index 000000000000..3d5cc4463d4d
--- /dev/null
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -0,0 +1,191 @@
1#
2# Intel Centrino wireless drivers
3#
4
5config IPW2100
6 tristate "Intel PRO/Wireless 2100 Network Connection"
7 depends on PCI && WLAN_80211
8 select WIRELESS_EXT
9 select FW_LOADER
10 select LIB80211
11 select LIBIPW
12 ---help---
13 A driver for the Intel PRO/Wireless 2100 Network
14 Connection 802.11b wireless network adapter.
15
16 See <file:Documentation/networking/README.ipw2100> for information on
17 the capabilities currently enabled in this driver and for tips
18 for debugging issues and problems.
19
20 In order to use this driver, you will need a firmware image for it.
21 You can obtain the firmware from
22 <http://ipw2100.sf.net/>. Once you have the firmware image, you
23 will need to place it in /lib/firmware.
24
25 You will also very likely need the Wireless Tools in order to
26 configure your card:
27
28 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
29
30 It is recommended that you compile this driver as a module (M)
31 rather than built-in (Y). This driver requires firmware at device
32 initialization time, and when built-in this typically happens
33 before the filesystem is accessible (hence firmware will be
34 unavailable and initialization will fail). If you do choose to build
35 this driver into your kernel image, you can avoid this problem by
36 including the firmware and a firmware loader in an initramfs.
37
38config IPW2100_MONITOR
39 bool "Enable promiscuous mode"
40 depends on IPW2100
41 ---help---
42 Enables promiscuous/monitor mode support for the ipw2100 driver.
43 With this feature compiled into the driver, you can switch to
44 promiscuous mode via the Wireless Tool's Monitor mode. While in this
45 mode, no packets can be sent.
46
47config IPW2100_DEBUG
48 bool "Enable full debugging output in IPW2100 module."
49 depends on IPW2100
50 ---help---
51 This option will enable debug tracing output for the IPW2100.
52
53 This will result in the kernel module being ~60k larger. You can
54 control which debug output is sent to the kernel log by setting the
55 value in
56
57 /sys/bus/pci/drivers/ipw2100/debug_level
58
59 This entry will only exist if this option is enabled.
60
61 If you are not trying to debug or develop the IPW2100 driver, you
62 most likely want to say N here.
63
64config IPW2200
65 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
66 depends on PCI && WLAN_80211
67 select WIRELESS_EXT
68 select FW_LOADER
69 select LIB80211
70 select LIBIPW
71 ---help---
72 A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
73 Connection adapters.
74
75 See <file:Documentation/networking/README.ipw2200> for
76 information on the capabilities currently enabled in this
77 driver and for tips for debugging issues and problems.
78
79 In order to use this driver, you will need a firmware image for it.
80 You can obtain the firmware from
81 <http://ipw2200.sf.net/>. See the above referenced README.ipw2200
82 for information on where to install the firmware images.
83
84 You will also very likely need the Wireless Tools in order to
85 configure your card:
86
87 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
88
89 It is recommended that you compile this driver as a module (M)
90 rather than built-in (Y). This driver requires firmware at device
91 initialization time, and when built-in this typically happens
92 before the filesystem is accessible (hence firmware will be
93 unavailable and initialization will fail). If you do choose to build
94 this driver into your kernel image, you can avoid this problem by
95 including the firmware and a firmware loader in an initramfs.
96
97config IPW2200_MONITOR
98 bool "Enable promiscuous mode"
99 depends on IPW2200
100 ---help---
101 Enables promiscuous/monitor mode support for the ipw2200 driver.
102 With this feature compiled into the driver, you can switch to
103 promiscuous mode via the Wireless Tool's Monitor mode. While in this
104 mode, no packets can be sent.
105
106config IPW2200_RADIOTAP
107 bool "Enable radiotap format 802.11 raw packet support"
108 depends on IPW2200_MONITOR
109
110config IPW2200_PROMISCUOUS
111 bool "Enable creation of a RF radiotap promiscuous interface"
112 depends on IPW2200_MONITOR
113 select IPW2200_RADIOTAP
114 ---help---
115 Enables the creation of a second interface prefixed 'rtap'.
116 This second interface will provide every received in radiotap
117 format.
118
119 This is useful for performing wireless network analysis while
120 maintaining an active association.
121
122 Example usage:
123
124 % modprobe ipw2200 rtap_iface=1
125 % ifconfig rtap0 up
126 % tethereal -i rtap0
127
128 If you do not specify 'rtap_iface=1' as a module parameter then
129 the rtap interface will not be created and you will need to turn
130 it on via sysfs:
131
132 % echo 1 > /sys/bus/pci/drivers/ipw2200/*/rtap_iface
133
134config IPW2200_QOS
135 bool "Enable QoS support"
136 depends on IPW2200 && EXPERIMENTAL
137
138config IPW2200_DEBUG
139 bool "Enable full debugging output in IPW2200 module."
140 depends on IPW2200
141 ---help---
142 This option will enable low level debug tracing output for IPW2200.
143
144 Note, normal debug code is already compiled in. This low level
145 debug option enables debug on hot paths (e.g Tx, Rx, ISR) and
146 will result in the kernel module being ~70 larger. Most users
147 will typically not need this high verbosity debug information.
148
149 If you are not sure, say N here.
150
151config LIBIPW
152 tristate
153 select WIRELESS_EXT
154 select CRYPTO
155 select CRYPTO_ARC4
156 select CRYPTO_ECB
157 select CRYPTO_AES
158 select CRYPTO_MICHAEL_MIC
159 select CRYPTO_ECB
160 select CRC32
161 select LIB80211
162 select LIB80211_CRYPT_WEP
163 select LIB80211_CRYPT_TKIP
164 select LIB80211_CRYPT_CCMP
165 ---help---
166 This option enables the hardware independent IEEE 802.11
167 networking stack. This component is deprecated in favor of the
168 mac80211 component.
169
170config LIBIPW_DEBUG
171 bool "Full debugging output for the LIBIPW component"
172 depends on LIBIPW
173 ---help---
174 This option will enable debug tracing output for the
175 libipw component.
176
177 This will result in the kernel module being ~70k larger. You
178 can control which debug output is sent to the kernel log by
179 setting the value in
180
181 /proc/net/ieee80211/debug_level
182
183 For example:
184
185 % echo 0x00000FFO > /proc/net/ieee80211/debug_level
186
187 For a list of values you can assign to debug_level, you
188 can look at the bit mask values in <net/ieee80211.h>
189
190 If you are not trying to debug or develop the libipw
191 component, you most likely want to say N here.
diff --git a/drivers/net/wireless/ipw2x00/Makefile b/drivers/net/wireless/ipw2x00/Makefile
new file mode 100644
index 000000000000..aecd2cff462b
--- /dev/null
+++ b/drivers/net/wireless/ipw2x00/Makefile
@@ -0,0 +1,14 @@
1#
2# Makefile for the Intel Centrino wireless drivers
3#
4
5obj-$(CONFIG_IPW2100) += ipw2100.o
6obj-$(CONFIG_IPW2200) += ipw2200.o
7
8obj-$(CONFIG_LIBIPW) += libipw.o
9libipw-objs := \
10 libipw_module.o \
11 libipw_tx.o \
12 libipw_rx.o \
13 libipw_wx.o \
14 libipw_geo.o
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index bca74811bc7f..1667065b86a7 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -163,6 +163,8 @@ that only one external action is invoked at a time.
163#include <linux/ctype.h> 163#include <linux/ctype.h>
164#include <linux/pm_qos_params.h> 164#include <linux/pm_qos_params.h>
165 165
166#include <net/lib80211.h>
167
166#include "ipw2100.h" 168#include "ipw2100.h"
167 169
168#define IPW2100_VERSION "git-1.2.2" 170#define IPW2100_VERSION "git-1.2.2"
@@ -185,7 +187,7 @@ MODULE_LICENSE("GPL");
185static int debug = 0; 187static int debug = 0;
186static int mode = 0; 188static int mode = 0;
187static int channel = 0; 189static int channel = 0;
188static int associate = 1; 190static int associate = 0;
189static int disable = 0; 191static int disable = 0;
190#ifdef CONFIG_PM 192#ifdef CONFIG_PM
191static struct ipw2100_fw ipw2100_firmware; 193static struct ipw2100_fw ipw2100_firmware;
@@ -201,7 +203,7 @@ module_param(disable, int, 0444);
201MODULE_PARM_DESC(debug, "debug level"); 203MODULE_PARM_DESC(debug, "debug level");
202MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)"); 204MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
203MODULE_PARM_DESC(channel, "channel"); 205MODULE_PARM_DESC(channel, "channel");
204MODULE_PARM_DESC(associate, "auto associate when scanning (default on)"); 206MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
205MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); 207MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
206 208
207static u32 ipw2100_debug_level = IPW_DL_NONE; 209static u32 ipw2100_debug_level = IPW_DL_NONE;
@@ -1914,7 +1916,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
1914 u32 chan; 1916 u32 chan;
1915 char *txratename; 1917 char *txratename;
1916 u8 bssid[ETH_ALEN]; 1918 u8 bssid[ETH_ALEN];
1917 DECLARE_MAC_BUF(mac); 1919 DECLARE_SSID_BUF(ssid);
1918 1920
1919 /* 1921 /*
1920 * TBD: BSSID is usually 00:00:00:00:00:00 here and not 1922 * TBD: BSSID is usually 00:00:00:00:00:00 here and not
@@ -1975,10 +1977,9 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
1975 break; 1977 break;
1976 } 1978 }
1977 1979
1978 IPW_DEBUG_INFO("%s: Associated with '%s' at %s, channel %d (BSSID=" 1980 IPW_DEBUG_INFO("%s: Associated with '%s' at %s, channel %d (BSSID=%pM)\n",
1979 "%s)\n", 1981 priv->net_dev->name, print_ssid(ssid, essid, essid_len),
1980 priv->net_dev->name, escape_essid(essid, essid_len), 1982 txratename, chan, bssid);
1981 txratename, chan, print_mac(mac, bssid));
1982 1983
1983 /* now we copy read ssid into dev */ 1984 /* now we copy read ssid into dev */
1984 if (!(priv->config & CFG_STATIC_ESSID)) { 1985 if (!(priv->config & CFG_STATIC_ESSID)) {
@@ -2004,8 +2005,9 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
2004 .host_command_length = ssid_len 2005 .host_command_length = ssid_len
2005 }; 2006 };
2006 int err; 2007 int err;
2008 DECLARE_SSID_BUF(ssid);
2007 2009
2008 IPW_DEBUG_HC("SSID: '%s'\n", escape_essid(essid, ssid_len)); 2010 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
2009 2011
2010 if (ssid_len) 2012 if (ssid_len)
2011 memcpy(cmd.host_command_parameters, essid, ssid_len); 2013 memcpy(cmd.host_command_parameters, essid, ssid_len);
@@ -2046,12 +2048,12 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
2046 2048
2047static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status) 2049static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
2048{ 2050{
2049 DECLARE_MAC_BUF(mac); 2051 DECLARE_SSID_BUF(ssid);
2050 2052
2051 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, 2053 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
2052 "disassociated: '%s' %s \n", 2054 "disassociated: '%s' %pM \n",
2053 escape_essid(priv->essid, priv->essid_len), 2055 print_ssid(ssid, priv->essid, priv->essid_len),
2054 print_mac(mac, priv->bssid)); 2056 priv->bssid);
2055 2057
2056 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); 2058 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2057 2059
@@ -4008,7 +4010,7 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr,
4008 else 4010 else
4009 len += sprintf(buf + len, "not connected\n"); 4011 len += sprintf(buf + len, "not connected\n");
4010 4012
4011 DUMP_VAR(ieee->crypt[priv->ieee->tx_keyidx], "p"); 4013 DUMP_VAR(ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx], "p");
4012 DUMP_VAR(status, "08lx"); 4014 DUMP_VAR(status, "08lx");
4013 DUMP_VAR(config, "08lx"); 4015 DUMP_VAR(config, "08lx");
4014 DUMP_VAR(capability, "08lx"); 4016 DUMP_VAR(capability, "08lx");
@@ -4058,7 +4060,6 @@ static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr,
4058 char *out = buf; 4060 char *out = buf;
4059 int length; 4061 int length;
4060 int ret; 4062 int ret;
4061 DECLARE_MAC_BUF(mac);
4062 4063
4063 if (priv->status & STATUS_RF_KILL_MASK) 4064 if (priv->status & STATUS_RF_KILL_MASK)
4064 return 0; 4065 return 0;
@@ -4086,7 +4087,7 @@ static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr,
4086 __LINE__); 4087 __LINE__);
4087 4088
4088 out += sprintf(out, "ESSID: %s\n", essid); 4089 out += sprintf(out, "ESSID: %s\n", essid);
4089 out += sprintf(out, "BSSID: %s\n", print_mac(mac, bssid)); 4090 out += sprintf(out, "BSSID: %pM\n", bssid);
4090 out += sprintf(out, "Channel: %d\n", chan); 4091 out += sprintf(out, "Channel: %d\n", chan);
4091 4092
4092 return out - buf; 4093 return out - buf;
@@ -4662,7 +4663,6 @@ static int ipw2100_read_mac_address(struct ipw2100_priv *priv)
4662{ 4663{
4663 u32 length = ETH_ALEN; 4664 u32 length = ETH_ALEN;
4664 u8 addr[ETH_ALEN]; 4665 u8 addr[ETH_ALEN];
4665 DECLARE_MAC_BUF(mac);
4666 4666
4667 int err; 4667 int err;
4668 4668
@@ -4673,8 +4673,7 @@ static int ipw2100_read_mac_address(struct ipw2100_priv *priv)
4673 } 4673 }
4674 4674
4675 memcpy(priv->net_dev->dev_addr, addr, ETH_ALEN); 4675 memcpy(priv->net_dev->dev_addr, addr, ETH_ALEN);
4676 IPW_DEBUG_INFO("card MAC is %s\n", 4676 IPW_DEBUG_INFO("card MAC is %pM\n", priv->net_dev->dev_addr);
4677 print_mac(mac, priv->net_dev->dev_addr));
4678 4677
4679 return 0; 4678 return 0;
4680} 4679}
@@ -5053,10 +5052,8 @@ static int ipw2100_set_mandatory_bssid(struct ipw2100_priv *priv, u8 * bssid,
5053 int err; 5052 int err;
5054 5053
5055#ifdef CONFIG_IPW2100_DEBUG 5054#ifdef CONFIG_IPW2100_DEBUG
5056 DECLARE_MAC_BUF(mac);
5057 if (bssid != NULL) 5055 if (bssid != NULL)
5058 IPW_DEBUG_HC("MANDATORY_BSSID: %s\n", 5056 IPW_DEBUG_HC("MANDATORY_BSSID: %pM\n", bssid);
5059 print_mac(mac, bssid));
5060 else 5057 else
5061 IPW_DEBUG_HC("MANDATORY_BSSID: <clear>\n"); 5058 IPW_DEBUG_HC("MANDATORY_BSSID: <clear>\n");
5062#endif 5059#endif
@@ -5271,21 +5268,21 @@ static int ipw2100_set_ibss_beacon_interval(struct ipw2100_priv *priv,
5271 return 0; 5268 return 0;
5272} 5269}
5273 5270
5274void ipw2100_queues_initialize(struct ipw2100_priv *priv) 5271static void ipw2100_queues_initialize(struct ipw2100_priv *priv)
5275{ 5272{
5276 ipw2100_tx_initialize(priv); 5273 ipw2100_tx_initialize(priv);
5277 ipw2100_rx_initialize(priv); 5274 ipw2100_rx_initialize(priv);
5278 ipw2100_msg_initialize(priv); 5275 ipw2100_msg_initialize(priv);
5279} 5276}
5280 5277
5281void ipw2100_queues_free(struct ipw2100_priv *priv) 5278static void ipw2100_queues_free(struct ipw2100_priv *priv)
5282{ 5279{
5283 ipw2100_tx_free(priv); 5280 ipw2100_tx_free(priv);
5284 ipw2100_rx_free(priv); 5281 ipw2100_rx_free(priv);
5285 ipw2100_msg_free(priv); 5282 ipw2100_msg_free(priv);
5286} 5283}
5287 5284
5288int ipw2100_queues_allocate(struct ipw2100_priv *priv) 5285static int ipw2100_queues_allocate(struct ipw2100_priv *priv)
5289{ 5286{
5290 if (ipw2100_tx_allocate(priv) || 5287 if (ipw2100_tx_allocate(priv) ||
5291 ipw2100_rx_allocate(priv) || ipw2100_msg_allocate(priv)) 5288 ipw2100_rx_allocate(priv) || ipw2100_msg_allocate(priv))
@@ -5517,7 +5514,7 @@ static int ipw2100_configure_security(struct ipw2100_priv *priv, int batch_mode)
5517 } 5514 }
5518 } 5515 }
5519 5516
5520 ipw2100_set_key_index(priv, priv->ieee->tx_keyidx, 1); 5517 ipw2100_set_key_index(priv, priv->ieee->crypt_info.tx_keyidx, 1);
5521 } 5518 }
5522 5519
5523 /* Always enable privacy so the Host can filter WEP packets if 5520 /* Always enable privacy so the Host can filter WEP packets if
@@ -6905,7 +6902,6 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
6905 static const unsigned char off[] = { 6902 static const unsigned char off[] = {
6906 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 6903 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
6907 }; 6904 };
6908 DECLARE_MAC_BUF(mac);
6909 6905
6910 // sanity checks 6906 // sanity checks
6911 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) 6907 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
@@ -6931,8 +6927,7 @@ static int ipw2100_wx_set_wap(struct net_device *dev,
6931 6927
6932 err = ipw2100_set_mandatory_bssid(priv, wrqu->ap_addr.sa_data, 0); 6928 err = ipw2100_set_mandatory_bssid(priv, wrqu->ap_addr.sa_data, 0);
6933 6929
6934 IPW_DEBUG_WX("SET BSSID -> %s\n", 6930 IPW_DEBUG_WX("SET BSSID -> %pM\n", wrqu->ap_addr.sa_data);
6935 print_mac(mac, wrqu->ap_addr.sa_data));
6936 6931
6937 done: 6932 done:
6938 mutex_unlock(&priv->action_mutex); 6933 mutex_unlock(&priv->action_mutex);
@@ -6948,7 +6943,6 @@ static int ipw2100_wx_get_wap(struct net_device *dev,
6948 */ 6943 */
6949 6944
6950 struct ipw2100_priv *priv = ieee80211_priv(dev); 6945 struct ipw2100_priv *priv = ieee80211_priv(dev);
6951 DECLARE_MAC_BUF(mac);
6952 6946
6953 /* If we are associated, trying to associate, or have a statically 6947 /* If we are associated, trying to associate, or have a statically
6954 * configured BSSID then return that; otherwise return ANY */ 6948 * configured BSSID then return that; otherwise return ANY */
@@ -6958,8 +6952,7 @@ static int ipw2100_wx_get_wap(struct net_device *dev,
6958 } else 6952 } else
6959 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); 6953 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
6960 6954
6961 IPW_DEBUG_WX("Getting WAP BSSID: %s\n", 6955 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data);
6962 print_mac(mac, wrqu->ap_addr.sa_data));
6963 return 0; 6956 return 0;
6964} 6957}
6965 6958
@@ -6971,6 +6964,7 @@ static int ipw2100_wx_set_essid(struct net_device *dev,
6971 char *essid = ""; /* ANY */ 6964 char *essid = ""; /* ANY */
6972 int length = 0; 6965 int length = 0;
6973 int err = 0; 6966 int err = 0;
6967 DECLARE_SSID_BUF(ssid);
6974 6968
6975 mutex_lock(&priv->action_mutex); 6969 mutex_lock(&priv->action_mutex);
6976 if (!(priv->status & STATUS_INITIALIZED)) { 6970 if (!(priv->status & STATUS_INITIALIZED)) {
@@ -7000,8 +6994,8 @@ static int ipw2100_wx_set_essid(struct net_device *dev,
7000 goto done; 6994 goto done;
7001 } 6995 }
7002 6996
7003 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length), 6997 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
7004 length); 6998 print_ssid(ssid, essid, length), length);
7005 6999
7006 priv->essid_len = length; 7000 priv->essid_len = length;
7007 memcpy(priv->essid, essid, priv->essid_len); 7001 memcpy(priv->essid, essid, priv->essid_len);
@@ -7022,12 +7016,13 @@ static int ipw2100_wx_get_essid(struct net_device *dev,
7022 */ 7016 */
7023 7017
7024 struct ipw2100_priv *priv = ieee80211_priv(dev); 7018 struct ipw2100_priv *priv = ieee80211_priv(dev);
7019 DECLARE_SSID_BUF(ssid);
7025 7020
7026 /* If we are associated, trying to associate, or have a statically 7021 /* If we are associated, trying to associate, or have a statically
7027 * configured ESSID then return that; otherwise return ANY */ 7022 * configured ESSID then return that; otherwise return ANY */
7028 if (priv->config & CFG_STATIC_ESSID || priv->status & STATUS_ASSOCIATED) { 7023 if (priv->config & CFG_STATIC_ESSID || priv->status & STATUS_ASSOCIATED) {
7029 IPW_DEBUG_WX("Getting essid: '%s'\n", 7024 IPW_DEBUG_WX("Getting essid: '%s'\n",
7030 escape_essid(priv->essid, priv->essid_len)); 7025 print_ssid(ssid, priv->essid, priv->essid_len));
7031 memcpy(extra, priv->essid, priv->essid_len); 7026 memcpy(extra, priv->essid, priv->essid_len);
7032 wrqu->essid.length = priv->essid_len; 7027 wrqu->essid.length = priv->essid_len;
7033 wrqu->essid.flags = 1; /* active */ 7028 wrqu->essid.flags = 1; /* active */
@@ -7625,7 +7620,7 @@ static int ipw2100_wx_set_auth(struct net_device *dev,
7625 struct ipw2100_priv *priv = ieee80211_priv(dev); 7620 struct ipw2100_priv *priv = ieee80211_priv(dev);
7626 struct ieee80211_device *ieee = priv->ieee; 7621 struct ieee80211_device *ieee = priv->ieee;
7627 struct iw_param *param = &wrqu->param; 7622 struct iw_param *param = &wrqu->param;
7628 struct ieee80211_crypt_data *crypt; 7623 struct lib80211_crypt_data *crypt;
7629 unsigned long flags; 7624 unsigned long flags;
7630 int ret = 0; 7625 int ret = 0;
7631 7626
@@ -7640,7 +7635,7 @@ static int ipw2100_wx_set_auth(struct net_device *dev,
7640 break; 7635 break;
7641 7636
7642 case IW_AUTH_TKIP_COUNTERMEASURES: 7637 case IW_AUTH_TKIP_COUNTERMEASURES:
7643 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx]; 7638 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
7644 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags) 7639 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
7645 break; 7640 break;
7646 7641
@@ -7717,7 +7712,7 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
7717{ 7712{
7718 struct ipw2100_priv *priv = ieee80211_priv(dev); 7713 struct ipw2100_priv *priv = ieee80211_priv(dev);
7719 struct ieee80211_device *ieee = priv->ieee; 7714 struct ieee80211_device *ieee = priv->ieee;
7720 struct ieee80211_crypt_data *crypt; 7715 struct lib80211_crypt_data *crypt;
7721 struct iw_param *param = &wrqu->param; 7716 struct iw_param *param = &wrqu->param;
7722 int ret = 0; 7717 int ret = 0;
7723 7718
@@ -7733,7 +7728,7 @@ static int ipw2100_wx_get_auth(struct net_device *dev,
7733 break; 7728 break;
7734 7729
7735 case IW_AUTH_TKIP_COUNTERMEASURES: 7730 case IW_AUTH_TKIP_COUNTERMEASURES:
7736 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx]; 7731 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
7737 if (!crypt || !crypt->ops->get_flags) { 7732 if (!crypt || !crypt->ops->get_flags) {
7738 IPW_DEBUG_WARNING("Can't get TKIP countermeasures: " 7733 IPW_DEBUG_WARNING("Can't get TKIP countermeasures: "
7739 "crypt not set!\n"); 7734 "crypt not set!\n");
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index bbf1ddcafba8..bbf1ddcafba8 100644
--- a/drivers/net/wireless/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 7a9f901d4ff6..625f2cf99fa9 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -87,7 +87,7 @@ static int channel = 0;
87static int mode = 0; 87static int mode = 0;
88 88
89static u32 ipw_debug_level; 89static u32 ipw_debug_level;
90static int associate = 1; 90static int associate;
91static int auto_create = 1; 91static int auto_create = 1;
92static int led = 0; 92static int led = 0;
93static int disable = 0; 93static int disable = 0;
@@ -2265,8 +2265,8 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2265 return -1; 2265 return -1;
2266 } 2266 }
2267 2267
2268 IPW_DEBUG_INFO("%s: Setting MAC to %s\n", 2268 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2269 priv->net_dev->name, print_mac(mac, mac)); 2269 priv->net_dev->name, mac);
2270 2270
2271 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac); 2271 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2272} 2272}
@@ -3812,7 +3812,6 @@ static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3812{ 3812{
3813 struct ipw_station_entry entry; 3813 struct ipw_station_entry entry;
3814 int i; 3814 int i;
3815 DECLARE_MAC_BUF(mac);
3816 3815
3817 for (i = 0; i < priv->num_stations; i++) { 3816 for (i = 0; i < priv->num_stations; i++) {
3818 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) { 3817 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
@@ -3829,7 +3828,7 @@ static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3829 if (i == MAX_STATIONS) 3828 if (i == MAX_STATIONS)
3830 return IPW_INVALID_STATION; 3829 return IPW_INVALID_STATION;
3831 3830
3832 IPW_DEBUG_SCAN("Adding AdHoc station: %s\n", print_mac(mac, bssid)); 3831 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3833 3832
3834 entry.reserved = 0; 3833 entry.reserved = 0;
3835 entry.support_mode = 0; 3834 entry.support_mode = 0;
@@ -3856,7 +3855,6 @@ static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3856static void ipw_send_disassociate(struct ipw_priv *priv, int quiet) 3855static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3857{ 3856{
3858 int err; 3857 int err;
3859 DECLARE_MAC_BUF(mac);
3860 3858
3861 if (priv->status & STATUS_ASSOCIATING) { 3859 if (priv->status & STATUS_ASSOCIATING) {
3862 IPW_DEBUG_ASSOC("Disassociating while associating.\n"); 3860 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
@@ -3869,9 +3867,9 @@ static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3869 return; 3867 return;
3870 } 3868 }
3871 3869
3872 IPW_DEBUG_ASSOC("Disassocation attempt from %s " 3870 IPW_DEBUG_ASSOC("Disassocation attempt from %pM "
3873 "on channel %d.\n", 3871 "on channel %d.\n",
3874 print_mac(mac, priv->assoc_request.bssid), 3872 priv->assoc_request.bssid,
3875 priv->assoc_request.channel); 3873 priv->assoc_request.channel);
3876 3874
3877 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED); 3875 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
@@ -4347,7 +4345,8 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4347 return; 4345 return;
4348 } 4346 }
4349 4347
4350 if (priv->status & STATUS_SCANNING) { 4348 if (priv->status & STATUS_SCANNING &&
4349 missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
4351 /* Stop scan to keep fw from getting 4350 /* Stop scan to keep fw from getting
4352 * stuck (only if we aren't roaming -- 4351 * stuck (only if we aren't roaming --
4353 * otherwise we'll never scan more than 2 or 3 4352 * otherwise we'll never scan more than 2 or 3
@@ -4398,7 +4397,7 @@ static void handle_scan_event(struct ipw_priv *priv)
4398static void ipw_rx_notification(struct ipw_priv *priv, 4397static void ipw_rx_notification(struct ipw_priv *priv,
4399 struct ipw_rx_notification *notif) 4398 struct ipw_rx_notification *notif)
4400{ 4399{
4401 DECLARE_MAC_BUF(mac); 4400 DECLARE_SSID_BUF(ssid);
4402 u16 size = le16_to_cpu(notif->size); 4401 u16 size = le16_to_cpu(notif->size);
4403 notif->size = le16_to_cpu(notif->size); 4402 notif->size = le16_to_cpu(notif->size);
4404 4403
@@ -4412,11 +4411,10 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4412 case CMAS_ASSOCIATED:{ 4411 case CMAS_ASSOCIATED:{
4413 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4412 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4414 IPW_DL_ASSOC, 4413 IPW_DL_ASSOC,
4415 "associated: '%s' %s" 4414 "associated: '%s' %pM \n",
4416 " \n", 4415 print_ssid(ssid, priv->essid,
4417 escape_essid(priv->essid, 4416 priv->essid_len),
4418 priv->essid_len), 4417 priv->bssid);
4419 print_mac(mac, priv->bssid));
4420 4418
4421 switch (priv->ieee->iw_mode) { 4419 switch (priv->ieee->iw_mode) {
4422 case IW_MODE_INFRA: 4420 case IW_MODE_INFRA:
@@ -4450,7 +4448,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4450 4448
4451#ifdef CONFIG_IPW2200_QOS 4449#ifdef CONFIG_IPW2200_QOS
4452#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \ 4450#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4453 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_ctl)) 4451 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4454 if ((priv->status & STATUS_AUTH) && 4452 if ((priv->status & STATUS_AUTH) &&
4455 (IPW_GET_PACKET_STYPE(&notif->u.raw) 4453 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4456 == IEEE80211_STYPE_ASSOC_RESP)) { 4454 == IEEE80211_STYPE_ASSOC_RESP)) {
@@ -4493,13 +4491,14 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4493 IPW_DL_STATE | 4491 IPW_DL_STATE |
4494 IPW_DL_ASSOC, 4492 IPW_DL_ASSOC,
4495 "deauthenticated: '%s' " 4493 "deauthenticated: '%s' "
4496 "%s" 4494 "%pM"
4497 ": (0x%04X) - %s \n", 4495 ": (0x%04X) - %s \n",
4498 escape_essid(priv-> 4496 print_ssid(ssid,
4499 essid, 4497 priv->
4500 priv-> 4498 essid,
4501 essid_len), 4499 priv->
4502 print_mac(mac, priv->bssid), 4500 essid_len),
4501 priv->bssid,
4503 le16_to_cpu(auth->status), 4502 le16_to_cpu(auth->status),
4504 ipw_get_status_code 4503 ipw_get_status_code
4505 (le16_to_cpu 4504 (le16_to_cpu
@@ -4516,11 +4515,10 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4516 4515
4517 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4516 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4518 IPW_DL_ASSOC, 4517 IPW_DL_ASSOC,
4519 "authenticated: '%s' %s" 4518 "authenticated: '%s' %pM\n",
4520 "\n", 4519 print_ssid(ssid, priv->essid,
4521 escape_essid(priv->essid, 4520 priv->essid_len),
4522 priv->essid_len), 4521 priv->bssid);
4523 print_mac(mac, priv->bssid));
4524 break; 4522 break;
4525 } 4523 }
4526 4524
@@ -4545,11 +4543,10 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4545 4543
4546 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4544 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4547 IPW_DL_ASSOC, 4545 IPW_DL_ASSOC,
4548 "disassociated: '%s' %s" 4546 "disassociated: '%s' %pM \n",
4549 " \n", 4547 print_ssid(ssid, priv->essid,
4550 escape_essid(priv->essid, 4548 priv->essid_len),
4551 priv->essid_len), 4549 priv->bssid);
4552 print_mac(mac, priv->bssid));
4553 4550
4554 priv->status &= 4551 priv->status &=
4555 ~(STATUS_DISASSOCIATING | 4552 ~(STATUS_DISASSOCIATING |
@@ -4584,10 +4581,10 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4584 switch (auth->state) { 4581 switch (auth->state) {
4585 case CMAS_AUTHENTICATED: 4582 case CMAS_AUTHENTICATED:
4586 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4583 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4587 "authenticated: '%s' %s \n", 4584 "authenticated: '%s' %pM \n",
4588 escape_essid(priv->essid, 4585 print_ssid(ssid, priv->essid,
4589 priv->essid_len), 4586 priv->essid_len),
4590 print_mac(mac, priv->bssid)); 4587 priv->bssid);
4591 priv->status |= STATUS_AUTH; 4588 priv->status |= STATUS_AUTH;
4592 break; 4589 break;
4593 4590
@@ -4603,10 +4600,10 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4603 } 4600 }
4604 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4601 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4605 IPW_DL_ASSOC, 4602 IPW_DL_ASSOC,
4606 "deauthenticated: '%s' %s\n", 4603 "deauthenticated: '%s' %pM\n",
4607 escape_essid(priv->essid, 4604 print_ssid(ssid, priv->essid,
4608 priv->essid_len), 4605 priv->essid_len),
4609 print_mac(mac, priv->bssid)); 4606 priv->bssid);
4610 4607
4611 priv->status &= ~(STATUS_ASSOCIATING | 4608 priv->status &= ~(STATUS_ASSOCIATING |
4612 STATUS_AUTH | 4609 STATUS_AUTH |
@@ -5430,27 +5427,17 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
5430 int roaming) 5427 int roaming)
5431{ 5428{
5432 struct ipw_supported_rates rates; 5429 struct ipw_supported_rates rates;
5433 DECLARE_MAC_BUF(mac); 5430 DECLARE_SSID_BUF(ssid);
5434 DECLARE_MAC_BUF(mac2);
5435 5431
5436 /* Verify that this network's capability is compatible with the 5432 /* Verify that this network's capability is compatible with the
5437 * current mode (AdHoc or Infrastructure) */ 5433 * current mode (AdHoc or Infrastructure) */
5438 if ((priv->ieee->iw_mode == IW_MODE_ADHOC && 5434 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5439 !(network->capability & WLAN_CAPABILITY_IBSS))) { 5435 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5440 IPW_DEBUG_MERGE("Network '%s (%s)' excluded due to " 5436 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5441 "capability mismatch.\n", 5437 "capability mismatch.\n",
5442 escape_essid(network->ssid, network->ssid_len), 5438 print_ssid(ssid, network->ssid,
5443 print_mac(mac, network->bssid)); 5439 network->ssid_len),
5444 return 0; 5440 network->bssid);
5445 }
5446
5447 /* If we do not have an ESSID for this AP, we can not associate with
5448 * it */
5449 if (network->flags & NETWORK_EMPTY_ESSID) {
5450 IPW_DEBUG_MERGE("Network '%s (%s)' excluded "
5451 "because of hidden ESSID.\n",
5452 escape_essid(network->ssid, network->ssid_len),
5453 print_mac(mac, network->bssid));
5454 return 0; 5441 return 0;
5455 } 5442 }
5456 5443
@@ -5460,11 +5447,11 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
5460 if ((network->ssid_len != match->network->ssid_len) || 5447 if ((network->ssid_len != match->network->ssid_len) ||
5461 memcmp(network->ssid, match->network->ssid, 5448 memcmp(network->ssid, match->network->ssid,
5462 network->ssid_len)) { 5449 network->ssid_len)) {
5463 IPW_DEBUG_MERGE("Network '%s (%s)' excluded " 5450 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5464 "because of non-network ESSID.\n", 5451 "because of non-network ESSID.\n",
5465 escape_essid(network->ssid, 5452 print_ssid(ssid, network->ssid,
5466 network->ssid_len), 5453 network->ssid_len),
5467 print_mac(mac, network->bssid)); 5454 network->bssid);
5468 return 0; 5455 return 0;
5469 } 5456 }
5470 } else { 5457 } else {
@@ -5477,13 +5464,14 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
5477 char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 5464 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5478 5465
5479 strncpy(escaped, 5466 strncpy(escaped,
5480 escape_essid(network->ssid, network->ssid_len), 5467 print_ssid(ssid, network->ssid,
5468 network->ssid_len),
5481 sizeof(escaped)); 5469 sizeof(escaped));
5482 IPW_DEBUG_MERGE("Network '%s (%s)' excluded " 5470 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5483 "because of ESSID mismatch: '%s'.\n", 5471 "because of ESSID mismatch: '%s'.\n",
5484 escaped, print_mac(mac, network->bssid), 5472 escaped, network->bssid,
5485 escape_essid(priv->essid, 5473 print_ssid(ssid, priv->essid,
5486 priv->essid_len)); 5474 priv->essid_len));
5487 return 0; 5475 return 0;
5488 } 5476 }
5489 } 5477 }
@@ -5494,24 +5482,25 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
5494 if (network->time_stamp[0] < match->network->time_stamp[0]) { 5482 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5495 IPW_DEBUG_MERGE("Network '%s excluded because newer than " 5483 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5496 "current network.\n", 5484 "current network.\n",
5497 escape_essid(match->network->ssid, 5485 print_ssid(ssid, match->network->ssid,
5498 match->network->ssid_len)); 5486 match->network->ssid_len));
5499 return 0; 5487 return 0;
5500 } else if (network->time_stamp[1] < match->network->time_stamp[1]) { 5488 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5501 IPW_DEBUG_MERGE("Network '%s excluded because newer than " 5489 IPW_DEBUG_MERGE("Network '%s excluded because newer than "
5502 "current network.\n", 5490 "current network.\n",
5503 escape_essid(match->network->ssid, 5491 print_ssid(ssid, match->network->ssid,
5504 match->network->ssid_len)); 5492 match->network->ssid_len));
5505 return 0; 5493 return 0;
5506 } 5494 }
5507 5495
5508 /* Now go through and see if the requested network is valid... */ 5496 /* Now go through and see if the requested network is valid... */
5509 if (priv->ieee->scan_age != 0 && 5497 if (priv->ieee->scan_age != 0 &&
5510 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { 5498 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5511 IPW_DEBUG_MERGE("Network '%s (%s)' excluded " 5499 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5512 "because of age: %ums.\n", 5500 "because of age: %ums.\n",
5513 escape_essid(network->ssid, network->ssid_len), 5501 print_ssid(ssid, network->ssid,
5514 print_mac(mac, network->bssid), 5502 network->ssid_len),
5503 network->bssid,
5515 jiffies_to_msecs(jiffies - 5504 jiffies_to_msecs(jiffies -
5516 network->last_scanned)); 5505 network->last_scanned));
5517 return 0; 5506 return 0;
@@ -5519,10 +5508,11 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
5519 5508
5520 if ((priv->config & CFG_STATIC_CHANNEL) && 5509 if ((priv->config & CFG_STATIC_CHANNEL) &&
5521 (network->channel != priv->channel)) { 5510 (network->channel != priv->channel)) {
5522 IPW_DEBUG_MERGE("Network '%s (%s)' excluded " 5511 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5523 "because of channel mismatch: %d != %d.\n", 5512 "because of channel mismatch: %d != %d.\n",
5524 escape_essid(network->ssid, network->ssid_len), 5513 print_ssid(ssid, network->ssid,
5525 print_mac(mac, network->bssid), 5514 network->ssid_len),
5515 network->bssid,
5526 network->channel, priv->channel); 5516 network->channel, priv->channel);
5527 return 0; 5517 return 0;
5528 } 5518 }
@@ -5530,10 +5520,11 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
5530 /* Verify privacy compatability */ 5520 /* Verify privacy compatability */
5531 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != 5521 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5532 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { 5522 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5533 IPW_DEBUG_MERGE("Network '%s (%s)' excluded " 5523 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5534 "because of privacy mismatch: %s != %s.\n", 5524 "because of privacy mismatch: %s != %s.\n",
5535 escape_essid(network->ssid, network->ssid_len), 5525 print_ssid(ssid, network->ssid,
5536 print_mac(mac, network->bssid), 5526 network->ssid_len),
5527 network->bssid,
5537 priv-> 5528 priv->
5538 capability & CAP_PRIVACY_ON ? "on" : "off", 5529 capability & CAP_PRIVACY_ON ? "on" : "off",
5539 network-> 5530 network->
@@ -5543,41 +5534,44 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
5543 } 5534 }
5544 5535
5545 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) { 5536 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5546 IPW_DEBUG_MERGE("Network '%s (%s)' excluded " 5537 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5547 "because of the same BSSID match: %s" 5538 "because of the same BSSID match: %pM"
5548 ".\n", escape_essid(network->ssid, 5539 ".\n", print_ssid(ssid, network->ssid,
5549 network->ssid_len), 5540 network->ssid_len),
5550 print_mac(mac, network->bssid), 5541 network->bssid,
5551 print_mac(mac2, priv->bssid)); 5542 priv->bssid);
5552 return 0; 5543 return 0;
5553 } 5544 }
5554 5545
5555 /* Filter out any incompatible freq / mode combinations */ 5546 /* Filter out any incompatible freq / mode combinations */
5556 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) { 5547 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5557 IPW_DEBUG_MERGE("Network '%s (%s)' excluded " 5548 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5558 "because of invalid frequency/mode " 5549 "because of invalid frequency/mode "
5559 "combination.\n", 5550 "combination.\n",
5560 escape_essid(network->ssid, network->ssid_len), 5551 print_ssid(ssid, network->ssid,
5561 print_mac(mac, network->bssid)); 5552 network->ssid_len),
5553 network->bssid);
5562 return 0; 5554 return 0;
5563 } 5555 }
5564 5556
5565 /* Ensure that the rates supported by the driver are compatible with 5557 /* Ensure that the rates supported by the driver are compatible with
5566 * this AP, including verification of basic rates (mandatory) */ 5558 * this AP, including verification of basic rates (mandatory) */
5567 if (!ipw_compatible_rates(priv, network, &rates)) { 5559 if (!ipw_compatible_rates(priv, network, &rates)) {
5568 IPW_DEBUG_MERGE("Network '%s (%s)' excluded " 5560 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5569 "because configured rate mask excludes " 5561 "because configured rate mask excludes "
5570 "AP mandatory rate.\n", 5562 "AP mandatory rate.\n",
5571 escape_essid(network->ssid, network->ssid_len), 5563 print_ssid(ssid, network->ssid,
5572 print_mac(mac, network->bssid)); 5564 network->ssid_len),
5565 network->bssid);
5573 return 0; 5566 return 0;
5574 } 5567 }
5575 5568
5576 if (rates.num_rates == 0) { 5569 if (rates.num_rates == 0) {
5577 IPW_DEBUG_MERGE("Network '%s (%s)' excluded " 5570 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5578 "because of no compatible rates.\n", 5571 "because of no compatible rates.\n",
5579 escape_essid(network->ssid, network->ssid_len), 5572 print_ssid(ssid, network->ssid,
5580 print_mac(mac, network->bssid)); 5573 network->ssid_len),
5574 network->bssid);
5581 return 0; 5575 return 0;
5582 } 5576 }
5583 5577
@@ -5588,15 +5582,16 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
5588 /* Set up 'new' AP to this network */ 5582 /* Set up 'new' AP to this network */
5589 ipw_copy_rates(&match->rates, &rates); 5583 ipw_copy_rates(&match->rates, &rates);
5590 match->network = network; 5584 match->network = network;
5591 IPW_DEBUG_MERGE("Network '%s (%s)' is a viable match.\n", 5585 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5592 escape_essid(network->ssid, network->ssid_len), 5586 print_ssid(ssid, network->ssid, network->ssid_len),
5593 print_mac(mac, network->bssid)); 5587 network->bssid);
5594 5588
5595 return 1; 5589 return 1;
5596} 5590}
5597 5591
5598static void ipw_merge_adhoc_network(struct work_struct *work) 5592static void ipw_merge_adhoc_network(struct work_struct *work)
5599{ 5593{
5594 DECLARE_SSID_BUF(ssid);
5600 struct ipw_priv *priv = 5595 struct ipw_priv *priv =
5601 container_of(work, struct ipw_priv, merge_networks); 5596 container_of(work, struct ipw_priv, merge_networks);
5602 struct ieee80211_network *network = NULL; 5597 struct ieee80211_network *network = NULL;
@@ -5627,8 +5622,8 @@ static void ipw_merge_adhoc_network(struct work_struct *work)
5627 mutex_lock(&priv->mutex); 5622 mutex_lock(&priv->mutex);
5628 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) { 5623 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5629 IPW_DEBUG_MERGE("remove network %s\n", 5624 IPW_DEBUG_MERGE("remove network %s\n",
5630 escape_essid(priv->essid, 5625 print_ssid(ssid, priv->essid,
5631 priv->essid_len)); 5626 priv->essid_len));
5632 ipw_remove_current_network(priv); 5627 ipw_remove_current_network(priv);
5633 } 5628 }
5634 5629
@@ -5644,7 +5639,7 @@ static int ipw_best_network(struct ipw_priv *priv,
5644 struct ieee80211_network *network, int roaming) 5639 struct ieee80211_network *network, int roaming)
5645{ 5640{
5646 struct ipw_supported_rates rates; 5641 struct ipw_supported_rates rates;
5647 DECLARE_MAC_BUF(mac); 5642 DECLARE_SSID_BUF(ssid);
5648 5643
5649 /* Verify that this network's capability is compatible with the 5644 /* Verify that this network's capability is compatible with the
5650 * current mode (AdHoc or Infrastructure) */ 5645 * current mode (AdHoc or Infrastructure) */
@@ -5652,20 +5647,11 @@ static int ipw_best_network(struct ipw_priv *priv,
5652 !(network->capability & WLAN_CAPABILITY_ESS)) || 5647 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5653 (priv->ieee->iw_mode == IW_MODE_ADHOC && 5648 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5654 !(network->capability & WLAN_CAPABILITY_IBSS))) { 5649 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5655 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded due to " 5650 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5656 "capability mismatch.\n", 5651 "capability mismatch.\n",
5657 escape_essid(network->ssid, network->ssid_len), 5652 print_ssid(ssid, network->ssid,
5658 print_mac(mac, network->bssid)); 5653 network->ssid_len),
5659 return 0; 5654 network->bssid);
5660 }
5661
5662 /* If we do not have an ESSID for this AP, we can not associate with
5663 * it */
5664 if (network->flags & NETWORK_EMPTY_ESSID) {
5665 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded "
5666 "because of hidden ESSID.\n",
5667 escape_essid(network->ssid, network->ssid_len),
5668 print_mac(mac, network->bssid));
5669 return 0; 5655 return 0;
5670 } 5656 }
5671 5657
@@ -5675,11 +5661,11 @@ static int ipw_best_network(struct ipw_priv *priv,
5675 if ((network->ssid_len != match->network->ssid_len) || 5661 if ((network->ssid_len != match->network->ssid_len) ||
5676 memcmp(network->ssid, match->network->ssid, 5662 memcmp(network->ssid, match->network->ssid,
5677 network->ssid_len)) { 5663 network->ssid_len)) {
5678 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " 5664 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5679 "because of non-network ESSID.\n", 5665 "because of non-network ESSID.\n",
5680 escape_essid(network->ssid, 5666 print_ssid(ssid, network->ssid,
5681 network->ssid_len), 5667 network->ssid_len),
5682 print_mac(mac, network->bssid)); 5668 network->bssid);
5683 return 0; 5669 return 0;
5684 } 5670 }
5685 } else { 5671 } else {
@@ -5691,13 +5677,14 @@ static int ipw_best_network(struct ipw_priv *priv,
5691 min(network->ssid_len, priv->essid_len)))) { 5677 min(network->ssid_len, priv->essid_len)))) {
5692 char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 5678 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5693 strncpy(escaped, 5679 strncpy(escaped,
5694 escape_essid(network->ssid, network->ssid_len), 5680 print_ssid(ssid, network->ssid,
5681 network->ssid_len),
5695 sizeof(escaped)); 5682 sizeof(escaped));
5696 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " 5683 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5697 "because of ESSID mismatch: '%s'.\n", 5684 "because of ESSID mismatch: '%s'.\n",
5698 escaped, print_mac(mac, network->bssid), 5685 escaped, network->bssid,
5699 escape_essid(priv->essid, 5686 print_ssid(ssid, priv->essid,
5700 priv->essid_len)); 5687 priv->essid_len));
5701 return 0; 5688 return 0;
5702 } 5689 }
5703 } 5690 }
@@ -5707,14 +5694,14 @@ static int ipw_best_network(struct ipw_priv *priv,
5707 if (match->network && match->network->stats.rssi > network->stats.rssi) { 5694 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5708 char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; 5695 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5709 strncpy(escaped, 5696 strncpy(escaped,
5710 escape_essid(network->ssid, network->ssid_len), 5697 print_ssid(ssid, network->ssid, network->ssid_len),
5711 sizeof(escaped)); 5698 sizeof(escaped));
5712 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded because " 5699 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5713 "'%s (%s)' has a stronger signal.\n", 5700 "'%s (%pM)' has a stronger signal.\n",
5714 escaped, print_mac(mac, network->bssid), 5701 escaped, network->bssid,
5715 escape_essid(match->network->ssid, 5702 print_ssid(ssid, match->network->ssid,
5716 match->network->ssid_len), 5703 match->network->ssid_len),
5717 print_mac(mac, match->network->bssid)); 5704 match->network->bssid);
5718 return 0; 5705 return 0;
5719 } 5706 }
5720 5707
@@ -5722,11 +5709,12 @@ static int ipw_best_network(struct ipw_priv *priv,
5722 * last 3 seconds, do not try and associate again... */ 5709 * last 3 seconds, do not try and associate again... */
5723 if (network->last_associate && 5710 if (network->last_associate &&
5724 time_after(network->last_associate + (HZ * 3UL), jiffies)) { 5711 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5725 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " 5712 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5726 "because of storming (%ums since last " 5713 "because of storming (%ums since last "
5727 "assoc attempt).\n", 5714 "assoc attempt).\n",
5728 escape_essid(network->ssid, network->ssid_len), 5715 print_ssid(ssid, network->ssid,
5729 print_mac(mac, network->bssid), 5716 network->ssid_len),
5717 network->bssid,
5730 jiffies_to_msecs(jiffies - 5718 jiffies_to_msecs(jiffies -
5731 network->last_associate)); 5719 network->last_associate));
5732 return 0; 5720 return 0;
@@ -5735,10 +5723,11 @@ static int ipw_best_network(struct ipw_priv *priv,
5735 /* Now go through and see if the requested network is valid... */ 5723 /* Now go through and see if the requested network is valid... */
5736 if (priv->ieee->scan_age != 0 && 5724 if (priv->ieee->scan_age != 0 &&
5737 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) { 5725 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5738 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " 5726 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5739 "because of age: %ums.\n", 5727 "because of age: %ums.\n",
5740 escape_essid(network->ssid, network->ssid_len), 5728 print_ssid(ssid, network->ssid,
5741 print_mac(mac, network->bssid), 5729 network->ssid_len),
5730 network->bssid,
5742 jiffies_to_msecs(jiffies - 5731 jiffies_to_msecs(jiffies -
5743 network->last_scanned)); 5732 network->last_scanned));
5744 return 0; 5733 return 0;
@@ -5746,10 +5735,11 @@ static int ipw_best_network(struct ipw_priv *priv,
5746 5735
5747 if ((priv->config & CFG_STATIC_CHANNEL) && 5736 if ((priv->config & CFG_STATIC_CHANNEL) &&
5748 (network->channel != priv->channel)) { 5737 (network->channel != priv->channel)) {
5749 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " 5738 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5750 "because of channel mismatch: %d != %d.\n", 5739 "because of channel mismatch: %d != %d.\n",
5751 escape_essid(network->ssid, network->ssid_len), 5740 print_ssid(ssid, network->ssid,
5752 print_mac(mac, network->bssid), 5741 network->ssid_len),
5742 network->bssid,
5753 network->channel, priv->channel); 5743 network->channel, priv->channel);
5754 return 0; 5744 return 0;
5755 } 5745 }
@@ -5757,10 +5747,11 @@ static int ipw_best_network(struct ipw_priv *priv,
5757 /* Verify privacy compatability */ 5747 /* Verify privacy compatability */
5758 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) != 5748 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5759 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) { 5749 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5760 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " 5750 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5761 "because of privacy mismatch: %s != %s.\n", 5751 "because of privacy mismatch: %s != %s.\n",
5762 escape_essid(network->ssid, network->ssid_len), 5752 print_ssid(ssid, network->ssid,
5763 print_mac(mac, network->bssid), 5753 network->ssid_len),
5754 network->bssid,
5764 priv->capability & CAP_PRIVACY_ON ? "on" : 5755 priv->capability & CAP_PRIVACY_ON ? "on" :
5765 "off", 5756 "off",
5766 network->capability & 5757 network->capability &
@@ -5770,48 +5761,53 @@ static int ipw_best_network(struct ipw_priv *priv,
5770 5761
5771 if ((priv->config & CFG_STATIC_BSSID) && 5762 if ((priv->config & CFG_STATIC_BSSID) &&
5772 memcmp(network->bssid, priv->bssid, ETH_ALEN)) { 5763 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5773 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " 5764 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5774 "because of BSSID mismatch: %s.\n", 5765 "because of BSSID mismatch: %pM.\n",
5775 escape_essid(network->ssid, network->ssid_len), 5766 print_ssid(ssid, network->ssid,
5776 print_mac(mac, network->bssid), print_mac(mac, priv->bssid)); 5767 network->ssid_len),
5768 network->bssid, priv->bssid);
5777 return 0; 5769 return 0;
5778 } 5770 }
5779 5771
5780 /* Filter out any incompatible freq / mode combinations */ 5772 /* Filter out any incompatible freq / mode combinations */
5781 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) { 5773 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5782 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " 5774 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5783 "because of invalid frequency/mode " 5775 "because of invalid frequency/mode "
5784 "combination.\n", 5776 "combination.\n",
5785 escape_essid(network->ssid, network->ssid_len), 5777 print_ssid(ssid, network->ssid,
5786 print_mac(mac, network->bssid)); 5778 network->ssid_len),
5779 network->bssid);
5787 return 0; 5780 return 0;
5788 } 5781 }
5789 5782
5790 /* Filter out invalid channel in current GEO */ 5783 /* Filter out invalid channel in current GEO */
5791 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) { 5784 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5792 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " 5785 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5793 "because of invalid channel in current GEO\n", 5786 "because of invalid channel in current GEO\n",
5794 escape_essid(network->ssid, network->ssid_len), 5787 print_ssid(ssid, network->ssid,
5795 print_mac(mac, network->bssid)); 5788 network->ssid_len),
5789 network->bssid);
5796 return 0; 5790 return 0;
5797 } 5791 }
5798 5792
5799 /* Ensure that the rates supported by the driver are compatible with 5793 /* Ensure that the rates supported by the driver are compatible with
5800 * this AP, including verification of basic rates (mandatory) */ 5794 * this AP, including verification of basic rates (mandatory) */
5801 if (!ipw_compatible_rates(priv, network, &rates)) { 5795 if (!ipw_compatible_rates(priv, network, &rates)) {
5802 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " 5796 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5803 "because configured rate mask excludes " 5797 "because configured rate mask excludes "
5804 "AP mandatory rate.\n", 5798 "AP mandatory rate.\n",
5805 escape_essid(network->ssid, network->ssid_len), 5799 print_ssid(ssid, network->ssid,
5806 print_mac(mac, network->bssid)); 5800 network->ssid_len),
5801 network->bssid);
5807 return 0; 5802 return 0;
5808 } 5803 }
5809 5804
5810 if (rates.num_rates == 0) { 5805 if (rates.num_rates == 0) {
5811 IPW_DEBUG_ASSOC("Network '%s (%s)' excluded " 5806 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5812 "because of no compatible rates.\n", 5807 "because of no compatible rates.\n",
5813 escape_essid(network->ssid, network->ssid_len), 5808 print_ssid(ssid, network->ssid,
5814 print_mac(mac, network->bssid)); 5809 network->ssid_len),
5810 network->bssid);
5815 return 0; 5811 return 0;
5816 } 5812 }
5817 5813
@@ -5823,9 +5819,9 @@ static int ipw_best_network(struct ipw_priv *priv,
5823 ipw_copy_rates(&match->rates, &rates); 5819 ipw_copy_rates(&match->rates, &rates);
5824 match->network = network; 5820 match->network = network;
5825 5821
5826 IPW_DEBUG_ASSOC("Network '%s (%s)' is a viable match.\n", 5822 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5827 escape_essid(network->ssid, network->ssid_len), 5823 print_ssid(ssid, network->ssid, network->ssid_len),
5828 print_mac(mac, network->bssid)); 5824 network->bssid);
5829 5825
5830 return 1; 5826 return 1;
5831} 5827}
@@ -6067,7 +6063,7 @@ static void ipw_bg_adhoc_check(struct work_struct *work)
6067 6063
6068static void ipw_debug_config(struct ipw_priv *priv) 6064static void ipw_debug_config(struct ipw_priv *priv)
6069{ 6065{
6070 DECLARE_MAC_BUF(mac); 6066 DECLARE_SSID_BUF(ssid);
6071 IPW_DEBUG_INFO("Scan completed, no valid APs matched " 6067 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6072 "[CFG 0x%08X]\n", priv->config); 6068 "[CFG 0x%08X]\n", priv->config);
6073 if (priv->config & CFG_STATIC_CHANNEL) 6069 if (priv->config & CFG_STATIC_CHANNEL)
@@ -6076,12 +6072,11 @@ static void ipw_debug_config(struct ipw_priv *priv)
6076 IPW_DEBUG_INFO("Channel unlocked.\n"); 6072 IPW_DEBUG_INFO("Channel unlocked.\n");
6077 if (priv->config & CFG_STATIC_ESSID) 6073 if (priv->config & CFG_STATIC_ESSID)
6078 IPW_DEBUG_INFO("ESSID locked to '%s'\n", 6074 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6079 escape_essid(priv->essid, priv->essid_len)); 6075 print_ssid(ssid, priv->essid, priv->essid_len));
6080 else 6076 else
6081 IPW_DEBUG_INFO("ESSID unlocked.\n"); 6077 IPW_DEBUG_INFO("ESSID unlocked.\n");
6082 if (priv->config & CFG_STATIC_BSSID) 6078 if (priv->config & CFG_STATIC_BSSID)
6083 IPW_DEBUG_INFO("BSSID locked to %s\n", 6079 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6084 print_mac(mac, priv->bssid));
6085 else 6080 else
6086 IPW_DEBUG_INFO("BSSID unlocked.\n"); 6081 IPW_DEBUG_INFO("BSSID unlocked.\n");
6087 if (priv->capability & CAP_PRIVACY_ON) 6082 if (priv->capability & CAP_PRIVACY_ON)
@@ -6277,6 +6272,20 @@ static void ipw_add_scan_channels(struct ipw_priv *priv,
6277 } 6272 }
6278} 6273}
6279 6274
6275static int ipw_passive_dwell_time(struct ipw_priv *priv)
6276{
6277 /* staying on passive channels longer than the DTIM interval during a
6278 * scan, while associated, causes the firmware to cancel the scan
6279 * without notification. Hence, don't stay on passive channels longer
6280 * than the beacon interval.
6281 */
6282 if (priv->status & STATUS_ASSOCIATED
6283 && priv->assoc_network->beacon_interval > 10)
6284 return priv->assoc_network->beacon_interval - 10;
6285 else
6286 return 120;
6287}
6288
6280static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct) 6289static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6281{ 6290{
6282 struct ipw_scan_request_ext scan; 6291 struct ipw_scan_request_ext scan;
@@ -6320,16 +6329,16 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6320 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee)); 6329 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6321 6330
6322 if (type == IW_SCAN_TYPE_PASSIVE) { 6331 if (type == IW_SCAN_TYPE_PASSIVE) {
6323 IPW_DEBUG_WX("use passive scanning\n"); 6332 IPW_DEBUG_WX("use passive scanning\n");
6324 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN; 6333 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6325 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6334 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6326 cpu_to_le16(120); 6335 cpu_to_le16(ipw_passive_dwell_time(priv));
6327 ipw_add_scan_channels(priv, &scan, scan_type); 6336 ipw_add_scan_channels(priv, &scan, scan_type);
6328 goto send_request; 6337 goto send_request;
6329 } 6338 }
6330 6339
6331 /* Use active scan by default. */ 6340 /* Use active scan by default. */
6332 if (priv->config & CFG_SPEED_SCAN) 6341 if (priv->config & CFG_SPEED_SCAN)
6333 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 6342 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6334 cpu_to_le16(30); 6343 cpu_to_le16(30);
6335 else 6344 else
@@ -6339,7 +6348,8 @@ static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6339 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 6348 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6340 cpu_to_le16(20); 6349 cpu_to_le16(20);
6341 6350
6342 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120); 6351 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6352 cpu_to_le16(ipw_passive_dwell_time(priv));
6343 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20); 6353 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6344 6354
6345#ifdef CONFIG_IPW2200_MONITOR 6355#ifdef CONFIG_IPW2200_MONITOR
@@ -6607,7 +6617,7 @@ static int ipw_wx_set_auth(struct net_device *dev,
6607 struct ipw_priv *priv = ieee80211_priv(dev); 6617 struct ipw_priv *priv = ieee80211_priv(dev);
6608 struct ieee80211_device *ieee = priv->ieee; 6618 struct ieee80211_device *ieee = priv->ieee;
6609 struct iw_param *param = &wrqu->param; 6619 struct iw_param *param = &wrqu->param;
6610 struct ieee80211_crypt_data *crypt; 6620 struct lib80211_crypt_data *crypt;
6611 unsigned long flags; 6621 unsigned long flags;
6612 int ret = 0; 6622 int ret = 0;
6613 6623
@@ -6629,7 +6639,7 @@ static int ipw_wx_set_auth(struct net_device *dev,
6629 break; 6639 break;
6630 6640
6631 case IW_AUTH_TKIP_COUNTERMEASURES: 6641 case IW_AUTH_TKIP_COUNTERMEASURES:
6632 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx]; 6642 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6633 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags) 6643 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6634 break; 6644 break;
6635 6645
@@ -6706,7 +6716,7 @@ static int ipw_wx_get_auth(struct net_device *dev,
6706{ 6716{
6707 struct ipw_priv *priv = ieee80211_priv(dev); 6717 struct ipw_priv *priv = ieee80211_priv(dev);
6708 struct ieee80211_device *ieee = priv->ieee; 6718 struct ieee80211_device *ieee = priv->ieee;
6709 struct ieee80211_crypt_data *crypt; 6719 struct lib80211_crypt_data *crypt;
6710 struct iw_param *param = &wrqu->param; 6720 struct iw_param *param = &wrqu->param;
6711 int ret = 0; 6721 int ret = 0;
6712 6722
@@ -6722,7 +6732,7 @@ static int ipw_wx_get_auth(struct net_device *dev,
6722 break; 6732 break;
6723 6733
6724 case IW_AUTH_TKIP_COUNTERMEASURES: 6734 case IW_AUTH_TKIP_COUNTERMEASURES:
6725 crypt = priv->ieee->crypt[priv->ieee->tx_keyidx]; 6735 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6726 if (!crypt || !crypt->ops->get_flags) 6736 if (!crypt || !crypt->ops->get_flags)
6727 break; 6737 break;
6728 6738
@@ -6893,8 +6903,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6893 if ((priv->status & STATUS_ASSOCIATED) && 6903 if ((priv->status & STATUS_ASSOCIATED) &&
6894 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) { 6904 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6895 if (memcmp(network->bssid, priv->bssid, ETH_ALEN)) 6905 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6896 if ((network->capability & WLAN_CAPABILITY_IBSS) && 6906 if (network->capability & WLAN_CAPABILITY_IBSS)
6897 !(network->flags & NETWORK_EMPTY_ESSID))
6898 if ((network->ssid_len == 6907 if ((network->ssid_len ==
6899 priv->assoc_network->ssid_len) && 6908 priv->assoc_network->ssid_len) &&
6900 !memcmp(network->ssid, 6909 !memcmp(network->ssid,
@@ -7296,7 +7305,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
7296 struct ipw_supported_rates *rates, int roaming) 7305 struct ipw_supported_rates *rates, int roaming)
7297{ 7306{
7298 int err; 7307 int err;
7299 DECLARE_MAC_BUF(mac); 7308 DECLARE_SSID_BUF(ssid);
7300 7309
7301 if (priv->config & CFG_FIXED_RATE) 7310 if (priv->config & CFG_FIXED_RATE)
7302 ipw_set_fixed_rate(priv, network->mode); 7311 ipw_set_fixed_rate(priv, network->mode);
@@ -7365,7 +7374,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
7365 IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, " 7374 IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
7366 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n", 7375 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7367 roaming ? "Rea" : "A", 7376 roaming ? "Rea" : "A",
7368 escape_essid(priv->essid, priv->essid_len), 7377 print_ssid(ssid, priv->essid, priv->essid_len),
7369 network->channel, 7378 network->channel,
7370 ipw_modes[priv->assoc_request.ieee_mode], 7379 ipw_modes[priv->assoc_request.ieee_mode],
7371 rates->num_rates, 7380 rates->num_rates,
@@ -7464,9 +7473,9 @@ static int ipw_associate_network(struct ipw_priv *priv,
7464 return err; 7473 return err;
7465 } 7474 }
7466 7475
7467 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %s \n", 7476 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n",
7468 escape_essid(priv->essid, priv->essid_len), 7477 print_ssid(ssid, priv->essid, priv->essid_len),
7469 print_mac(mac, priv->bssid)); 7478 priv->bssid);
7470 7479
7471 return 0; 7480 return 0;
7472} 7481}
@@ -7556,6 +7565,7 @@ static int ipw_associate(void *data)
7556 struct ipw_supported_rates *rates; 7565 struct ipw_supported_rates *rates;
7557 struct list_head *element; 7566 struct list_head *element;
7558 unsigned long flags; 7567 unsigned long flags;
7568 DECLARE_SSID_BUF(ssid);
7559 7569
7560 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 7570 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7561 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n"); 7571 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
@@ -7582,8 +7592,7 @@ static int ipw_associate(void *data)
7582 } 7592 }
7583 7593
7584 if (!(priv->config & CFG_ASSOCIATE) && 7594 if (!(priv->config & CFG_ASSOCIATE) &&
7585 !(priv->config & (CFG_STATIC_ESSID | 7595 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7586 CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
7587 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n"); 7596 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7588 return 0; 7597 return 0;
7589 } 7598 }
@@ -7605,7 +7614,6 @@ static int ipw_associate(void *data)
7605 if (list_empty(&priv->ieee->network_free_list)) { 7614 if (list_empty(&priv->ieee->network_free_list)) {
7606 struct ieee80211_network *oldest = NULL; 7615 struct ieee80211_network *oldest = NULL;
7607 struct ieee80211_network *target; 7616 struct ieee80211_network *target;
7608 DECLARE_MAC_BUF(mac);
7609 7617
7610 list_for_each_entry(target, &priv->ieee->network_list, list) { 7618 list_for_each_entry(target, &priv->ieee->network_list, list) {
7611 if ((oldest == NULL) || 7619 if ((oldest == NULL) ||
@@ -7616,11 +7624,11 @@ static int ipw_associate(void *data)
7616 /* If there are no more slots, expire the oldest */ 7624 /* If there are no more slots, expire the oldest */
7617 list_del(&oldest->list); 7625 list_del(&oldest->list);
7618 target = oldest; 7626 target = oldest;
7619 IPW_DEBUG_ASSOC("Expired '%s' (%s) from " 7627 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7620 "network list.\n", 7628 "network list.\n",
7621 escape_essid(target->ssid, 7629 print_ssid(ssid, target->ssid,
7622 target->ssid_len), 7630 target->ssid_len),
7623 print_mac(mac, target->bssid)); 7631 target->bssid);
7624 list_add_tail(&target->list, 7632 list_add_tail(&target->list,
7625 &priv->ieee->network_free_list); 7633 &priv->ieee->network_free_list);
7626 } 7634 }
@@ -7673,12 +7681,12 @@ static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7673 u16 fc; 7681 u16 fc;
7674 7682
7675 hdr = (struct ieee80211_hdr *)skb->data; 7683 hdr = (struct ieee80211_hdr *)skb->data;
7676 fc = le16_to_cpu(hdr->frame_ctl); 7684 fc = le16_to_cpu(hdr->frame_control);
7677 if (!(fc & IEEE80211_FCTL_PROTECTED)) 7685 if (!(fc & IEEE80211_FCTL_PROTECTED))
7678 return; 7686 return;
7679 7687
7680 fc &= ~IEEE80211_FCTL_PROTECTED; 7688 fc &= ~IEEE80211_FCTL_PROTECTED;
7681 hdr->frame_ctl = cpu_to_le16(fc); 7689 hdr->frame_control = cpu_to_le16(fc);
7682 switch (priv->ieee->sec.level) { 7690 switch (priv->ieee->sec.level) {
7683 case SEC_LEVEL_3: 7691 case SEC_LEVEL_3:
7684 /* Remove CCMP HDR */ 7692 /* Remove CCMP HDR */
@@ -7806,15 +7814,6 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7806 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr), 7814 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7807 rxb->skb->data + IPW_RX_FRAME_SIZE, len); 7815 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7808 7816
7809 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7810 * part of our real header, saves a little time.
7811 *
7812 * No longer necessary since we fill in all our data. Purge before merging
7813 * patch officially.
7814 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7815 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7816 */
7817
7818 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data; 7817 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7819 7818
7820 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; 7819 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
@@ -7990,17 +7989,17 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7990 } 7989 }
7991 7990
7992 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE; 7991 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7993 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) { 7992 if (ieee80211_is_management(le16_to_cpu(hdr->frame_control))) {
7994 if (filter & IPW_PROM_NO_MGMT) 7993 if (filter & IPW_PROM_NO_MGMT)
7995 return; 7994 return;
7996 if (filter & IPW_PROM_MGMT_HEADER_ONLY) 7995 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7997 hdr_only = 1; 7996 hdr_only = 1;
7998 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) { 7997 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_control))) {
7999 if (filter & IPW_PROM_NO_CTL) 7998 if (filter & IPW_PROM_NO_CTL)
8000 return; 7999 return;
8001 if (filter & IPW_PROM_CTL_HEADER_ONLY) 8000 if (filter & IPW_PROM_CTL_HEADER_ONLY)
8002 hdr_only = 1; 8001 hdr_only = 1;
8003 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) { 8002 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_control))) {
8004 if (filter & IPW_PROM_NO_DATA) 8003 if (filter & IPW_PROM_NO_DATA)
8005 return; 8004 return;
8006 if (filter & IPW_PROM_DATA_HEADER_ONLY) 8005 if (filter & IPW_PROM_DATA_HEADER_ONLY)
@@ -8018,19 +8017,10 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
8018 ipw_rt = (void *)skb->data; 8017 ipw_rt = (void *)skb->data;
8019 8018
8020 if (hdr_only) 8019 if (hdr_only)
8021 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 8020 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
8022 8021
8023 memcpy(ipw_rt->payload, hdr, len); 8022 memcpy(ipw_rt->payload, hdr, len);
8024 8023
8025 /* Zero the radiotap static buffer ... We only need to zero the bytes
8026 * NOT part of our real header, saves a little time.
8027 *
8028 * No longer necessary since we fill in all our data. Purge before
8029 * merging patch officially.
8030 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
8031 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
8032 */
8033
8034 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; 8024 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8035 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */ 8025 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8036 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */ 8026 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
@@ -8238,7 +8228,7 @@ static int is_duplicate_packet(struct ipw_priv *priv,
8238 /* Comment this line now since we observed the card receives 8228 /* Comment this line now since we observed the card receives
8239 * duplicate packets but the FCTL_RETRY bit is not set in the 8229 * duplicate packets but the FCTL_RETRY bit is not set in the
8240 * IBSS mode with fragmentation enabled. 8230 * IBSS mode with fragmentation enabled.
8241 BUG_ON(!(le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_RETRY)); */ 8231 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8242 return 1; 8232 return 1;
8243} 8233}
8244 8234
@@ -8302,9 +8292,6 @@ static void ipw_rx(struct ipw_priv *priv)
8302 u32 r, w, i; 8292 u32 r, w, i;
8303 u8 network_packet; 8293 u8 network_packet;
8304 u8 fill_rx = 0; 8294 u8 fill_rx = 0;
8305 DECLARE_MAC_BUF(mac);
8306 DECLARE_MAC_BUF(mac2);
8307 DECLARE_MAC_BUF(mac3);
8308 8295
8309 r = ipw_read32(priv, IPW_RX_READ_INDEX); 8296 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8310 w = ipw_read32(priv, IPW_RX_WRITE_INDEX); 8297 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
@@ -8434,18 +8421,12 @@ static void ipw_rx(struct ipw_priv *priv)
8434 header))) 8421 header)))
8435 { 8422 {
8436 IPW_DEBUG_DROP("Dropping: " 8423 IPW_DEBUG_DROP("Dropping: "
8437 "%s, " 8424 "%pM, "
8438 "%s, " 8425 "%pM, "
8439 "%s\n", 8426 "%pM\n",
8440 print_mac(mac, 8427 header->addr1,
8441 header-> 8428 header->addr2,
8442 addr1), 8429 header->addr3);
8443 print_mac(mac2,
8444 header->
8445 addr2),
8446 print_mac(mac3,
8447 header->
8448 addr3));
8449 break; 8430 break;
8450 } 8431 }
8451 8432
@@ -8984,7 +8965,6 @@ static int ipw_wx_set_wap(struct net_device *dev,
8984 union iwreq_data *wrqu, char *extra) 8965 union iwreq_data *wrqu, char *extra)
8985{ 8966{
8986 struct ipw_priv *priv = ieee80211_priv(dev); 8967 struct ipw_priv *priv = ieee80211_priv(dev);
8987 DECLARE_MAC_BUF(mac);
8988 8968
8989 static const unsigned char any[] = { 8969 static const unsigned char any[] = {
8990 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 8970 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
@@ -9015,8 +8995,8 @@ static int ipw_wx_set_wap(struct net_device *dev,
9015 return 0; 8995 return 0;
9016 } 8996 }
9017 8997
9018 IPW_DEBUG_WX("Setting mandatory BSSID to %s\n", 8998 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9019 print_mac(mac, wrqu->ap_addr.sa_data)); 8999 wrqu->ap_addr.sa_data);
9020 9000
9021 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN); 9001 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9022 9002
@@ -9034,7 +9014,6 @@ static int ipw_wx_get_wap(struct net_device *dev,
9034 union iwreq_data *wrqu, char *extra) 9014 union iwreq_data *wrqu, char *extra)
9035{ 9015{
9036 struct ipw_priv *priv = ieee80211_priv(dev); 9016 struct ipw_priv *priv = ieee80211_priv(dev);
9037 DECLARE_MAC_BUF(mac);
9038 9017
9039 /* If we are associated, trying to associate, or have a statically 9018 /* If we are associated, trying to associate, or have a statically
9040 * configured BSSID then return that; otherwise return ANY */ 9019 * configured BSSID then return that; otherwise return ANY */
@@ -9046,8 +9025,8 @@ static int ipw_wx_get_wap(struct net_device *dev,
9046 } else 9025 } else
9047 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); 9026 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9048 9027
9049 IPW_DEBUG_WX("Getting WAP BSSID: %s\n", 9028 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9050 print_mac(mac, wrqu->ap_addr.sa_data)); 9029 wrqu->ap_addr.sa_data);
9051 mutex_unlock(&priv->mutex); 9030 mutex_unlock(&priv->mutex);
9052 return 0; 9031 return 0;
9053} 9032}
@@ -9058,6 +9037,7 @@ static int ipw_wx_set_essid(struct net_device *dev,
9058{ 9037{
9059 struct ipw_priv *priv = ieee80211_priv(dev); 9038 struct ipw_priv *priv = ieee80211_priv(dev);
9060 int length; 9039 int length;
9040 DECLARE_SSID_BUF(ssid);
9061 9041
9062 mutex_lock(&priv->mutex); 9042 mutex_lock(&priv->mutex);
9063 9043
@@ -9082,8 +9062,8 @@ static int ipw_wx_set_essid(struct net_device *dev,
9082 return 0; 9062 return 0;
9083 } 9063 }
9084 9064
9085 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length), 9065 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9086 length); 9066 print_ssid(ssid, extra, length), length);
9087 9067
9088 priv->essid_len = length; 9068 priv->essid_len = length;
9089 memcpy(priv->essid, extra, priv->essid_len); 9069 memcpy(priv->essid, extra, priv->essid_len);
@@ -9102,6 +9082,7 @@ static int ipw_wx_get_essid(struct net_device *dev,
9102 union iwreq_data *wrqu, char *extra) 9082 union iwreq_data *wrqu, char *extra)
9103{ 9083{
9104 struct ipw_priv *priv = ieee80211_priv(dev); 9084 struct ipw_priv *priv = ieee80211_priv(dev);
9085 DECLARE_SSID_BUF(ssid);
9105 9086
9106 /* If we are associated, trying to associate, or have a statically 9087 /* If we are associated, trying to associate, or have a statically
9107 * configured ESSID then return that; otherwise return ANY */ 9088 * configured ESSID then return that; otherwise return ANY */
@@ -9109,7 +9090,7 @@ static int ipw_wx_get_essid(struct net_device *dev,
9109 if (priv->config & CFG_STATIC_ESSID || 9090 if (priv->config & CFG_STATIC_ESSID ||
9110 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) { 9091 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9111 IPW_DEBUG_WX("Getting essid: '%s'\n", 9092 IPW_DEBUG_WX("Getting essid: '%s'\n",
9112 escape_essid(priv->essid, priv->essid_len)); 9093 print_ssid(ssid, priv->essid, priv->essid_len));
9113 memcpy(extra, priv->essid, priv->essid_len); 9094 memcpy(extra, priv->essid, priv->essid_len);
9114 wrqu->essid.length = priv->essid_len; 9095 wrqu->essid.length = priv->essid_len;
9115 wrqu->essid.flags = 1; /* active */ 9096 wrqu->essid.flags = 1; /* active */
@@ -10203,10 +10184,8 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10203 id = ipw_add_station(priv, hdr->addr1); 10184 id = ipw_add_station(priv, hdr->addr1);
10204 if (id == IPW_INVALID_STATION) { 10185 if (id == IPW_INVALID_STATION) {
10205 IPW_WARNING("Attempt to send data to " 10186 IPW_WARNING("Attempt to send data to "
10206 "invalid cell: " MAC_FMT "\n", 10187 "invalid cell: %pM\n",
10207 hdr->addr1[0], hdr->addr1[1], 10188 hdr->addr1);
10208 hdr->addr1[2], hdr->addr1[3],
10209 hdr->addr1[4], hdr->addr1[5]);
10210 goto drop; 10189 goto drop;
10211 } 10190 }
10212 } 10191 }
@@ -10274,8 +10253,8 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10274 case SEC_LEVEL_1: 10253 case SEC_LEVEL_1:
10275 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10254 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10276 cpu_to_le16(IEEE80211_FCTL_PROTECTED); 10255 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10277 tfd->u.data.key_index = priv->ieee->tx_keyidx; 10256 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10278 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <= 10257 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10279 40) 10258 40)
10280 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit; 10259 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10281 else 10260 else
@@ -10403,17 +10382,17 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10403 10382
10404 /* Filtering of fragment chains is done agains the first fragment */ 10383 /* Filtering of fragment chains is done agains the first fragment */
10405 hdr = (void *)txb->fragments[0]->data; 10384 hdr = (void *)txb->fragments[0]->data;
10406 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) { 10385 if (ieee80211_is_management(le16_to_cpu(hdr->frame_control))) {
10407 if (filter & IPW_PROM_NO_MGMT) 10386 if (filter & IPW_PROM_NO_MGMT)
10408 return; 10387 return;
10409 if (filter & IPW_PROM_MGMT_HEADER_ONLY) 10388 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10410 hdr_only = 1; 10389 hdr_only = 1;
10411 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) { 10390 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_control))) {
10412 if (filter & IPW_PROM_NO_CTL) 10391 if (filter & IPW_PROM_NO_CTL)
10413 return; 10392 return;
10414 if (filter & IPW_PROM_CTL_HEADER_ONLY) 10393 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10415 hdr_only = 1; 10394 hdr_only = 1;
10416 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) { 10395 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_control))) {
10417 if (filter & IPW_PROM_NO_DATA) 10396 if (filter & IPW_PROM_NO_DATA)
10418 return; 10397 return;
10419 if (filter & IPW_PROM_DATA_HEADER_ONLY) 10398 if (filter & IPW_PROM_DATA_HEADER_ONLY)
@@ -10428,13 +10407,13 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10428 10407
10429 if (hdr_only) { 10408 if (hdr_only) {
10430 hdr = (void *)src->data; 10409 hdr = (void *)src->data;
10431 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 10410 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
10432 } else 10411 } else
10433 len = src->len; 10412 len = src->len;
10434 10413
10435 dst = alloc_skb( 10414 dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
10436 len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC); 10415 if (!dst)
10437 if (!dst) continue; 10416 continue;
10438 10417
10439 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr)); 10418 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10440 10419
@@ -10509,15 +10488,14 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10509{ 10488{
10510 struct ipw_priv *priv = ieee80211_priv(dev); 10489 struct ipw_priv *priv = ieee80211_priv(dev);
10511 struct sockaddr *addr = p; 10490 struct sockaddr *addr = p;
10512 DECLARE_MAC_BUF(mac);
10513 10491
10514 if (!is_valid_ether_addr(addr->sa_data)) 10492 if (!is_valid_ether_addr(addr->sa_data))
10515 return -EADDRNOTAVAIL; 10493 return -EADDRNOTAVAIL;
10516 mutex_lock(&priv->mutex); 10494 mutex_lock(&priv->mutex);
10517 priv->config |= CFG_CUSTOM_MAC; 10495 priv->config |= CFG_CUSTOM_MAC;
10518 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); 10496 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10519 printk(KERN_INFO "%s: Setting MAC to %s\n", 10497 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10520 priv->net_dev->name, print_mac(mac, priv->mac_addr)); 10498 priv->net_dev->name, priv->mac_addr);
10521 queue_work(priv->workqueue, &priv->adapter_restart); 10499 queue_work(priv->workqueue, &priv->adapter_restart);
10522 mutex_unlock(&priv->mutex); 10500 mutex_unlock(&priv->mutex);
10523 return 0; 10501 return 0;
@@ -11652,7 +11630,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11652 length = pci_resource_len(pdev, 0); 11630 length = pci_resource_len(pdev, 0);
11653 priv->hw_len = length; 11631 priv->hw_len = length;
11654 11632
11655 base = ioremap_nocache(pci_resource_start(pdev, 0), length); 11633 base = pci_ioremap_bar(pdev, 0);
11656 if (!base) { 11634 if (!base) {
11657 err = -ENODEV; 11635 err = -ENODEV;
11658 goto out_pci_release_regions; 11636 goto out_pci_release_regions;
@@ -11944,7 +11922,7 @@ module_param(disable, int, 0444);
11944MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); 11922MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11945 11923
11946module_param(associate, int, 0444); 11924module_param(associate, int, 0444);
11947MODULE_PARM_DESC(associate, "auto associate when scanning (default on)"); 11925MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
11948 11926
11949module_param(auto_create, int, 0444); 11927module_param(auto_create, int, 0444);
11950MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)"); 11928MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h
index 0bad1ec3e7e0..277b274d4be5 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2x00/ipw2200.h
@@ -48,6 +48,7 @@
48#include <linux/jiffies.h> 48#include <linux/jiffies.h>
49#include <asm/io.h> 49#include <asm/io.h>
50 50
51#include <net/lib80211.h>
51#include <net/ieee80211.h> 52#include <net/ieee80211.h>
52#include <net/ieee80211_radiotap.h> 53#include <net/ieee80211_radiotap.h>
53 54
@@ -244,6 +245,7 @@ enum connection_manager_assoc_states {
244#define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED 31 245#define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED 31
245 246
246#define HOST_NOTIFICATION_STATUS_BEACON_MISSING 1 247#define HOST_NOTIFICATION_STATUS_BEACON_MISSING 1
248#define IPW_MB_SCAN_CANCEL_THRESHOLD 3
247#define IPW_MB_ROAMING_THRESHOLD_MIN 1 249#define IPW_MB_ROAMING_THRESHOLD_MIN 1
248#define IPW_MB_ROAMING_THRESHOLD_DEFAULT 8 250#define IPW_MB_ROAMING_THRESHOLD_DEFAULT 8
249#define IPW_MB_ROAMING_THRESHOLD_MAX 30 251#define IPW_MB_ROAMING_THRESHOLD_MAX 30
diff --git a/drivers/net/wireless/ipw2x00/libipw_geo.c b/drivers/net/wireless/ipw2x00/libipw_geo.c
new file mode 100644
index 000000000000..960ad13f5e9f
--- /dev/null
+++ b/drivers/net/wireless/ipw2x00/libipw_geo.c
@@ -0,0 +1,195 @@
1/******************************************************************************
2
3 Copyright(c) 2005 Intel Corporation. All rights reserved.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18 The full GNU General Public License is included in this distribution in the
19 file called LICENSE.
20
21 Contact Information:
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25******************************************************************************/
26#include <linux/compiler.h>
27#include <linux/errno.h>
28#include <linux/if_arp.h>
29#include <linux/in6.h>
30#include <linux/in.h>
31#include <linux/ip.h>
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/netdevice.h>
35#include <linux/proc_fs.h>
36#include <linux/skbuff.h>
37#include <linux/slab.h>
38#include <linux/tcp.h>
39#include <linux/types.h>
40#include <linux/wireless.h>
41#include <linux/etherdevice.h>
42#include <asm/uaccess.h>
43
44#include <net/ieee80211.h>
45
46int ieee80211_is_valid_channel(struct ieee80211_device *ieee, u8 channel)
47{
48 int i;
49
50 /* Driver needs to initialize the geography map before using
51 * these helper functions */
52 if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0)
53 return 0;
54
55 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
56 for (i = 0; i < ieee->geo.bg_channels; i++)
57 /* NOTE: If G mode is currently supported but
58 * this is a B only channel, we don't see it
59 * as valid. */
60 if ((ieee->geo.bg[i].channel == channel) &&
61 !(ieee->geo.bg[i].flags & IEEE80211_CH_INVALID) &&
62 (!(ieee->mode & IEEE_G) ||
63 !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY)))
64 return IEEE80211_24GHZ_BAND;
65
66 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
67 for (i = 0; i < ieee->geo.a_channels; i++)
68 if ((ieee->geo.a[i].channel == channel) &&
69 !(ieee->geo.a[i].flags & IEEE80211_CH_INVALID))
70 return IEEE80211_52GHZ_BAND;
71
72 return 0;
73}
74
75int ieee80211_channel_to_index(struct ieee80211_device *ieee, u8 channel)
76{
77 int i;
78
79 /* Driver needs to initialize the geography map before using
80 * these helper functions */
81 if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0)
82 return -1;
83
84 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
85 for (i = 0; i < ieee->geo.bg_channels; i++)
86 if (ieee->geo.bg[i].channel == channel)
87 return i;
88
89 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
90 for (i = 0; i < ieee->geo.a_channels; i++)
91 if (ieee->geo.a[i].channel == channel)
92 return i;
93
94 return -1;
95}
96
97u32 ieee80211_channel_to_freq(struct ieee80211_device * ieee, u8 channel)
98{
99 const struct ieee80211_channel * ch;
100
101 /* Driver needs to initialize the geography map before using
102 * these helper functions */
103 if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0)
104 return 0;
105
106 ch = ieee80211_get_channel(ieee, channel);
107 if (!ch->channel)
108 return 0;
109 return ch->freq;
110}
111
112u8 ieee80211_freq_to_channel(struct ieee80211_device * ieee, u32 freq)
113{
114 int i;
115
116 /* Driver needs to initialize the geography map before using
117 * these helper functions */
118 if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0)
119 return 0;
120
121 freq /= 100000;
122
123 if (ieee->freq_band & IEEE80211_24GHZ_BAND)
124 for (i = 0; i < ieee->geo.bg_channels; i++)
125 if (ieee->geo.bg[i].freq == freq)
126 return ieee->geo.bg[i].channel;
127
128 if (ieee->freq_band & IEEE80211_52GHZ_BAND)
129 for (i = 0; i < ieee->geo.a_channels; i++)
130 if (ieee->geo.a[i].freq == freq)
131 return ieee->geo.a[i].channel;
132
133 return 0;
134}
135
136int ieee80211_set_geo(struct ieee80211_device *ieee,
137 const struct ieee80211_geo *geo)
138{
139 memcpy(ieee->geo.name, geo->name, 3);
140 ieee->geo.name[3] = '\0';
141 ieee->geo.bg_channels = geo->bg_channels;
142 ieee->geo.a_channels = geo->a_channels;
143 memcpy(ieee->geo.bg, geo->bg, geo->bg_channels *
144 sizeof(struct ieee80211_channel));
145 memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels *
146 sizeof(struct ieee80211_channel));
147 return 0;
148}
149
150const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device *ieee)
151{
152 return &ieee->geo;
153}
154
155u8 ieee80211_get_channel_flags(struct ieee80211_device * ieee, u8 channel)
156{
157 int index = ieee80211_channel_to_index(ieee, channel);
158
159 if (index == -1)
160 return IEEE80211_CH_INVALID;
161
162 if (channel <= IEEE80211_24GHZ_CHANNELS)
163 return ieee->geo.bg[index].flags;
164
165 return ieee->geo.a[index].flags;
166}
167
168static const struct ieee80211_channel bad_channel = {
169 .channel = 0,
170 .flags = IEEE80211_CH_INVALID,
171 .max_power = 0,
172};
173
174const struct ieee80211_channel *ieee80211_get_channel(struct ieee80211_device
175 *ieee, u8 channel)
176{
177 int index = ieee80211_channel_to_index(ieee, channel);
178
179 if (index == -1)
180 return &bad_channel;
181
182 if (channel <= IEEE80211_24GHZ_CHANNELS)
183 return &ieee->geo.bg[index];
184
185 return &ieee->geo.a[index];
186}
187
188EXPORT_SYMBOL(ieee80211_get_channel);
189EXPORT_SYMBOL(ieee80211_get_channel_flags);
190EXPORT_SYMBOL(ieee80211_is_valid_channel);
191EXPORT_SYMBOL(ieee80211_freq_to_channel);
192EXPORT_SYMBOL(ieee80211_channel_to_freq);
193EXPORT_SYMBOL(ieee80211_channel_to_index);
194EXPORT_SYMBOL(ieee80211_set_geo);
195EXPORT_SYMBOL(ieee80211_get_geo);
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
new file mode 100644
index 000000000000..a2f5616d5b09
--- /dev/null
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -0,0 +1,293 @@
1/*******************************************************************************
2
3 Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
4
5 Portions of this file are based on the WEP enablement code provided by the
6 Host AP project hostap-drivers v0.1.3
7 Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
8 <j@w1.fi>
9 Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31*******************************************************************************/
32
33#include <linux/compiler.h>
34#include <linux/errno.h>
35#include <linux/if_arp.h>
36#include <linux/in6.h>
37#include <linux/in.h>
38#include <linux/ip.h>
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/netdevice.h>
42#include <linux/proc_fs.h>
43#include <linux/skbuff.h>
44#include <linux/slab.h>
45#include <linux/tcp.h>
46#include <linux/types.h>
47#include <linux/wireless.h>
48#include <linux/etherdevice.h>
49#include <asm/uaccess.h>
50#include <net/net_namespace.h>
51#include <net/arp.h>
52
53#include <net/ieee80211.h>
54
/* Module identity strings, reported via modinfo and the load-time banner. */
#define DRV_DESCRIPTION "802.11 data/management/control stack"
#define DRV_NAME "ieee80211"
#define DRV_VERSION IEEE80211_VERSION
#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"

MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
64
65static int ieee80211_networks_allocate(struct ieee80211_device *ieee)
66{
67 if (ieee->networks)
68 return 0;
69
70 ieee->networks =
71 kzalloc(MAX_NETWORK_COUNT * sizeof(struct ieee80211_network),
72 GFP_KERNEL);
73 if (!ieee->networks) {
74 printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
75 ieee->dev->name);
76 return -ENOMEM;
77 }
78
79 return 0;
80}
81
82void ieee80211_network_reset(struct ieee80211_network *network)
83{
84 if (!network)
85 return;
86
87 if (network->ibss_dfs) {
88 kfree(network->ibss_dfs);
89 network->ibss_dfs = NULL;
90 }
91}
92
93static inline void ieee80211_networks_free(struct ieee80211_device *ieee)
94{
95 int i;
96
97 if (!ieee->networks)
98 return;
99
100 for (i = 0; i < MAX_NETWORK_COUNT; i++)
101 if (ieee->networks[i].ibss_dfs)
102 kfree(ieee->networks[i].ibss_dfs);
103
104 kfree(ieee->networks);
105 ieee->networks = NULL;
106}
107
108static void ieee80211_networks_initialize(struct ieee80211_device *ieee)
109{
110 int i;
111
112 INIT_LIST_HEAD(&ieee->network_free_list);
113 INIT_LIST_HEAD(&ieee->network_list);
114 for (i = 0; i < MAX_NETWORK_COUNT; i++)
115 list_add_tail(&ieee->networks[i].list,
116 &ieee->network_free_list);
117}
118
119static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
120{
121 if ((new_mtu < 68) || (new_mtu > IEEE80211_DATA_LEN))
122 return -EINVAL;
123 dev->mtu = new_mtu;
124 return 0;
125}
126
127static struct net_device_stats *ieee80211_generic_get_stats(
128 struct net_device *dev)
129{
130 struct ieee80211_device *ieee = netdev_priv(dev);
131 return &ieee->stats;
132}
133
/*
 * Allocate and initialize an ieee80211-backed net_device.
 *
 * @sizeof_priv: size of the driver-private area reserved beyond
 *	struct ieee80211_device.
 *
 * Returns the new net_device on success, NULL on allocation failure.
 * The caller owns the device and must release it with free_ieee80211().
 */
struct net_device *alloc_ieee80211(int sizeof_priv)
{
	struct ieee80211_device *ieee;
	struct net_device *dev;
	int err;

	IEEE80211_DEBUG_INFO("Initializing...\n");

	/* Ethernet-style device; priv area holds our state plus the
	 * driver's sizeof_priv bytes. */
	dev = alloc_etherdev(sizeof(struct ieee80211_device) + sizeof_priv);
	if (!dev) {
		IEEE80211_ERROR("Unable to allocate network device.\n");
		goto failed;
	}
	ieee = netdev_priv(dev);
	dev->hard_start_xmit = ieee80211_xmit;
	dev->change_mtu = ieee80211_change_mtu;

	/* Drivers are free to override this if the generic implementation
	 * does not meet their needs. */
	dev->get_stats = ieee80211_generic_get_stats;

	ieee->dev = dev;

	err = ieee80211_networks_allocate(ieee);
	if (err) {
		IEEE80211_ERROR("Unable to allocate beacon storage: %d\n", err);
		goto failed_free_netdev;
	}
	ieee80211_networks_initialize(ieee);

	/* Default fragmentation threshold is maximum payload size */
	ieee->fts = DEFAULT_FTS;
	ieee->rts = DEFAULT_FTS;
	ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
	ieee->open_wep = 1;

	/* Default to enabling full open WEP with host based encrypt/decrypt */
	ieee->host_encrypt = 1;
	ieee->host_decrypt = 1;
	ieee->host_mc_decrypt = 1;

	/* Host fragementation in Open mode. Default is enabled.
	 * Note: host fragmentation is always enabled if host encryption
	 * is enabled. For cards can do hardware encryption, they must do
	 * hardware fragmentation as well. So we don't need a variable
	 * like host_enc_frag. */
	ieee->host_open_frag = 1;
	ieee->ieee802_1x = 1;	/* Default to supporting 802.1x */

	spin_lock_init(&ieee->lock);

	/* lib80211 crypto bookkeeping shares our spinlock */
	lib80211_crypt_info_init(&ieee->crypt_info, dev->name, &ieee->lock);

	ieee->wpa_enabled = 0;
	ieee->drop_unencrypted = 0;
	ieee->privacy_invoked = 0;

	return dev;

failed_free_netdev:
	free_netdev(dev);
failed:
	return NULL;
}
198
199void free_ieee80211(struct net_device *dev)
200{
201 struct ieee80211_device *ieee = netdev_priv(dev);
202
203 lib80211_crypt_info_free(&ieee->crypt_info);
204
205 ieee80211_networks_free(ieee);
206 free_netdev(dev);
207}
208
#ifdef CONFIG_IEEE80211_DEBUG

/* Initial debug mask, settable as a module parameter (see bottom of file). */
static int debug = 0;
/* Runtime debug mask consulted by the IEEE80211_DEBUG_* macros. */
u32 ieee80211_debug_level = 0;
EXPORT_SYMBOL_GPL(ieee80211_debug_level);
/* /proc/net/ieee80211 directory; non-NULL only while registered. */
static struct proc_dir_entry *ieee80211_proc = NULL;
215
/* read_proc handler for /proc/net/ieee80211/debug_level: prints the
 * current mask as a fixed-width hex value. */
static int show_debug_level(char *page, char **start, off_t offset,
			    int count, int *eof, void *data)
{
	return snprintf(page, count, "0x%08X\n", ieee80211_debug_level);
}
221
222static int store_debug_level(struct file *file, const char __user * buffer,
223 unsigned long count, void *data)
224{
225 char buf[] = "0x00000000\n";
226 unsigned long len = min((unsigned long)sizeof(buf) - 1, count);
227 unsigned long val;
228
229 if (copy_from_user(buf, buffer, len))
230 return count;
231 buf[len] = 0;
232 if (sscanf(buf, "%li", &val) != 1)
233 printk(KERN_INFO DRV_NAME
234 ": %s is not in hex or decimal form.\n", buf);
235 else
236 ieee80211_debug_level = val;
237
238 return strnlen(buf, len);
239}
240#endif /* CONFIG_IEEE80211_DEBUG */
241
/*
 * Module init: optionally publish the debug-level knob under
 * /proc/net/ieee80211/ and print the load-time banner.
 *
 * Returns 0 on success, or -EIO if the proc entries cannot be created
 * (any partially-created proc state is rolled back first).
 */
static int __init ieee80211_init(void)
{
#ifdef CONFIG_IEEE80211_DEBUG
	struct proc_dir_entry *e;

	ieee80211_debug_level = debug;
	ieee80211_proc = proc_mkdir(DRV_NAME, init_net.proc_net);
	if (ieee80211_proc == NULL) {
		IEEE80211_ERROR("Unable to create " DRV_NAME
				" proc directory\n");
		return -EIO;
	}
	/* world-readable, root-writable regular file */
	e = create_proc_entry("debug_level", S_IFREG | S_IRUGO | S_IWUSR,
			      ieee80211_proc);
	if (!e) {
		/* roll back the directory so a later load starts clean */
		remove_proc_entry(DRV_NAME, init_net.proc_net);
		ieee80211_proc = NULL;
		return -EIO;
	}
	e->read_proc = show_debug_level;
	e->write_proc = store_debug_level;
	e->data = NULL;
#endif				/* CONFIG_IEEE80211_DEBUG */

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	return 0;
}
271
272static void __exit ieee80211_exit(void)
273{
274#ifdef CONFIG_IEEE80211_DEBUG
275 if (ieee80211_proc) {
276 remove_proc_entry("debug_level", ieee80211_proc);
277 remove_proc_entry(DRV_NAME, init_net.proc_net);
278 ieee80211_proc = NULL;
279 }
280#endif /* CONFIG_IEEE80211_DEBUG */
281}
282
#ifdef CONFIG_IEEE80211_DEBUG
#include <linux/moduleparam.h>
/* read-only module parameter feeding ieee80211_debug_level at init */
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
#endif				/* CONFIG_IEEE80211_DEBUG */

module_exit(ieee80211_exit);
module_init(ieee80211_init);

/* Device lifecycle entry points used by the ipw hardware drivers. */
EXPORT_SYMBOL(alloc_ieee80211);
EXPORT_SYMBOL(free_ieee80211);
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
new file mode 100644
index 000000000000..9c67dfae4320
--- /dev/null
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -0,0 +1,1799 @@
1/*
2 * Original code based Host AP (software wireless LAN access point) driver
3 * for Intersil Prism2/2.5/3 - hostap.o module, common routines
4 *
5 * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
6 * <j@w1.fi>
7 * Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
8 * Copyright (c) 2004-2005, Intel Corporation
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation. See README and COPYING for
13 * more details.
14 */
15
16#include <linux/compiler.h>
17#include <linux/errno.h>
18#include <linux/if_arp.h>
19#include <linux/in6.h>
20#include <linux/in.h>
21#include <linux/ip.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/netdevice.h>
25#include <linux/proc_fs.h>
26#include <linux/skbuff.h>
27#include <linux/slab.h>
28#include <linux/tcp.h>
29#include <linux/types.h>
30#include <linux/wireless.h>
31#include <linux/etherdevice.h>
32#include <asm/uaccess.h>
33#include <linux/ctype.h>
34
35#include <net/lib80211.h>
36#include <net/ieee80211.h>
37
/*
 * Deliver a raw 802.11 frame up the stack while in monitor mode.
 *
 * The 802.11 header is stripped and the remainder handed to netif_rx()
 * as an ETH_P_80211_RAW packet.  Consumes @skb.  @rx_stats is unused.
 */
static void ieee80211_monitor_rx(struct ieee80211_device *ieee,
				 struct sk_buff *skb,
				 struct ieee80211_rx_stats *rx_stats)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	u16 fc = le16_to_cpu(hdr->frame_control);

	skb->dev = ieee->dev;
	/* record the MAC header position before pulling past it */
	skb_reset_mac_header(skb);
	skb_pull(skb, ieee80211_get_hdrlen(fc));
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_80211_RAW);
	memset(skb->cb, 0, sizeof(skb->cb));
	netif_rx(skb);
}
53
/* Called only as a tasklet (software IRQ) */
/*
 * Search the per-device fragment cache for the entry matching
 * @seq/@src/@dst.  @frag must be the expected next fragment number, or
 * -1 (as unsigned) to match any fragment number -- used when
 * invalidating.  Entries older than 2 seconds are freed as a side
 * effect of the scan.  Returns NULL when no live entry matches.
 */
static struct ieee80211_frag_entry *ieee80211_frag_cache_find(struct
							      ieee80211_device
							      *ieee,
							      unsigned int seq,
							      unsigned int frag,
							      u8 * src,
							      u8 * dst)
{
	struct ieee80211_frag_entry *entry;
	int i;

	for (i = 0; i < IEEE80211_FRAG_CACHE_LEN; i++) {
		entry = &ieee->frag_cache[i];
		/* expire stale partial frames before matching */
		if (entry->skb != NULL &&
		    time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			IEEE80211_DEBUG_FRAG("expiring fragment cache entry "
					     "seq=%u last_frag=%u\n",
					     entry->seq, entry->last_frag);
			dev_kfree_skb_any(entry->skb);
			entry->skb = NULL;
		}

		if (entry->skb != NULL && entry->seq == seq &&
		    (entry->last_frag + 1 == frag || frag == -1) &&
		    !compare_ether_addr(entry->src_addr, src) &&
		    !compare_ether_addr(entry->dst_addr, dst))
			return entry;
	}

	return NULL;
}
86
/* Called only as a tasklet (software IRQ) */
/*
 * Return the reassembly skb for the fragment described by @hdr.
 *
 * For the first fragment a fresh, maximum-sized skb is allocated and
 * installed in the cache (round-robin slot selection, evicting any
 * partial frame already in that slot).  For later fragments the
 * matching cache entry is looked up and its skb returned.  Returns
 * NULL on allocation failure or when no matching entry exists.
 */
static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee,
						struct ieee80211_hdr_4addr *hdr)
{
	struct sk_buff *skb = NULL;
	u16 sc;
	unsigned int frag, seq;
	struct ieee80211_frag_entry *entry;

	sc = le16_to_cpu(hdr->seq_ctl);
	frag = WLAN_GET_SEQ_FRAG(sc);
	seq = WLAN_GET_SEQ_SEQ(sc);

	if (frag == 0) {
		/* Reserve enough space to fit maximum frame length */
		skb = dev_alloc_skb(ieee->dev->mtu +
				    sizeof(struct ieee80211_hdr_4addr) +
				    8 /* LLC */ +
				    2 /* alignment */ +
				    8 /* WEP */ + ETH_ALEN /* WDS */ );
		if (skb == NULL)
			return NULL;

		/* claim the next cache slot round-robin */
		entry = &ieee->frag_cache[ieee->frag_next_idx];
		ieee->frag_next_idx++;
		if (ieee->frag_next_idx >= IEEE80211_FRAG_CACHE_LEN)
			ieee->frag_next_idx = 0;

		/* evict whatever partial frame occupied this slot */
		if (entry->skb != NULL)
			dev_kfree_skb_any(entry->skb);

		entry->first_frag_time = jiffies;
		entry->seq = seq;
		entry->last_frag = frag;
		entry->skb = skb;
		memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
		memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
	} else {
		/* received a fragment of a frame for which the head fragment
		 * should have already been received */
		entry = ieee80211_frag_cache_find(ieee, seq, frag, hdr->addr2,
						  hdr->addr1);
		if (entry != NULL) {
			entry->last_frag = frag;
			skb = entry->skb;
		}
	}

	return skb;
}
137
138/* Called only as a tasklet (software IRQ) */
139static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
140 struct ieee80211_hdr_4addr *hdr)
141{
142 u16 sc;
143 unsigned int seq;
144 struct ieee80211_frag_entry *entry;
145
146 sc = le16_to_cpu(hdr->seq_ctl);
147 seq = WLAN_GET_SEQ_SEQ(sc);
148
149 entry = ieee80211_frag_cache_find(ieee, seq, -1, hdr->addr2,
150 hdr->addr1);
151
152 if (entry == NULL) {
153 IEEE80211_DEBUG_FRAG("could not invalidate fragment cache "
154 "entry (seq=%u)\n", seq);
155 return -1;
156 }
157
158 entry->skb = NULL;
159 return 0;
160}
161
#ifdef NOT_YET
/* ieee80211_rx_frame_mgmt
 *
 * Responsible for handling management control frames
 *
 * Called by ieee80211_rx.  Compiled out (NOT_YET): references hostap
 * helpers not present in this tree.  Typos fixed: "mgtmt" in the header
 * comment and "suppported" in the printk string. */
static int
ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
			struct ieee80211_rx_stats *rx_stats, u16 type,
			u16 stype)
{
	if (ieee->iw_mode == IW_MODE_MASTER) {
		printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
		       ieee->dev->name);
		return 0;
/*
		hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *)
		skb->data);*/
	}

	if (ieee->hostapd && type == WLAN_FC_TYPE_MGMT) {
		if (stype == WLAN_FC_STYPE_BEACON &&
		    ieee->iw_mode == IW_MODE_MASTER) {
			struct sk_buff *skb2;
			/* Process beacon frames also in kernel driver to
			 * update STA(AP) table statistics */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2)
				hostap_rx(skb2->dev, skb2, rx_stats);
		}

		/* send management frames to the user space daemon for
		 * processing */
		ieee->apdevstats.rx_packets++;
		ieee->apdevstats.rx_bytes += skb->len;
		prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT);
		return 0;
	}

	if (ieee->iw_mode == IW_MODE_MASTER) {
		if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) {
			printk(KERN_DEBUG "%s: unknown management frame "
			       "(type=0x%02x, stype=0x%02x) dropped\n",
			       skb->dev->name, type, stype);
			return -1;
		}

		hostap_rx(skb->dev, skb, rx_stats);
		return 0;
	}

	printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame "
	       "received in non-Host AP mode\n", skb->dev->name);
	return -1;
}
#endif
218
/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static unsigned char rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static unsigned char bridge_tunnel_header[] =
    { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */
/* These prefixes are compared against the 6-byte LLC/SNAP header when
 * converting received 802.11 payloads back to Ethernet framing. */
227
228/* Called by ieee80211_rx_frame_decrypt */
229static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
230 struct sk_buff *skb)
231{
232 struct net_device *dev = ieee->dev;
233 u16 fc, ethertype;
234 struct ieee80211_hdr_3addr *hdr;
235 u8 *pos;
236
237 if (skb->len < 24)
238 return 0;
239
240 hdr = (struct ieee80211_hdr_3addr *)skb->data;
241 fc = le16_to_cpu(hdr->frame_ctl);
242
243 /* check that the frame is unicast frame to us */
244 if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
245 IEEE80211_FCTL_TODS &&
246 !compare_ether_addr(hdr->addr1, dev->dev_addr) &&
247 !compare_ether_addr(hdr->addr3, dev->dev_addr)) {
248 /* ToDS frame with own addr BSSID and DA */
249 } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
250 IEEE80211_FCTL_FROMDS &&
251 !compare_ether_addr(hdr->addr1, dev->dev_addr)) {
252 /* FromDS frame with own addr as DA */
253 } else
254 return 0;
255
256 if (skb->len < 24 + 8)
257 return 0;
258
259 /* check for port access entity Ethernet type */
260 pos = skb->data + 24;
261 ethertype = (pos[6] << 8) | pos[7];
262 if (ethertype == ETH_P_PAE)
263 return 1;
264
265 return 0;
266}
267
/* Called only as a tasklet (software IRQ), by ieee80211_rx */
/*
 * Decrypt a single MPDU in place using the configured crypto ops.
 *
 * Returns 0 when no host decryption is configured, the non-negative
 * decrypt_mpdu() result on success, or -1 on failure (caller drops the
 * frame; rx_discards_undecryptable is incremented here).
 */
static int
ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb,
			   struct lib80211_crypt_data *crypt)
{
	struct ieee80211_hdr_3addr *hdr;
	int res, hdrlen;

	if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
		return 0;

	hdr = (struct ieee80211_hdr_3addr *)skb->data;
	hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));

	/* hold a reference on the crypto context across the callback */
	atomic_inc(&crypt->refcnt);
	res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		IEEE80211_DEBUG_DROP("decryption failed (SA=%pM) res=%d\n",
				     hdr->addr2, res);
		if (res == -2)
			/* key index lives in the top two bits of IV byte 3 */
			IEEE80211_DEBUG_DROP("Decryption failed ICV "
					     "mismatch (key %d)\n",
					     skb->data[hdrlen + 3] >> 6);
		ieee->ieee_stats.rx_discards_undecryptable++;
		return -1;
	}

	return res;
}
298
/* Called only as a tasklet (software IRQ), by ieee80211_rx */
/*
 * Run MSDU-level post-processing (e.g. TKIP Michael MIC verification)
 * on a fully reassembled, MPDU-decrypted frame.
 *
 * Returns 0 when no MSDU decryption is configured or it succeeds,
 * -1 on failure (caller drops the frame).
 */
static int
ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee,
				struct sk_buff *skb, int keyidx,
				struct lib80211_crypt_data *crypt)
{
	struct ieee80211_hdr_3addr *hdr;
	int res, hdrlen;

	if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
		return 0;

	hdr = (struct ieee80211_hdr_3addr *)skb->data;
	hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));

	/* hold a reference on the crypto context across the callback */
	atomic_inc(&crypt->refcnt);
	res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
		       " (SA=%pM keyidx=%d)\n", ieee->dev->name, hdr->addr2,
		       keyidx);
		return -1;
	}

	return 0;
}
326
327/* All received frames are sent to this function. @skb contains the frame in
328 * IEEE 802.11 format, i.e., in the format it was sent over air.
329 * This function is called only as a tasklet (software IRQ). */
330int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
331 struct ieee80211_rx_stats *rx_stats)
332{
333 struct net_device *dev = ieee->dev;
334 struct ieee80211_hdr_4addr *hdr;
335 size_t hdrlen;
336 u16 fc, type, stype, sc;
337 struct net_device_stats *stats;
338 unsigned int frag;
339 u8 *payload;
340 u16 ethertype;
341#ifdef NOT_YET
342 struct net_device *wds = NULL;
343 struct sk_buff *skb2 = NULL;
344 struct net_device *wds = NULL;
345 int frame_authorized = 0;
346 int from_assoc_ap = 0;
347 void *sta = NULL;
348#endif
349 u8 dst[ETH_ALEN];
350 u8 src[ETH_ALEN];
351 struct lib80211_crypt_data *crypt = NULL;
352 int keyidx = 0;
353 int can_be_decrypted = 0;
354
355 hdr = (struct ieee80211_hdr_4addr *)skb->data;
356 stats = &ieee->stats;
357
358 if (skb->len < 10) {
359 printk(KERN_INFO "%s: SKB length < 10\n", dev->name);
360 goto rx_dropped;
361 }
362
363 fc = le16_to_cpu(hdr->frame_ctl);
364 type = WLAN_FC_GET_TYPE(fc);
365 stype = WLAN_FC_GET_STYPE(fc);
366 sc = le16_to_cpu(hdr->seq_ctl);
367 frag = WLAN_GET_SEQ_FRAG(sc);
368 hdrlen = ieee80211_get_hdrlen(fc);
369
370 if (skb->len < hdrlen) {
371 printk(KERN_INFO "%s: invalid SKB length %d\n",
372 dev->name, skb->len);
373 goto rx_dropped;
374 }
375
376 /* Put this code here so that we avoid duplicating it in all
377 * Rx paths. - Jean II */
378#ifdef CONFIG_WIRELESS_EXT
379#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
380 /* If spy monitoring on */
381 if (ieee->spy_data.spy_number > 0) {
382 struct iw_quality wstats;
383
384 wstats.updated = 0;
385 if (rx_stats->mask & IEEE80211_STATMASK_RSSI) {
386 wstats.level = rx_stats->signal;
387 wstats.updated |= IW_QUAL_LEVEL_UPDATED;
388 } else
389 wstats.updated |= IW_QUAL_LEVEL_INVALID;
390
391 if (rx_stats->mask & IEEE80211_STATMASK_NOISE) {
392 wstats.noise = rx_stats->noise;
393 wstats.updated |= IW_QUAL_NOISE_UPDATED;
394 } else
395 wstats.updated |= IW_QUAL_NOISE_INVALID;
396
397 if (rx_stats->mask & IEEE80211_STATMASK_SIGNAL) {
398 wstats.qual = rx_stats->signal;
399 wstats.updated |= IW_QUAL_QUAL_UPDATED;
400 } else
401 wstats.updated |= IW_QUAL_QUAL_INVALID;
402
403 /* Update spy records */
404 wireless_spy_update(ieee->dev, hdr->addr2, &wstats);
405 }
406#endif /* IW_WIRELESS_SPY */
407#endif /* CONFIG_WIRELESS_EXT */
408
409#ifdef NOT_YET
410 hostap_update_rx_stats(local->ap, hdr, rx_stats);
411#endif
412
413 if (ieee->iw_mode == IW_MODE_MONITOR) {
414 stats->rx_packets++;
415 stats->rx_bytes += skb->len;
416 ieee80211_monitor_rx(ieee, skb, rx_stats);
417 return 1;
418 }
419
420 can_be_decrypted = (is_multicast_ether_addr(hdr->addr1) ||
421 is_broadcast_ether_addr(hdr->addr2)) ?
422 ieee->host_mc_decrypt : ieee->host_decrypt;
423
424 if (can_be_decrypted) {
425 if (skb->len >= hdrlen + 3) {
426 /* Top two-bits of byte 3 are the key index */
427 keyidx = skb->data[hdrlen + 3] >> 6;
428 }
429
430 /* ieee->crypt[] is WEP_KEY (4) in length. Given that keyidx
431 * is only allowed 2-bits of storage, no value of keyidx can
432 * be provided via above code that would result in keyidx
433 * being out of range */
434 crypt = ieee->crypt_info.crypt[keyidx];
435
436#ifdef NOT_YET
437 sta = NULL;
438
439 /* Use station specific key to override default keys if the
440 * receiver address is a unicast address ("individual RA"). If
441 * bcrx_sta_key parameter is set, station specific key is used
442 * even with broad/multicast targets (this is against IEEE
443 * 802.11, but makes it easier to use different keys with
444 * stations that do not support WEP key mapping). */
445
446 if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
447 (void)hostap_handle_sta_crypto(local, hdr, &crypt,
448 &sta);
449#endif
450
451 /* allow NULL decrypt to indicate an station specific override
452 * for default encryption */
453 if (crypt && (crypt->ops == NULL ||
454 crypt->ops->decrypt_mpdu == NULL))
455 crypt = NULL;
456
457 if (!crypt && (fc & IEEE80211_FCTL_PROTECTED)) {
458 /* This seems to be triggered by some (multicast?)
459 * frames from other than current BSS, so just drop the
460 * frames silently instead of filling system log with
461 * these reports. */
462 IEEE80211_DEBUG_DROP("Decryption failed (not set)"
463 " (SA=%pM)\n", hdr->addr2);
464 ieee->ieee_stats.rx_discards_undecryptable++;
465 goto rx_dropped;
466 }
467 }
468#ifdef NOT_YET
469 if (type != WLAN_FC_TYPE_DATA) {
470 if (type == WLAN_FC_TYPE_MGMT && stype == WLAN_FC_STYPE_AUTH &&
471 fc & IEEE80211_FCTL_PROTECTED && ieee->host_decrypt &&
472 (keyidx = hostap_rx_frame_decrypt(ieee, skb, crypt)) < 0) {
473 printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth "
474 "from %pM\n", dev->name, hdr->addr2);
475 /* TODO: could inform hostapd about this so that it
476 * could send auth failure report */
477 goto rx_dropped;
478 }
479
480 if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
481 goto rx_dropped;
482 else
483 goto rx_exit;
484 }
485#endif
486 /* drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.29) */
487 if (sc == ieee->prev_seq_ctl)
488 goto rx_dropped;
489 else
490 ieee->prev_seq_ctl = sc;
491
492 /* Data frame - extract src/dst addresses */
493 if (skb->len < IEEE80211_3ADDR_LEN)
494 goto rx_dropped;
495
496 switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
497 case IEEE80211_FCTL_FROMDS:
498 memcpy(dst, hdr->addr1, ETH_ALEN);
499 memcpy(src, hdr->addr3, ETH_ALEN);
500 break;
501 case IEEE80211_FCTL_TODS:
502 memcpy(dst, hdr->addr3, ETH_ALEN);
503 memcpy(src, hdr->addr2, ETH_ALEN);
504 break;
505 case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
506 if (skb->len < IEEE80211_4ADDR_LEN)
507 goto rx_dropped;
508 memcpy(dst, hdr->addr3, ETH_ALEN);
509 memcpy(src, hdr->addr4, ETH_ALEN);
510 break;
511 case 0:
512 memcpy(dst, hdr->addr1, ETH_ALEN);
513 memcpy(src, hdr->addr2, ETH_ALEN);
514 break;
515 }
516
517#ifdef NOT_YET
518 if (hostap_rx_frame_wds(ieee, hdr, fc, &wds))
519 goto rx_dropped;
520 if (wds) {
521 skb->dev = dev = wds;
522 stats = hostap_get_stats(dev);
523 }
524
525 if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
526 (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
527 IEEE80211_FCTL_FROMDS && ieee->stadev
528 && !compare_ether_addr(hdr->addr2, ieee->assoc_ap_addr)) {
529 /* Frame from BSSID of the AP for which we are a client */
530 skb->dev = dev = ieee->stadev;
531 stats = hostap_get_stats(dev);
532 from_assoc_ap = 1;
533 }
534#endif
535
536#ifdef NOT_YET
537 if ((ieee->iw_mode == IW_MODE_MASTER ||
538 ieee->iw_mode == IW_MODE_REPEAT) && !from_assoc_ap) {
539 switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats,
540 wds != NULL)) {
541 case AP_RX_CONTINUE_NOT_AUTHORIZED:
542 frame_authorized = 0;
543 break;
544 case AP_RX_CONTINUE:
545 frame_authorized = 1;
546 break;
547 case AP_RX_DROP:
548 goto rx_dropped;
549 case AP_RX_EXIT:
550 goto rx_exit;
551 }
552 }
553#endif
554
555 /* Nullfunc frames may have PS-bit set, so they must be passed to
556 * hostap_handle_sta_rx() before being dropped here. */
557
558 stype &= ~IEEE80211_STYPE_QOS_DATA;
559
560 if (stype != IEEE80211_STYPE_DATA &&
561 stype != IEEE80211_STYPE_DATA_CFACK &&
562 stype != IEEE80211_STYPE_DATA_CFPOLL &&
563 stype != IEEE80211_STYPE_DATA_CFACKPOLL) {
564 if (stype != IEEE80211_STYPE_NULLFUNC)
565 IEEE80211_DEBUG_DROP("RX: dropped data frame "
566 "with no data (type=0x%02x, "
567 "subtype=0x%02x, len=%d)\n",
568 type, stype, skb->len);
569 goto rx_dropped;
570 }
571
572 /* skb: hdr + (possibly fragmented, possibly encrypted) payload */
573
574 if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted &&
575 (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
576 goto rx_dropped;
577
578 hdr = (struct ieee80211_hdr_4addr *)skb->data;
579
580 /* skb: hdr + (possibly fragmented) plaintext payload */
581 // PR: FIXME: hostap has additional conditions in the "if" below:
582 // ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
583 if ((frag != 0) || (fc & IEEE80211_FCTL_MOREFRAGS)) {
584 int flen;
585 struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr);
586 IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);
587
588 if (!frag_skb) {
589 IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG,
590 "Rx cannot get skb from fragment "
591 "cache (morefrag=%d seq=%u frag=%u)\n",
592 (fc & IEEE80211_FCTL_MOREFRAGS) != 0,
593 WLAN_GET_SEQ_SEQ(sc), frag);
594 goto rx_dropped;
595 }
596
597 flen = skb->len;
598 if (frag != 0)
599 flen -= hdrlen;
600
601 if (frag_skb->tail + flen > frag_skb->end) {
602 printk(KERN_WARNING "%s: host decrypted and "
603 "reassembled frame did not fit skb\n",
604 dev->name);
605 ieee80211_frag_cache_invalidate(ieee, hdr);
606 goto rx_dropped;
607 }
608
609 if (frag == 0) {
610 /* copy first fragment (including full headers) into
611 * beginning of the fragment cache skb */
612 skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), flen);
613 } else {
614 /* append frame payload to the end of the fragment
615 * cache skb */
616 skb_copy_from_linear_data_offset(skb, hdrlen,
617 skb_put(frag_skb, flen), flen);
618 }
619 dev_kfree_skb_any(skb);
620 skb = NULL;
621
622 if (fc & IEEE80211_FCTL_MOREFRAGS) {
623 /* more fragments expected - leave the skb in fragment
624 * cache for now; it will be delivered to upper layers
625 * after all fragments have been received */
626 goto rx_exit;
627 }
628
629 /* this was the last fragment and the frame will be
630 * delivered, so remove skb from fragment cache */
631 skb = frag_skb;
632 hdr = (struct ieee80211_hdr_4addr *)skb->data;
633 ieee80211_frag_cache_invalidate(ieee, hdr);
634 }
635
636 /* skb: hdr + (possible reassembled) full MSDU payload; possibly still
637 * encrypted/authenticated */
638 if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted &&
639 ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
640 goto rx_dropped;
641
642 hdr = (struct ieee80211_hdr_4addr *)skb->data;
643 if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) {
644 if ( /*ieee->ieee802_1x && */
645 ieee80211_is_eapol_frame(ieee, skb)) {
646 /* pass unencrypted EAPOL frames even if encryption is
647 * configured */
648 } else {
649 IEEE80211_DEBUG_DROP("encryption configured, but RX "
650 "frame not encrypted (SA=%pM)\n",
651 hdr->addr2);
652 goto rx_dropped;
653 }
654 }
655
656 if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep &&
657 !ieee80211_is_eapol_frame(ieee, skb)) {
658 IEEE80211_DEBUG_DROP("dropped unencrypted RX data "
659 "frame from %pM (drop_unencrypted=1)\n",
660 hdr->addr2);
661 goto rx_dropped;
662 }
663
664 /* If the frame was decrypted in hardware, we may need to strip off
665 * any security data (IV, ICV, etc) that was left behind */
666 if (!can_be_decrypted && (fc & IEEE80211_FCTL_PROTECTED) &&
667 ieee->host_strip_iv_icv) {
668 int trimlen = 0;
669
670 /* Top two-bits of byte 3 are the key index */
671 if (skb->len >= hdrlen + 3)
672 keyidx = skb->data[hdrlen + 3] >> 6;
673
674 /* To strip off any security data which appears before the
675 * payload, we simply increase hdrlen (as the header gets
676 * chopped off immediately below). For the security data which
677 * appears after the payload, we use skb_trim. */
678
679 switch (ieee->sec.encode_alg[keyidx]) {
680 case SEC_ALG_WEP:
681 /* 4 byte IV */
682 hdrlen += 4;
683 /* 4 byte ICV */
684 trimlen = 4;
685 break;
686 case SEC_ALG_TKIP:
687 /* 4 byte IV, 4 byte ExtIV */
688 hdrlen += 8;
689 /* 8 byte MIC, 4 byte ICV */
690 trimlen = 12;
691 break;
692 case SEC_ALG_CCMP:
693 /* 8 byte CCMP header */
694 hdrlen += 8;
695 /* 8 byte MIC */
696 trimlen = 8;
697 break;
698 }
699
700 if (skb->len < trimlen)
701 goto rx_dropped;
702
703 __skb_trim(skb, skb->len - trimlen);
704
705 if (skb->len < hdrlen)
706 goto rx_dropped;
707 }
708
709 /* skb: hdr + (possible reassembled) full plaintext payload */
710
711 payload = skb->data + hdrlen;
712 ethertype = (payload[6] << 8) | payload[7];
713
714#ifdef NOT_YET
715 /* If IEEE 802.1X is used, check whether the port is authorized to send
716 * the received frame. */
717 if (ieee->ieee802_1x && ieee->iw_mode == IW_MODE_MASTER) {
718 if (ethertype == ETH_P_PAE) {
719 printk(KERN_DEBUG "%s: RX: IEEE 802.1X frame\n",
720 dev->name);
721 if (ieee->hostapd && ieee->apdev) {
722 /* Send IEEE 802.1X frames to the user
723 * space daemon for processing */
724 prism2_rx_80211(ieee->apdev, skb, rx_stats,
725 PRISM2_RX_MGMT);
726 ieee->apdevstats.rx_packets++;
727 ieee->apdevstats.rx_bytes += skb->len;
728 goto rx_exit;
729 }
730 } else if (!frame_authorized) {
731 printk(KERN_DEBUG "%s: dropped frame from "
732 "unauthorized port (IEEE 802.1X): "
733 "ethertype=0x%04x\n", dev->name, ethertype);
734 goto rx_dropped;
735 }
736 }
737#endif
738
739 /* convert hdr + possible LLC headers into Ethernet header */
740 if (skb->len - hdrlen >= 8 &&
741 ((memcmp(payload, rfc1042_header, SNAP_SIZE) == 0 &&
742 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
743 memcmp(payload, bridge_tunnel_header, SNAP_SIZE) == 0)) {
744 /* remove RFC1042 or Bridge-Tunnel encapsulation and
745 * replace EtherType */
746 skb_pull(skb, hdrlen + SNAP_SIZE);
747 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
748 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
749 } else {
750 __be16 len;
751 /* Leave Ethernet header part of hdr and full payload */
752 skb_pull(skb, hdrlen);
753 len = htons(skb->len);
754 memcpy(skb_push(skb, 2), &len, 2);
755 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
756 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
757 }
758
759#ifdef NOT_YET
760 if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
761 IEEE80211_FCTL_TODS) && skb->len >= ETH_HLEN + ETH_ALEN) {
762 /* Non-standard frame: get addr4 from its bogus location after
763 * the payload */
764 skb_copy_to_linear_data_offset(skb, ETH_ALEN,
765 skb->data + skb->len - ETH_ALEN,
766 ETH_ALEN);
767 skb_trim(skb, skb->len - ETH_ALEN);
768 }
769#endif
770
771 stats->rx_packets++;
772 stats->rx_bytes += skb->len;
773
774#ifdef NOT_YET
775 if (ieee->iw_mode == IW_MODE_MASTER && !wds && ieee->ap->bridge_packets) {
776 if (dst[0] & 0x01) {
777 /* copy multicast frame both to the higher layers and
778 * to the wireless media */
779 ieee->ap->bridged_multicast++;
780 skb2 = skb_clone(skb, GFP_ATOMIC);
781 if (skb2 == NULL)
782 printk(KERN_DEBUG "%s: skb_clone failed for "
783 "multicast frame\n", dev->name);
784 } else if (hostap_is_sta_assoc(ieee->ap, dst)) {
785 /* send frame directly to the associated STA using
786 * wireless media and not passing to higher layers */
787 ieee->ap->bridged_unicast++;
788 skb2 = skb;
789 skb = NULL;
790 }
791 }
792
793 if (skb2 != NULL) {
794 /* send to wireless media */
795 skb2->dev = dev;
796 skb2->protocol = htons(ETH_P_802_3);
797 skb_reset_mac_header(skb2);
798 skb_reset_network_header(skb2);
799 /* skb2->network_header += ETH_HLEN; */
800 dev_queue_xmit(skb2);
801 }
802#endif
803
804 if (skb) {
805 skb->protocol = eth_type_trans(skb, dev);
806 memset(skb->cb, 0, sizeof(skb->cb));
807 skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
808 if (netif_rx(skb) == NET_RX_DROP) {
809 /* netif_rx always succeeds, but it might drop
810 * the packet. If it drops the packet, we log that
811 * in our stats. */
812 IEEE80211_DEBUG_DROP
813 ("RX: netif_rx dropped the packet\n");
814 stats->rx_dropped++;
815 }
816 }
817
818 rx_exit:
819#ifdef NOT_YET
820 if (sta)
821 hostap_handle_sta_release(sta);
822#endif
823 return 1;
824
825 rx_dropped:
826 stats->rx_dropped++;
827
828 /* Returning 0 indicates to caller that we have not handled the SKB--
829 * so it is still allocated and can be used again by underlying
830 * hardware as a DMA target */
831 return 0;
832}
833
834/* Filter out unrelated packets, call ieee80211_rx[_mgt]
835 * This function takes over the skb, it should not be used again after calling
836 * this function. */
837void ieee80211_rx_any(struct ieee80211_device *ieee,
838 struct sk_buff *skb, struct ieee80211_rx_stats *stats)
839{
840 struct ieee80211_hdr_4addr *hdr;
841 int is_packet_for_us;
842 u16 fc;
843
844 if (ieee->iw_mode == IW_MODE_MONITOR) {
845 if (!ieee80211_rx(ieee, skb, stats))
846 dev_kfree_skb_irq(skb);
847 return;
848 }
849
850 if (skb->len < sizeof(struct ieee80211_hdr))
851 goto drop_free;
852
853 hdr = (struct ieee80211_hdr_4addr *)skb->data;
854 fc = le16_to_cpu(hdr->frame_ctl);
855
856 if ((fc & IEEE80211_FCTL_VERS) != 0)
857 goto drop_free;
858
859 switch (fc & IEEE80211_FCTL_FTYPE) {
860 case IEEE80211_FTYPE_MGMT:
861 if (skb->len < sizeof(struct ieee80211_hdr_3addr))
862 goto drop_free;
863 ieee80211_rx_mgt(ieee, hdr, stats);
864 dev_kfree_skb_irq(skb);
865 return;
866 case IEEE80211_FTYPE_DATA:
867 break;
868 case IEEE80211_FTYPE_CTL:
869 return;
870 default:
871 return;
872 }
873
874 is_packet_for_us = 0;
875 switch (ieee->iw_mode) {
876 case IW_MODE_ADHOC:
877 /* our BSS and not from/to DS */
878 if (memcmp(hdr->addr3, ieee->bssid, ETH_ALEN) == 0)
879 if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == 0) {
880 /* promisc: get all */
881 if (ieee->dev->flags & IFF_PROMISC)
882 is_packet_for_us = 1;
883 /* to us */
884 else if (memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN) == 0)
885 is_packet_for_us = 1;
886 /* mcast */
887 else if (is_multicast_ether_addr(hdr->addr1))
888 is_packet_for_us = 1;
889 }
890 break;
891 case IW_MODE_INFRA:
892 /* our BSS (== from our AP) and from DS */
893 if (memcmp(hdr->addr2, ieee->bssid, ETH_ALEN) == 0)
894 if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS) {
895 /* promisc: get all */
896 if (ieee->dev->flags & IFF_PROMISC)
897 is_packet_for_us = 1;
898 /* to us */
899 else if (memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN) == 0)
900 is_packet_for_us = 1;
901 /* mcast */
902 else if (is_multicast_ether_addr(hdr->addr1)) {
903 /* not our own packet bcasted from AP */
904 if (memcmp(hdr->addr3, ieee->dev->dev_addr, ETH_ALEN))
905 is_packet_for_us = 1;
906 }
907 }
908 break;
909 default:
910 /* ? */
911 break;
912 }
913
914 if (is_packet_for_us)
915 if (!ieee80211_rx(ieee, skb, stats))
916 dev_kfree_skb_irq(skb);
917 return;
918
919drop_free:
920 dev_kfree_skb_irq(skb);
921 ieee->stats.rx_dropped++;
922 return;
923}
924
/* NOTE(review): 0x24 appears to be the fixed (non-IE) part of a management
 * frame; the constant is unused within this chunk -- confirm before use. */
#define MGMT_FRAME_FIXED_PART_LENGTH 0x24

/* OUI bytes matched against the `qui` field of QoS information elements
 * (see ieee80211_verify_qos_info()). */
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
928
/*
 * Make sure the structure we read from the beacon packet has
 * the right values
 */
933static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element
934 *info_element, int sub_type)
935{
936
937 if (info_element->qui_subtype != sub_type)
938 return -1;
939 if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
940 return -1;
941 if (info_element->qui_type != QOS_OUI_TYPE)
942 return -1;
943 if (info_element->version != QOS_VERSION_1)
944 return -1;
945
946 return 0;
947}
948
949/*
950 * Parse a QoS parameter element
951 */
952static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info
953 *element_param, struct ieee80211_info_element
954 *info_element)
955{
956 int ret = 0;
957 u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2;
958
959 if ((info_element == NULL) || (element_param == NULL))
960 return -1;
961
962 if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) {
963 memcpy(element_param->info_element.qui, info_element->data,
964 info_element->len);
965 element_param->info_element.elementID = info_element->id;
966 element_param->info_element.length = info_element->len;
967 } else
968 ret = -1;
969 if (ret == 0)
970 ret = ieee80211_verify_qos_info(&element_param->info_element,
971 QOS_OUI_PARAM_SUB_TYPE);
972 return ret;
973}
974
975/*
976 * Parse a QoS information element
977 */
978static int ieee80211_read_qos_info_element(struct
979 ieee80211_qos_information_element
980 *element_info, struct ieee80211_info_element
981 *info_element)
982{
983 int ret = 0;
984 u16 size = sizeof(struct ieee80211_qos_information_element) - 2;
985
986 if (element_info == NULL)
987 return -1;
988 if (info_element == NULL)
989 return -1;
990
991 if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) {
992 memcpy(element_info->qui, info_element->data,
993 info_element->len);
994 element_info->elementID = info_element->id;
995 element_info->length = info_element->len;
996 } else
997 ret = -1;
998
999 if (ret == 0)
1000 ret = ieee80211_verify_qos_info(element_info,
1001 QOS_OUI_INFO_SUB_TYPE);
1002 return ret;
1003}
1004
1005/*
1006 * Write QoS parameters from the ac parameters.
1007 */
1008static int ieee80211_qos_convert_ac_to_parameters(struct
1009 ieee80211_qos_parameter_info
1010 *param_elm, struct
1011 ieee80211_qos_parameters
1012 *qos_param)
1013{
1014 int rc = 0;
1015 int i;
1016 struct ieee80211_qos_ac_parameter *ac_params;
1017 u32 txop;
1018 u8 cw_min;
1019 u8 cw_max;
1020
1021 for (i = 0; i < QOS_QUEUE_NUM; i++) {
1022 ac_params = &(param_elm->ac_params_record[i]);
1023
1024 qos_param->aifs[i] = (ac_params->aci_aifsn) & 0x0F;
1025 qos_param->aifs[i] -= (qos_param->aifs[i] < 2) ? 0 : 2;
1026
1027 cw_min = ac_params->ecw_min_max & 0x0F;
1028 qos_param->cw_min[i] = cpu_to_le16((1 << cw_min) - 1);
1029
1030 cw_max = (ac_params->ecw_min_max & 0xF0) >> 4;
1031 qos_param->cw_max[i] = cpu_to_le16((1 << cw_max) - 1);
1032
1033 qos_param->flag[i] =
1034 (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
1035
1036 txop = le16_to_cpu(ac_params->tx_op_limit) * 32;
1037 qos_param->tx_op_limit[i] = cpu_to_le16(txop);
1038 }
1039 return rc;
1040}
1041
1042/*
1043 * we have a generic data element which it may contain QoS information or
1044 * parameters element. check the information element length to decide
1045 * which type to read
1046 */
1047static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element
1048 *info_element,
1049 struct ieee80211_network *network)
1050{
1051 int rc = 0;
1052 struct ieee80211_qos_parameters *qos_param = NULL;
1053 struct ieee80211_qos_information_element qos_info_element;
1054
1055 rc = ieee80211_read_qos_info_element(&qos_info_element, info_element);
1056
1057 if (rc == 0) {
1058 network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
1059 network->flags |= NETWORK_HAS_QOS_INFORMATION;
1060 } else {
1061 struct ieee80211_qos_parameter_info param_element;
1062
1063 rc = ieee80211_read_qos_param_element(&param_element,
1064 info_element);
1065 if (rc == 0) {
1066 qos_param = &(network->qos_data.parameters);
1067 ieee80211_qos_convert_ac_to_parameters(&param_element,
1068 qos_param);
1069 network->flags |= NETWORK_HAS_QOS_PARAMETERS;
1070 network->qos_data.param_count =
1071 param_element.info_element.ac_info & 0x0F;
1072 }
1073 }
1074
1075 if (rc == 0) {
1076 IEEE80211_DEBUG_QOS("QoS is supported\n");
1077 network->qos_data.supported = 1;
1078 }
1079 return rc;
1080}
1081
1082#ifdef CONFIG_IEEE80211_DEBUG
1083#define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x
1084
1085static const char *get_info_element_string(u16 id)
1086{
1087 switch (id) {
1088 MFIE_STRING(SSID);
1089 MFIE_STRING(RATES);
1090 MFIE_STRING(FH_SET);
1091 MFIE_STRING(DS_SET);
1092 MFIE_STRING(CF_SET);
1093 MFIE_STRING(TIM);
1094 MFIE_STRING(IBSS_SET);
1095 MFIE_STRING(COUNTRY);
1096 MFIE_STRING(HOP_PARAMS);
1097 MFIE_STRING(HOP_TABLE);
1098 MFIE_STRING(REQUEST);
1099 MFIE_STRING(CHALLENGE);
1100 MFIE_STRING(POWER_CONSTRAINT);
1101 MFIE_STRING(POWER_CAPABILITY);
1102 MFIE_STRING(TPC_REQUEST);
1103 MFIE_STRING(TPC_REPORT);
1104 MFIE_STRING(SUPP_CHANNELS);
1105 MFIE_STRING(CSA);
1106 MFIE_STRING(MEASURE_REQUEST);
1107 MFIE_STRING(MEASURE_REPORT);
1108 MFIE_STRING(QUIET);
1109 MFIE_STRING(IBSS_DFS);
1110 MFIE_STRING(ERP_INFO);
1111 MFIE_STRING(RSN);
1112 MFIE_STRING(RATES_EX);
1113 MFIE_STRING(GENERIC);
1114 MFIE_STRING(QOS_PARAMETER);
1115 default:
1116 return "UNKNOWN";
1117 }
1118}
1119#endif
1120
1121static int ieee80211_parse_info_param(struct ieee80211_info_element
1122 *info_element, u16 length,
1123 struct ieee80211_network *network)
1124{
1125 DECLARE_SSID_BUF(ssid);
1126 u8 i;
1127#ifdef CONFIG_IEEE80211_DEBUG
1128 char rates_str[64];
1129 char *p;
1130#endif
1131
1132 while (length >= sizeof(*info_element)) {
1133 if (sizeof(*info_element) + info_element->len > length) {
1134 IEEE80211_DEBUG_MGMT("Info elem: parse failed: "
1135 "info_element->len + 2 > left : "
1136 "info_element->len+2=%zd left=%d, id=%d.\n",
1137 info_element->len +
1138 sizeof(*info_element),
1139 length, info_element->id);
1140 /* We stop processing but don't return an error here
1141 * because some misbehaviour APs break this rule. ie.
1142 * Orinoco AP1000. */
1143 break;
1144 }
1145
1146 switch (info_element->id) {
1147 case MFIE_TYPE_SSID:
1148 network->ssid_len = min(info_element->len,
1149 (u8) IW_ESSID_MAX_SIZE);
1150 memcpy(network->ssid, info_element->data,
1151 network->ssid_len);
1152 if (network->ssid_len < IW_ESSID_MAX_SIZE)
1153 memset(network->ssid + network->ssid_len, 0,
1154 IW_ESSID_MAX_SIZE - network->ssid_len);
1155
1156 IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n",
1157 print_ssid(ssid, network->ssid,
1158 network->ssid_len),
1159 network->ssid_len);
1160 break;
1161
1162 case MFIE_TYPE_RATES:
1163#ifdef CONFIG_IEEE80211_DEBUG
1164 p = rates_str;
1165#endif
1166 network->rates_len = min(info_element->len,
1167 MAX_RATES_LENGTH);
1168 for (i = 0; i < network->rates_len; i++) {
1169 network->rates[i] = info_element->data[i];
1170#ifdef CONFIG_IEEE80211_DEBUG
1171 p += snprintf(p, sizeof(rates_str) -
1172 (p - rates_str), "%02X ",
1173 network->rates[i]);
1174#endif
1175 if (ieee80211_is_ofdm_rate
1176 (info_element->data[i])) {
1177 network->flags |= NETWORK_HAS_OFDM;
1178 if (info_element->data[i] &
1179 IEEE80211_BASIC_RATE_MASK)
1180 network->flags &=
1181 ~NETWORK_HAS_CCK;
1182 }
1183 }
1184
1185 IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n",
1186 rates_str, network->rates_len);
1187 break;
1188
1189 case MFIE_TYPE_RATES_EX:
1190#ifdef CONFIG_IEEE80211_DEBUG
1191 p = rates_str;
1192#endif
1193 network->rates_ex_len = min(info_element->len,
1194 MAX_RATES_EX_LENGTH);
1195 for (i = 0; i < network->rates_ex_len; i++) {
1196 network->rates_ex[i] = info_element->data[i];
1197#ifdef CONFIG_IEEE80211_DEBUG
1198 p += snprintf(p, sizeof(rates_str) -
1199 (p - rates_str), "%02X ",
1200 network->rates[i]);
1201#endif
1202 if (ieee80211_is_ofdm_rate
1203 (info_element->data[i])) {
1204 network->flags |= NETWORK_HAS_OFDM;
1205 if (info_element->data[i] &
1206 IEEE80211_BASIC_RATE_MASK)
1207 network->flags &=
1208 ~NETWORK_HAS_CCK;
1209 }
1210 }
1211
1212 IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n",
1213 rates_str, network->rates_ex_len);
1214 break;
1215
1216 case MFIE_TYPE_DS_SET:
1217 IEEE80211_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n",
1218 info_element->data[0]);
1219 network->channel = info_element->data[0];
1220 break;
1221
1222 case MFIE_TYPE_FH_SET:
1223 IEEE80211_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n");
1224 break;
1225
1226 case MFIE_TYPE_CF_SET:
1227 IEEE80211_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n");
1228 break;
1229
1230 case MFIE_TYPE_TIM:
1231 network->tim.tim_count = info_element->data[0];
1232 network->tim.tim_period = info_element->data[1];
1233 IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: partially ignored\n");
1234 break;
1235
1236 case MFIE_TYPE_ERP_INFO:
1237 network->erp_value = info_element->data[0];
1238 network->flags |= NETWORK_HAS_ERP_VALUE;
1239 IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n",
1240 network->erp_value);
1241 break;
1242
1243 case MFIE_TYPE_IBSS_SET:
1244 network->atim_window = info_element->data[0];
1245 IEEE80211_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n",
1246 network->atim_window);
1247 break;
1248
1249 case MFIE_TYPE_CHALLENGE:
1250 IEEE80211_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n");
1251 break;
1252
1253 case MFIE_TYPE_GENERIC:
1254 IEEE80211_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n",
1255 info_element->len);
1256 if (!ieee80211_parse_qos_info_param_IE(info_element,
1257 network))
1258 break;
1259
1260 if (info_element->len >= 4 &&
1261 info_element->data[0] == 0x00 &&
1262 info_element->data[1] == 0x50 &&
1263 info_element->data[2] == 0xf2 &&
1264 info_element->data[3] == 0x01) {
1265 network->wpa_ie_len = min(info_element->len + 2,
1266 MAX_WPA_IE_LEN);
1267 memcpy(network->wpa_ie, info_element,
1268 network->wpa_ie_len);
1269 }
1270 break;
1271
1272 case MFIE_TYPE_RSN:
1273 IEEE80211_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n",
1274 info_element->len);
1275 network->rsn_ie_len = min(info_element->len + 2,
1276 MAX_WPA_IE_LEN);
1277 memcpy(network->rsn_ie, info_element,
1278 network->rsn_ie_len);
1279 break;
1280
1281 case MFIE_TYPE_QOS_PARAMETER:
1282 printk(KERN_ERR
1283 "QoS Error need to parse QOS_PARAMETER IE\n");
1284 break;
1285 /* 802.11h */
1286 case MFIE_TYPE_POWER_CONSTRAINT:
1287 network->power_constraint = info_element->data[0];
1288 network->flags |= NETWORK_HAS_POWER_CONSTRAINT;
1289 break;
1290
1291 case MFIE_TYPE_CSA:
1292 network->power_constraint = info_element->data[0];
1293 network->flags |= NETWORK_HAS_CSA;
1294 break;
1295
1296 case MFIE_TYPE_QUIET:
1297 network->quiet.count = info_element->data[0];
1298 network->quiet.period = info_element->data[1];
1299 network->quiet.duration = info_element->data[2];
1300 network->quiet.offset = info_element->data[3];
1301 network->flags |= NETWORK_HAS_QUIET;
1302 break;
1303
1304 case MFIE_TYPE_IBSS_DFS:
1305 if (network->ibss_dfs)
1306 break;
1307 network->ibss_dfs = kmemdup(info_element->data,
1308 info_element->len,
1309 GFP_ATOMIC);
1310 if (!network->ibss_dfs)
1311 return 1;
1312 network->flags |= NETWORK_HAS_IBSS_DFS;
1313 break;
1314
1315 case MFIE_TYPE_TPC_REPORT:
1316 network->tpc_report.transmit_power =
1317 info_element->data[0];
1318 network->tpc_report.link_margin = info_element->data[1];
1319 network->flags |= NETWORK_HAS_TPC_REPORT;
1320 break;
1321
1322 default:
1323 IEEE80211_DEBUG_MGMT
1324 ("Unsupported info element: %s (%d)\n",
1325 get_info_element_string(info_element->id),
1326 info_element->id);
1327 break;
1328 }
1329
1330 length -= sizeof(*info_element) + info_element->len;
1331 info_element =
1332 (struct ieee80211_info_element *)&info_element->
1333 data[info_element->len];
1334 }
1335
1336 return 0;
1337}
1338
/* Build a temporary ieee80211_network from an association response frame
 * and hand it to the driver's handle_assoc_response callback (if set).
 * Returns 0 on success, 1 if the frame's information elements failed to
 * parse. */
static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response
				       *frame, struct ieee80211_rx_stats *stats)
{
	struct ieee80211_network network_resp = {
		.ibss_dfs = NULL,
	};
	struct ieee80211_network *network = &network_resp;
	struct net_device *dev = ieee->dev;

	network->flags = 0;
	network->qos_data.active = 0;
	network->qos_data.supported = 0;
	network->qos_data.param_count = 0;
	network->qos_data.old_param_count = 0;

	//network->atim_window = le16_to_cpu(frame->aid) & (0x3FFF);
	/* NOTE(review): storing frame->aid into atim_window and
	 * frame->status into listen_interval looks like a field mixup --
	 * confirm against struct ieee80211_assoc_response before relying
	 * on either value. */
	network->atim_window = le16_to_cpu(frame->aid);
	network->listen_interval = le16_to_cpu(frame->status);
	memcpy(network->bssid, frame->header.addr3, ETH_ALEN);
	network->capability = le16_to_cpu(frame->capability);
	network->last_scanned = jiffies;
	network->rates_len = network->rates_ex_len = 0;
	network->last_associate = 0;
	network->ssid_len = 0;
	/* IBSS networks get a non-zero default ERP value; infrastructure
	 * networks start at 0 (may be overwritten by an ERP_INFO IE). */
	network->erp_value =
	    (network->capability & WLAN_CAPABILITY_IBSS) ? 0x3 : 0x0;

	if (stats->freq == IEEE80211_52GHZ_BAND) {
		/* for A band (No DS info) */
		network->channel = stats->received_channel;
	} else
		network->flags |= NETWORK_HAS_CCK;

	network->wpa_ie_len = 0;
	network->rsn_ie_len = 0;

	/* IEs start right after the fixed frame fields. */
	if (ieee80211_parse_info_param
	    (frame->info_element, stats->len - sizeof(*frame), network))
		return 1;

	/* Derive the PHY mode from band and advertised rate families. */
	network->mode = 0;
	if (stats->freq == IEEE80211_52GHZ_BAND)
		network->mode = IEEE_A;
	else {
		if (network->flags & NETWORK_HAS_OFDM)
			network->mode |= IEEE_G;
		if (network->flags & NETWORK_HAS_CCK)
			network->mode |= IEEE_B;
	}

	memcpy(&network->stats, stats, sizeof(network->stats));

	if (ieee->handle_assoc_response != NULL)
		ieee->handle_assoc_response(dev, frame, network);

	return 0;
}
1396
1397/***************************************************/
1398
/* Initialize @network from a received beacon or probe response.
 * Returns 0 on success; 1 if the IEs failed to parse or the network's PHY
 * mode could not be determined (such entries are filtered out). */
static int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee80211_probe_response
				  *beacon,
				  struct ieee80211_network *network,
				  struct ieee80211_rx_stats *stats)
{
	DECLARE_SSID_BUF(ssid);

	network->qos_data.active = 0;
	network->qos_data.supported = 0;
	network->qos_data.param_count = 0;
	network->qos_data.old_param_count = 0;

	/* Pull out fixed field data */
	memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
	network->capability = le16_to_cpu(beacon->capability);
	network->last_scanned = jiffies;
	network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]);
	network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]);
	network->beacon_interval = le16_to_cpu(beacon->beacon_interval);
	/* Where to pull this? beacon->listen_interval; */
	network->listen_interval = 0x0A;
	network->rates_len = network->rates_ex_len = 0;
	network->last_associate = 0;
	network->ssid_len = 0;
	network->flags = 0;
	network->atim_window = 0;
	/* IBSS networks get a non-zero default ERP value; may be overwritten
	 * later by an ERP_INFO IE. */
	network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ?
	    0x3 : 0x0;

	if (stats->freq == IEEE80211_52GHZ_BAND) {
		/* for A band (No DS info) */
		network->channel = stats->received_channel;
	} else
		network->flags |= NETWORK_HAS_CCK;

	network->wpa_ie_len = 0;
	network->rsn_ie_len = 0;

	/* IEs start right after the fixed beacon fields. */
	if (ieee80211_parse_info_param
	    (beacon->info_element, stats->len - sizeof(*beacon), network))
		return 1;

	/* Derive the PHY mode from band and advertised rate families. */
	network->mode = 0;
	if (stats->freq == IEEE80211_52GHZ_BAND)
		network->mode = IEEE_A;
	else {
		if (network->flags & NETWORK_HAS_OFDM)
			network->mode |= IEEE_G;
		if (network->flags & NETWORK_HAS_CCK)
			network->mode |= IEEE_B;
	}

	/* No recognizable rate family at all -- drop the entry. */
	if (network->mode == 0) {
		IEEE80211_DEBUG_SCAN("Filtered out '%s (%pM)' "
				     "network.\n",
				     print_ssid(ssid, network->ssid,
						network->ssid_len),
				     network->bssid);
		return 1;
	}

	memcpy(&network->stats, stats, sizeof(network->stats));

	return 0;
}
1464
1465static inline int is_same_network(struct ieee80211_network *src,
1466 struct ieee80211_network *dst)
1467{
1468 /* A network is only a duplicate if the channel, BSSID, and ESSID
1469 * all match. We treat all <hidden> with the same BSSID and channel
1470 * as one network */
1471 return ((src->ssid_len == dst->ssid_len) &&
1472 (src->channel == dst->channel) &&
1473 !compare_ether_addr(src->bssid, dst->bssid) &&
1474 !memcmp(src->ssid, dst->ssid, src->ssid_len));
1475}
1476
/* Refresh an existing scan-list entry @dst with freshly parsed data @src.
 * The QoS "active"/"old_param_count" state and last_associate survive the
 * update; ownership of src->ibss_dfs transfers to dst (caller must clear
 * its own reference afterwards). */
static void update_network(struct ieee80211_network *dst,
			   struct ieee80211_network *src)
{
	int qos_active;
	u8 old_param;

	/* Reset first (frees dst's old ibss_dfs), then adopt src's. */
	ieee80211_network_reset(dst);
	dst->ibss_dfs = src->ibss_dfs;

	/* We only update the statistics if they were created by receiving
	 * the network information on the actual channel the network is on.
	 *
	 * This keeps beacons received on neighbor channels from bringing
	 * down the signal level of an AP. */
	if (dst->channel == src->stats.received_channel)
		memcpy(&dst->stats, &src->stats,
		       sizeof(struct ieee80211_rx_stats));
	else
		IEEE80211_DEBUG_SCAN("Network %pM info received "
				     "off channel (%d vs. %d)\n", src->bssid,
				     dst->channel, src->stats.received_channel);

	dst->capability = src->capability;
	memcpy(dst->rates, src->rates, src->rates_len);
	dst->rates_len = src->rates_len;
	memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
	dst->rates_ex_len = src->rates_ex_len;

	dst->mode = src->mode;
	dst->flags = src->flags;
	dst->time_stamp[0] = src->time_stamp[0];
	dst->time_stamp[1] = src->time_stamp[1];

	dst->beacon_interval = src->beacon_interval;
	dst->listen_interval = src->listen_interval;
	dst->atim_window = src->atim_window;
	dst->erp_value = src->erp_value;
	dst->tim = src->tim;

	memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
	dst->wpa_ie_len = src->wpa_ie_len;
	memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len);
	dst->rsn_ie_len = src->rsn_ie_len;

	dst->last_scanned = jiffies;
	/* Preserve QoS runtime state across the bulk qos_data copy below. */
	qos_active = src->qos_data.active;
	old_param = dst->qos_data.old_param_count;
	if (dst->flags & NETWORK_HAS_QOS_MASK)
		memcpy(&dst->qos_data, &src->qos_data,
		       sizeof(struct ieee80211_qos_data));
	else {
		dst->qos_data.supported = src->qos_data.supported;
		dst->qos_data.param_count = src->qos_data.param_count;
	}

	if (dst->qos_data.supported == 1) {
		if (dst->ssid_len)
			IEEE80211_DEBUG_QOS
			    ("QoS the network %s is QoS supported\n",
			     dst->ssid);
		else
			IEEE80211_DEBUG_QOS
			    ("QoS the network is QoS supported\n");
	}
	/* Restore the state saved before the copy. */
	dst->qos_data.active = qos_active;
	dst->qos_data.old_param_count = old_param;

	/* dst->last_associate is not overwritten */
}
1546
1547static inline int is_beacon(__le16 fc)
1548{
1549 return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON);
1550}
1551
/* Parse a beacon or probe response and insert/update it in the device's
 * scan list (ieee->network_list), expiring the oldest entry when the free
 * list is exhausted.  Finally invokes the driver's handle_beacon or
 * handle_probe_response callback with the stored entry. */
static void ieee80211_process_probe_response(struct ieee80211_device
					     *ieee, struct
					     ieee80211_probe_response
					     *beacon, struct ieee80211_rx_stats
					     *stats)
{
	struct net_device *dev = ieee->dev;
	struct ieee80211_network network = {
		.ibss_dfs = NULL,
	};
	struct ieee80211_network *target;
	struct ieee80211_network *oldest = NULL;
#ifdef CONFIG_IEEE80211_DEBUG
	/* Only referenced from IEEE80211_DEBUG_SCAN, which compiles out
	 * when CONFIG_IEEE80211_DEBUG is unset. */
	struct ieee80211_info_element *info_element = beacon->info_element;
#endif
	unsigned long flags;
	DECLARE_SSID_BUF(ssid);

	/* Dump the 16 capability bits, MSB first. */
	IEEE80211_DEBUG_SCAN("'%s' (%pM"
			     "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
			     print_ssid(ssid, info_element->data, info_element->len),
			     beacon->header.addr3,
			     (beacon->capability & cpu_to_le16(1 << 0xf)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0xe)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0xd)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0xc)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0xb)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0xa)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0x9)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0x8)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0x7)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0x6)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0x5)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0x4)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0x3)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0x2)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0x1)) ? '1' : '0',
			     (beacon->capability & cpu_to_le16(1 << 0x0)) ? '1' : '0');

	if (ieee80211_network_init(ieee, beacon, &network, stats)) {
		IEEE80211_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n",
				     print_ssid(ssid, info_element->data,
						info_element->len),
				     beacon->header.addr3,
				     is_beacon(beacon->header.frame_ctl) ?
				     "BEACON" : "PROBE RESPONSE");
		return;
	}

	/* The network parsed correctly -- so now we scan our known networks
	 * to see if we can find it in our list.
	 *
	 * NOTE: This search is definitely not optimized.  Once its doing
	 *       the "right thing" we'll optimize it for efficiency if
	 *       necessary */

	/* Search for this entry in the list and update it if it is
	 * already there. */

	spin_lock_irqsave(&ieee->lock, flags);

	/* Track the least-recently-seen entry as eviction candidate. */
	list_for_each_entry(target, &ieee->network_list, list) {
		if (is_same_network(target, &network))
			break;

		if ((oldest == NULL) ||
		    (target->last_scanned < oldest->last_scanned))
			oldest = target;
	}

	/* If we didn't find a match, then get a new network slot to initialize
	 * with this beacon's information */
	if (&target->list == &ieee->network_list) {
		if (list_empty(&ieee->network_free_list)) {
			/* If there are no more slots, expire the oldest */
			list_del(&oldest->list);
			target = oldest;
			IEEE80211_DEBUG_SCAN("Expired '%s' (%pM) from "
					     "network list.\n",
					     print_ssid(ssid, target->ssid,
							target->ssid_len),
					     target->bssid);
			ieee80211_network_reset(target);
		} else {
			/* Otherwise just pull from the free list */
			target = list_entry(ieee->network_free_list.next,
					    struct ieee80211_network, list);
			list_del(ieee->network_free_list.next);
		}

#ifdef CONFIG_IEEE80211_DEBUG
		IEEE80211_DEBUG_SCAN("Adding '%s' (%pM) via %s.\n",
				     print_ssid(ssid, network.ssid,
						network.ssid_len),
				     network.bssid,
				     is_beacon(beacon->header.frame_ctl) ?
				     "BEACON" : "PROBE RESPONSE");
#endif
		memcpy(target, &network, sizeof(*target));
		/* ibss_dfs ownership moved to target; drop our reference so
		 * it is not freed twice. */
		network.ibss_dfs = NULL;
		list_add_tail(&target->list, &ieee->network_list);
	} else {
		IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n",
				     print_ssid(ssid, target->ssid,
						target->ssid_len),
				     target->bssid,
				     is_beacon(beacon->header.frame_ctl) ?
				     "BEACON" : "PROBE RESPONSE");
		update_network(target, &network);
		/* Same ownership transfer as above. */
		network.ibss_dfs = NULL;
	}

	spin_unlock_irqrestore(&ieee->lock, flags);

	/* Callbacks run outside the lock, with the stored entry. */
	if (is_beacon(beacon->header.frame_ctl)) {
		if (ieee->handle_beacon != NULL)
			ieee->handle_beacon(dev, beacon, target);
	} else {
		if (ieee->handle_probe_response != NULL)
			ieee->handle_probe_response(dev, beacon, target);
	}
}
1674
1675void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1676 struct ieee80211_hdr_4addr *header,
1677 struct ieee80211_rx_stats *stats)
1678{
1679 switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) {
1680 case IEEE80211_STYPE_ASSOC_RESP:
1681 IEEE80211_DEBUG_MGMT("received ASSOCIATION RESPONSE (%d)\n",
1682 WLAN_FC_GET_STYPE(le16_to_cpu
1683 (header->frame_ctl)));
1684 ieee80211_handle_assoc_resp(ieee,
1685 (struct ieee80211_assoc_response *)
1686 header, stats);
1687 break;
1688
1689 case IEEE80211_STYPE_REASSOC_RESP:
1690 IEEE80211_DEBUG_MGMT("received REASSOCIATION RESPONSE (%d)\n",
1691 WLAN_FC_GET_STYPE(le16_to_cpu
1692 (header->frame_ctl)));
1693 break;
1694
1695 case IEEE80211_STYPE_PROBE_REQ:
1696 IEEE80211_DEBUG_MGMT("received auth (%d)\n",
1697 WLAN_FC_GET_STYPE(le16_to_cpu
1698 (header->frame_ctl)));
1699
1700 if (ieee->handle_probe_request != NULL)
1701 ieee->handle_probe_request(ieee->dev,
1702 (struct
1703 ieee80211_probe_request *)
1704 header, stats);
1705 break;
1706
1707 case IEEE80211_STYPE_PROBE_RESP:
1708 IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
1709 WLAN_FC_GET_STYPE(le16_to_cpu
1710 (header->frame_ctl)));
1711 IEEE80211_DEBUG_SCAN("Probe response\n");
1712 ieee80211_process_probe_response(ieee,
1713 (struct
1714 ieee80211_probe_response *)
1715 header, stats);
1716 break;
1717
1718 case IEEE80211_STYPE_BEACON:
1719 IEEE80211_DEBUG_MGMT("received BEACON (%d)\n",
1720 WLAN_FC_GET_STYPE(le16_to_cpu
1721 (header->frame_ctl)));
1722 IEEE80211_DEBUG_SCAN("Beacon\n");
1723 ieee80211_process_probe_response(ieee,
1724 (struct
1725 ieee80211_probe_response *)
1726 header, stats);
1727 break;
1728 case IEEE80211_STYPE_AUTH:
1729
1730 IEEE80211_DEBUG_MGMT("received auth (%d)\n",
1731 WLAN_FC_GET_STYPE(le16_to_cpu
1732 (header->frame_ctl)));
1733
1734 if (ieee->handle_auth != NULL)
1735 ieee->handle_auth(ieee->dev,
1736 (struct ieee80211_auth *)header);
1737 break;
1738
1739 case IEEE80211_STYPE_DISASSOC:
1740 if (ieee->handle_disassoc != NULL)
1741 ieee->handle_disassoc(ieee->dev,
1742 (struct ieee80211_disassoc *)
1743 header);
1744 break;
1745
1746 case IEEE80211_STYPE_ACTION:
1747 IEEE80211_DEBUG_MGMT("ACTION\n");
1748 if (ieee->handle_action)
1749 ieee->handle_action(ieee->dev,
1750 (struct ieee80211_action *)
1751 header, stats);
1752 break;
1753
1754 case IEEE80211_STYPE_REASSOC_REQ:
1755 IEEE80211_DEBUG_MGMT("received reassoc (%d)\n",
1756 WLAN_FC_GET_STYPE(le16_to_cpu
1757 (header->frame_ctl)));
1758
1759 IEEE80211_DEBUG_MGMT("%s: IEEE80211_REASSOC_REQ received\n",
1760 ieee->dev->name);
1761 if (ieee->handle_reassoc_request != NULL)
1762 ieee->handle_reassoc_request(ieee->dev,
1763 (struct ieee80211_reassoc_request *)
1764 header);
1765 break;
1766
1767 case IEEE80211_STYPE_ASSOC_REQ:
1768 IEEE80211_DEBUG_MGMT("received assoc (%d)\n",
1769 WLAN_FC_GET_STYPE(le16_to_cpu
1770 (header->frame_ctl)));
1771
1772 IEEE80211_DEBUG_MGMT("%s: IEEE80211_ASSOC_REQ received\n",
1773 ieee->dev->name);
1774 if (ieee->handle_assoc_request != NULL)
1775 ieee->handle_assoc_request(ieee->dev);
1776 break;
1777
1778 case IEEE80211_STYPE_DEAUTH:
1779 IEEE80211_DEBUG_MGMT("DEAUTH\n");
1780 if (ieee->handle_deauth != NULL)
1781 ieee->handle_deauth(ieee->dev,
1782 (struct ieee80211_deauth *)
1783 header);
1784 break;
1785 default:
1786 IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n",
1787 WLAN_FC_GET_STYPE(le16_to_cpu
1788 (header->frame_ctl)));
1789 IEEE80211_DEBUG_MGMT("%s: Unknown management packet: %d\n",
1790 ieee->dev->name,
1791 WLAN_FC_GET_STYPE(le16_to_cpu
1792 (header->frame_ctl)));
1793 break;
1794 }
1795}
1796
/* ieee80211_rx_any is exported GPL-only; the lower-level RX entry points
 * are exported to all modules. */
EXPORT_SYMBOL_GPL(ieee80211_rx_any);
EXPORT_SYMBOL(ieee80211_rx_mgt);
EXPORT_SYMBOL(ieee80211_rx);
diff --git a/drivers/net/wireless/ipw2x00/libipw_tx.c b/drivers/net/wireless/ipw2x00/libipw_tx.c
new file mode 100644
index 000000000000..f78f57e8844a
--- /dev/null
+++ b/drivers/net/wireless/ipw2x00/libipw_tx.c
@@ -0,0 +1,546 @@
1/******************************************************************************
2
3 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18 The full GNU General Public License is included in this distribution in the
19 file called LICENSE.
20
21 Contact Information:
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25******************************************************************************/
26#include <linux/compiler.h>
27#include <linux/errno.h>
28#include <linux/if_arp.h>
29#include <linux/in6.h>
30#include <linux/in.h>
31#include <linux/ip.h>
32#include <linux/kernel.h>
33#include <linux/module.h>
34#include <linux/netdevice.h>
35#include <linux/proc_fs.h>
36#include <linux/skbuff.h>
37#include <linux/slab.h>
38#include <linux/tcp.h>
39#include <linux/types.h>
40#include <linux/wireless.h>
41#include <linux/etherdevice.h>
42#include <asm/uaccess.h>
43
44#include <net/ieee80211.h>
45
46/*
47
48802.11 Data Frame
49
50 ,-------------------------------------------------------------------.
51Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
52 |------|------|---------|---------|---------|------|---------|------|
53Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
54 | | tion | (BSSID) | | | ence | data | |
55 `--------------------------------------------------| |------'
56Total: 28 non-data bytes `----.----'
57 |
58 .- 'Frame data' expands, if WEP enabled, to <----------'
59 |
60 V
61 ,-----------------------.
62Bytes | 4 | 0-2296 | 4 |
63 |-----|-----------|-----|
64Desc. | IV | Encrypted | ICV |
65 | | Packet | |
66 `-----| |-----'
67 `-----.-----'
68 |
69 .- 'Encrypted Packet' expands to
70 |
71 V
72 ,---------------------------------------------------.
73Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
74 |------|------|---------|----------|------|---------|
75Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
76 | DSAP | SSAP | | | | Packet |
77 | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
78 `----------------------------------------------------
79Total: 8 non-data bytes
80
81802.3 Ethernet Data Frame
82
83 ,-----------------------------------------.
84Bytes | 6 | 6 | 2 | Variable | 4 |
85 |-------|-------|------|-----------|------|
86Desc. | Dest. | Source| Type | IP Packet | fcs |
87 | MAC | MAC | | | |
88 `-----------------------------------------'
89Total: 18 non-data bytes
90
91In the event that fragmentation is required, the incoming payload is split into
92N parts of size ieee->fts. The first fragment contains the SNAP header and the
93remaining packets are just data.
94
95If encryption is enabled, each fragment payload size is reduced by enough space
96to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP)
97So if you have 1500 bytes of payload with ieee->fts set to 500 without
98encryption it will take 3 frames. With WEP it will take 4 frames as the
99payload of each frame is reduced to 492 bytes.
100
101* SKB visualization
102*
103* ,- skb->data
104* |
105* | ETHERNET HEADER ,-<-- PAYLOAD
106* | | 14 bytes from skb->data
107* | 2 bytes for Type --> ,T. | (sizeof ethhdr)
108* | | | |
109* |,-Dest.--. ,--Src.---. | | |
110* | 6 bytes| | 6 bytes | | | |
111* v | | | | | |
112* 0 | v 1 | v | v 2
113* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
114* ^ | ^ | ^ |
115* | | | | | |
116* | | | | `T' <---- 2 bytes for Type
117* | | | |
118* | | '---SNAP--' <-------- 6 bytes for SNAP
119* | |
120* `-IV--' <-------------------- 4 bytes for IV (WEP)
121*
122* SNAP HEADER
123*
124*/
125
126static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
127static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
128
129static int ieee80211_copy_snap(u8 * data, __be16 h_proto)
130{
131 struct ieee80211_snap_hdr *snap;
132 u8 *oui;
133
134 snap = (struct ieee80211_snap_hdr *)data;
135 snap->dsap = 0xaa;
136 snap->ssap = 0xaa;
137 snap->ctrl = 0x03;
138
139 if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX))
140 oui = P802_1H_OUI;
141 else
142 oui = RFC1042_OUI;
143 snap->oui[0] = oui[0];
144 snap->oui[1] = oui[1];
145 snap->oui[2] = oui[2];
146
147 memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16));
148
149 return SNAP_SIZE + sizeof(u16);
150}
151
152static int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
153 struct sk_buff *frag, int hdr_len)
154{
155 struct lib80211_crypt_data *crypt =
156 ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
157 int res;
158
159 if (crypt == NULL)
160 return -1;
161
162 /* To encrypt, frame format is:
163 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
164 atomic_inc(&crypt->refcnt);
165 res = 0;
166 if (crypt->ops && crypt->ops->encrypt_mpdu)
167 res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
168
169 atomic_dec(&crypt->refcnt);
170 if (res < 0) {
171 printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
172 ieee->dev->name, frag->len);
173 ieee->ieee_stats.tx_discards++;
174 return -1;
175 }
176
177 return 0;
178}
179
180void ieee80211_txb_free(struct ieee80211_txb *txb)
181{
182 int i;
183 if (unlikely(!txb))
184 return;
185 for (i = 0; i < txb->nr_frags; i++)
186 if (txb->fragments[i])
187 dev_kfree_skb_any(txb->fragments[i]);
188 kfree(txb);
189}
190
191static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
192 int headroom, gfp_t gfp_mask)
193{
194 struct ieee80211_txb *txb;
195 int i;
196 txb = kmalloc(sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
197 gfp_mask);
198 if (!txb)
199 return NULL;
200
201 memset(txb, 0, sizeof(struct ieee80211_txb));
202 txb->nr_frags = nr_frags;
203 txb->frag_size = txb_size;
204
205 for (i = 0; i < nr_frags; i++) {
206 txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
207 gfp_mask);
208 if (unlikely(!txb->fragments[i])) {
209 i--;
210 break;
211 }
212 skb_reserve(txb->fragments[i], headroom);
213 }
214 if (unlikely(i != nr_frags)) {
215 while (i >= 0)
216 dev_kfree_skb_any(txb->fragments[i--]);
217 kfree(txb);
218 return NULL;
219 }
220 return txb;
221}
222
223static int ieee80211_classify(struct sk_buff *skb)
224{
225 struct ethhdr *eth;
226 struct iphdr *ip;
227
228 eth = (struct ethhdr *)skb->data;
229 if (eth->h_proto != htons(ETH_P_IP))
230 return 0;
231
232 ip = ip_hdr(skb);
233 switch (ip->tos & 0xfc) {
234 case 0x20:
235 return 2;
236 case 0x40:
237 return 1;
238 case 0x60:
239 return 3;
240 case 0x80:
241 return 4;
242 case 0xa0:
243 return 5;
244 case 0xc0:
245 return 6;
246 case 0xe0:
247 return 7;
248 default:
249 return 0;
250 }
251}
252
253/* Incoming skb is converted to a txb which consists of
254 * a block of 802.11 fragment packets (stored as skbs) */
/*
 * ieee80211_xmit - build 802.11 fragments from an Ethernet skb and hand
 * them to the driver.
 *
 * Strips the Ethernet header, optionally performs host MSDU encryption,
 * splits the payload into fragments (plus an optional leading RTS frame),
 * prepends 802.11 headers, optionally encrypts each MPDU or builds its IV,
 * and finally calls the driver's hard_start_xmit with the assembled txb.
 *
 * Returns 0 on success/benign drop (skb consumed), 1 on failure
 * (queue stopped, tx_errors bumped).  ieee->lock is held while the txb
 * is built and released before hard_start_xmit is called.
 */
255int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
256{
257	struct ieee80211_device *ieee = netdev_priv(dev);
258	struct ieee80211_txb *txb = NULL;
259	struct ieee80211_hdr_3addrqos *frag_hdr;
260	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
261	    rts_required;
262	unsigned long flags;
263	struct net_device_stats *stats = &ieee->stats;
264	int encrypt, host_encrypt, host_encrypt_msdu, host_build_iv;
265	__be16 ether_type;
266	int bytes, fc, hdr_len;
267	struct sk_buff *skb_frag;
268	struct ieee80211_hdr_3addrqos header = {/* Ensure zero initialized */
269		.duration_id = 0,
270		.seq_ctl = 0,
271		.qos_ctl = 0
272	};
273	u8 dest[ETH_ALEN], src[ETH_ALEN];
274	struct lib80211_crypt_data *crypt;
275	int priority = skb->priority;
276	int snapped = 0;
277
	/* Let the driver apply back-pressure per QoS priority */
278	if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority))
279		return NETDEV_TX_BUSY;
280
281	spin_lock_irqsave(&ieee->lock, flags);
282
283	/* If there is no driver handler to take the TXB, dont' bother
284	 * creating it... */
285	if (!ieee->hard_start_xmit) {
286		printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
287		goto success;
288	}
289
290	if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
291		printk(KERN_WARNING "%s: skb too small (%d).\n",
292		       ieee->dev->name, skb->len);
293		goto success;
294	}
295
296	ether_type = ((struct ethhdr *)skb->data)->h_proto;
297
298	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
299
	/* EAPOL (802.1X) frames bypass encryption during key negotiation */
300	encrypt = !(ether_type == htons(ETH_P_PAE) && ieee->ieee802_1x) &&
301	    ieee->sec.encrypt;
302
303	host_encrypt = ieee->host_encrypt && encrypt && crypt;
304	host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;
305	host_build_iv = ieee->host_build_iv && encrypt && crypt;
306
307	if (!encrypt && ieee->ieee802_1x &&
308	    ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) {
309		stats->tx_dropped++;
310		goto success;
311	}
312
313	/* Save source and destination addresses */
314	skb_copy_from_linear_data(skb, dest, ETH_ALEN);
315	skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);
316
317	if (host_encrypt || host_build_iv)
318		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
319		    IEEE80211_FCTL_PROTECTED;
320	else
321		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
322
323	if (ieee->iw_mode == IW_MODE_INFRA) {
324		fc |= IEEE80211_FCTL_TODS;
325		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
326		memcpy(header.addr1, ieee->bssid, ETH_ALEN);
327		memcpy(header.addr2, src, ETH_ALEN);
328		memcpy(header.addr3, dest, ETH_ALEN);
329	} else if (ieee->iw_mode == IW_MODE_ADHOC) {
330		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
331		memcpy(header.addr1, dest, ETH_ALEN);
332		memcpy(header.addr2, src, ETH_ALEN);
333		memcpy(header.addr3, ieee->bssid, ETH_ALEN);
334	}
335	hdr_len = IEEE80211_3ADDR_LEN;
336
337	if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
338		fc |= IEEE80211_STYPE_QOS_DATA;
		/* QoS control field extends the 3-address header by 2 bytes */
339		hdr_len += 2;
340
341		skb->priority = ieee80211_classify(skb);
342		header.qos_ctl |= cpu_to_le16(skb->priority & IEEE80211_QCTL_TID);
343	}
344	header.frame_ctl = cpu_to_le16(fc);
345
346	/* Advance the SKB to the start of the payload */
347	skb_pull(skb, sizeof(struct ethhdr));
348
349	/* Determine total amount of storage required for TXB packets */
350	bytes = skb->len + SNAP_SIZE + sizeof(u16);
351
352	/* Encrypt msdu first on the whole data packet. */
353	if ((host_encrypt || host_encrypt_msdu) &&
354	    crypt && crypt->ops && crypt->ops->encrypt_msdu) {
355		int res = 0;
356		int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
357		    crypt->ops->extra_msdu_postfix_len;
358		struct sk_buff *skb_new = dev_alloc_skb(len);
359
360		if (unlikely(!skb_new))
361			goto failed;
362
363		skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
364		memcpy(skb_put(skb_new, hdr_len), &header, hdr_len);
365		snapped = 1;
366		ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
367				    ether_type);
368		skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len);
369		res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
370		if (res < 0) {
371			IEEE80211_ERROR("msdu encryption failed\n");
372			dev_kfree_skb_any(skb_new);
373			goto failed;
374		}
		/* ownership moves to the encrypted copy; original is freed */
375		dev_kfree_skb_any(skb);
376		skb = skb_new;
377		bytes += crypt->ops->extra_msdu_prefix_len +
378		    crypt->ops->extra_msdu_postfix_len;
379		skb_pull(skb, hdr_len);
380	}
381
382	if (host_encrypt || ieee->host_open_frag) {
383		/* Determine fragmentation size based on destination (multicast
384		 * and broadcast are not fragmented) */
385		if (is_multicast_ether_addr(dest) ||
386		    is_broadcast_ether_addr(dest))
387			frag_size = MAX_FRAG_THRESHOLD;
388		else
389			frag_size = ieee->fts;
390
391		/* Determine amount of payload per fragment.  Regardless of if
392		 * this stack is providing the full 802.11 header, one will
393		 * eventually be affixed to this fragment -- so we must account
394		 * for it when determining the amount of payload space. */
395		bytes_per_frag = frag_size - hdr_len;
396		if (ieee->config &
397		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
398			bytes_per_frag -= IEEE80211_FCS_LEN;
399
400		/* Each fragment may need to have room for encryptiong
401		 * pre/postfix */
402		if (host_encrypt)
403			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
404			    crypt->ops->extra_mpdu_postfix_len;
405
406		/* Number of fragments is the total
407		 * bytes_per_frag / payload_per_fragment */
408		nr_frags = bytes / bytes_per_frag;
409		bytes_last_frag = bytes % bytes_per_frag;
410		if (bytes_last_frag)
411			nr_frags++;
412		else
413			bytes_last_frag = bytes_per_frag;
414	} else {
415		nr_frags = 1;
416		bytes_per_frag = bytes_last_frag = bytes;
417		frag_size = bytes + hdr_len;
418	}
419
420	rts_required = (frag_size > ieee->rts
421			&& ieee->config & CFG_IEEE80211_RTS);
422	if (rts_required)
423		nr_frags++;
424
425	/* When we allocate the TXB we allocate enough space for the reserve
426	 * and full fragment bytes (bytes_per_frag doesn't include prefix,
427	 * postfix, header, FCS, etc.) */
428	txb = ieee80211_alloc_txb(nr_frags, frag_size,
429				  ieee->tx_headroom, GFP_ATOMIC);
430	if (unlikely(!txb)) {
431		printk(KERN_WARNING "%s: Could not allocate TXB\n",
432		       ieee->dev->name);
433		goto failed;
434	}
435	txb->encrypted = encrypt;
436	if (host_encrypt)
437		txb->payload_size = frag_size * (nr_frags - 1) +
438		    bytes_last_frag;
439	else
440		txb->payload_size = bytes;
441
442	if (rts_required) {
443		skb_frag = txb->fragments[0];
444		frag_hdr =
445		    (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
446
447		/*
448		 * Set header frame_ctl to the RTS.
449		 */
450		header.frame_ctl =
451		    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
452		memcpy(frag_hdr, &header, hdr_len);
453
454		/*
455		 * Restore header frame_ctl to the original data setting.
456		 */
457		header.frame_ctl = cpu_to_le16(fc);
458
459		if (ieee->config &
460		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
461			skb_put(skb_frag, 4);
462
463		txb->rts_included = 1;
464		i = 1;
465	} else
466		i = 0;
467
468	for (; i < nr_frags; i++) {
469		skb_frag = txb->fragments[i];
470
471		if (host_encrypt || host_build_iv)
472			skb_reserve(skb_frag,
473				    crypt->ops->extra_mpdu_prefix_len);
474
475		frag_hdr =
476		    (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
477		memcpy(frag_hdr, &header, hdr_len);
478
479		/* If this is not the last fragment, then add the MOREFRAGS
480		 * bit to the frame control */
481		if (i != nr_frags - 1) {
482			frag_hdr->frame_ctl =
483			    cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
484			bytes = bytes_per_frag;
485		} else {
486			/* The last fragment takes the remaining length */
487			bytes = bytes_last_frag;
488		}
489
490		if (i == 0 && !snapped) {
			/* only the first data fragment carries the SNAP header */
491			ieee80211_copy_snap(skb_put
492					    (skb_frag, SNAP_SIZE + sizeof(u16)),
493					    ether_type);
494			bytes -= SNAP_SIZE + sizeof(u16);
495		}
496
497		skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes);
498
499		/* Advance the SKB... */
500		skb_pull(skb, bytes);
501
502		/* Encryption routine will move the header forward in order
503		 * to insert the IV between the header and the payload */
504		if (host_encrypt)
505			ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
506		else if (host_build_iv) {
507			atomic_inc(&crypt->refcnt);
508			if (crypt->ops->build_iv)
509				crypt->ops->build_iv(skb_frag, hdr_len,
510				      ieee->sec.keys[ieee->sec.active_key],
511				      ieee->sec.key_sizes[ieee->sec.active_key],
512				      crypt->priv);
513			atomic_dec(&crypt->refcnt);
514		}
515
516		if (ieee->config &
517		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
518			skb_put(skb_frag, 4);
519	}
520
	/* "success" covers benign drops too: skb is always consumed here */
521      success:
522	spin_unlock_irqrestore(&ieee->lock, flags);
523
524	dev_kfree_skb_any(skb);
525
526	if (txb) {
527		int ret = (*ieee->hard_start_xmit) (txb, dev, priority);
528		if (ret == 0) {
529			stats->tx_packets++;
530			stats->tx_bytes += txb->payload_size;
531			return 0;
532		}
533
		/* driver refused the txb; drop it silently */
534		ieee80211_txb_free(txb);
535	}
536
537	return 0;
538
539      failed:
540	spin_unlock_irqrestore(&ieee->lock, flags);
541	netif_stop_queue(dev);
542	stats->tx_errors++;
	/* NOTE(review): skb is NOT freed on this path — presumably the core
	 * retries the same skb after the queue is woken; confirm with caller. */
543	return 1;
544}
545
546EXPORT_SYMBOL(ieee80211_txb_free);
diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c
new file mode 100644
index 000000000000..31ea3abfc327
--- /dev/null
+++ b/drivers/net/wireless/ipw2x00/libipw_wx.c
@@ -0,0 +1,760 @@
1/******************************************************************************
2
3 Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
4
5 Portions of this file are based on the WEP enablement code provided by the
6 Host AP project hostap-drivers v0.1.3
7 Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
8 <j@w1.fi>
9 Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31******************************************************************************/
32
33#include <linux/kmod.h>
34#include <linux/module.h>
35#include <linux/jiffies.h>
36
37#include <net/lib80211.h>
38#include <net/ieee80211.h>
39#include <linux/wireless.h>
40
41static const char *ieee80211_modes[] = {
42 "?", "a", "b", "ab", "g", "ag", "bg", "abg"
43};
44
45#define MAX_CUSTOM_LEN 64
/*
 * Serialize one scanned network into the Wireless Extensions event
 * stream between @start and @stop.  Emits, in order: BSSID, ESSID,
 * protocol name, mode, frequency, encryption capability, bit rates,
 * quality statistics, WPA/RSN IEs, beacon age, and channel flags.
 *
 * Returns the new write position in the event buffer; iwe_stream
 * helpers silently stop appending when the buffer is full.
 */
46static char *ieee80211_translate_scan(struct ieee80211_device *ieee,
47				      char *start, char *stop,
48				      struct ieee80211_network *network,
49				      struct iw_request_info *info)
50{
51	char custom[MAX_CUSTOM_LEN];
52	char *p;
53	struct iw_event iwe;
54	int i, j;
55	char *current_val;	/* For rates */
56	u8 rate;
57
58	/* First entry *MUST* be the AP MAC address */
59	iwe.cmd = SIOCGIWAP;
60	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
61	memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
62	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
63
64	/* Remaining entries will be displayed in the order we provide them */
65
66	/* Add the ESSID */
67	iwe.cmd = SIOCGIWESSID;
68	iwe.u.data.flags = 1;
69	iwe.u.data.length = min(network->ssid_len, (u8) 32);
70	start = iwe_stream_add_point(info, start, stop,
71				     &iwe, network->ssid);
72
73	/* Add the protocol name */
74	iwe.cmd = SIOCGIWNAME;
75	snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s",
76		 ieee80211_modes[network->mode]);
77	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
78
79	/* Add mode */
80	iwe.cmd = SIOCGIWMODE;
81	if (network->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
82		if (network->capability & WLAN_CAPABILITY_ESS)
83			iwe.u.mode = IW_MODE_MASTER;
84		else
85			iwe.u.mode = IW_MODE_ADHOC;
86
87		start = iwe_stream_add_event(info, start, stop,
88					     &iwe, IW_EV_UINT_LEN);
89	}
90
91	/* Add channel and frequency */
92	/* Note : userspace automatically computes channel using iwrange */
93	iwe.cmd = SIOCGIWFREQ;
94	iwe.u.freq.m = ieee80211_channel_to_freq(ieee, network->channel);
95	iwe.u.freq.e = 6;
96	iwe.u.freq.i = 0;
97	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);
98
99	/* Add encryption capability */
100	iwe.cmd = SIOCGIWENCODE;
101	if (network->capability & WLAN_CAPABILITY_PRIVACY)
102		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
103	else
104		iwe.u.data.flags = IW_ENCODE_DISABLED;
	/* length is 0, so the ssid payload below is never copied out */
105	iwe.u.data.length = 0;
106	start = iwe_stream_add_point(info, start, stop,
107				     &iwe, network->ssid);
108
109	/* Add basic and extended rates */
110	/* Rate : stuffing multiple values in a single event require a bit
111	 * more of magic - Jean II */
112	current_val = start + iwe_stream_lcp_len(info);
113	iwe.cmd = SIOCGIWRATE;
114	/* Those two flags are ignored... */
115	iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
116
	/* merge basic and extended rate lists in ascending order */
117	for (i = 0, j = 0; i < network->rates_len;) {
118		if (j < network->rates_ex_len &&
119		    ((network->rates_ex[j] & 0x7F) <
120		     (network->rates[i] & 0x7F)))
121			rate = network->rates_ex[j++] & 0x7F;
122		else
123			rate = network->rates[i++] & 0x7F;
124		/* Bit rate given in 500 kb/s units (+ 0x80) */
125		iwe.u.bitrate.value = ((rate & 0x7f) * 500000);
126		/* Add new value to event */
127		current_val = iwe_stream_add_value(info, start, current_val,
128						   stop, &iwe, IW_EV_PARAM_LEN);
129	}
130	for (; j < network->rates_ex_len; j++) {
131		rate = network->rates_ex[j] & 0x7F;
132		/* Bit rate given in 500 kb/s units (+ 0x80) */
133		iwe.u.bitrate.value = ((rate & 0x7f) * 500000);
134		/* Add new value to event */
135		current_val = iwe_stream_add_value(info, start, current_val,
136						   stop, &iwe, IW_EV_PARAM_LEN);
137	}
138	/* Check if we added any rate */
139	if ((current_val - start) > iwe_stream_lcp_len(info))
140		start = current_val;
141
142	/* Add quality statistics */
143	iwe.cmd = IWEVQUAL;
144	iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
145	    IW_QUAL_NOISE_UPDATED;
146
147	if (!(network->stats.mask & IEEE80211_STATMASK_RSSI)) {
148		iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID |
149		    IW_QUAL_LEVEL_INVALID;
150		iwe.u.qual.qual = 0;
151	} else {
152		if (ieee->perfect_rssi == ieee->worst_rssi)
153			iwe.u.qual.qual = 100;
154		else
			/* non-linear 0..100 quality curve between worst and
			 * perfect RSSI */
155			iwe.u.qual.qual =
156			    (100 *
157			     (ieee->perfect_rssi - ieee->worst_rssi) *
158			     (ieee->perfect_rssi - ieee->worst_rssi) -
159			     (ieee->perfect_rssi - network->stats.rssi) *
160			     (15 * (ieee->perfect_rssi - ieee->worst_rssi) +
161			      62 * (ieee->perfect_rssi -
162				    network->stats.rssi))) /
163			    ((ieee->perfect_rssi -
164			      ieee->worst_rssi) * (ieee->perfect_rssi -
165						   ieee->worst_rssi));
166		if (iwe.u.qual.qual > 100)
167			iwe.u.qual.qual = 100;
168		else if (iwe.u.qual.qual < 1)
169			iwe.u.qual.qual = 0;
170	}
171
172	if (!(network->stats.mask & IEEE80211_STATMASK_NOISE)) {
173		iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
174		iwe.u.qual.noise = 0;
175	} else {
176		iwe.u.qual.noise = network->stats.noise;
177	}
178
179	if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL)) {
180		iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
181		iwe.u.qual.level = 0;
182	} else {
183		iwe.u.qual.level = network->stats.signal;
184	}
185
186	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
187
	/* NOTE(review): p == custom here, so length is always 0 and this
	 * event is never emitted — looks like a leftover placeholder. */
188	iwe.cmd = IWEVCUSTOM;
189	p = custom;
190
191	iwe.u.data.length = p - custom;
192	if (iwe.u.data.length)
193		start = iwe_stream_add_point(info, start, stop, &iwe, custom);
194
195	memset(&iwe, 0, sizeof(iwe));
196	if (network->wpa_ie_len) {
197		char buf[MAX_WPA_IE_LEN];
198		memcpy(buf, network->wpa_ie, network->wpa_ie_len);
199		iwe.cmd = IWEVGENIE;
200		iwe.u.data.length = network->wpa_ie_len;
201		start = iwe_stream_add_point(info, start, stop, &iwe, buf);
202	}
203
204	memset(&iwe, 0, sizeof(iwe));
205	if (network->rsn_ie_len) {
206		char buf[MAX_WPA_IE_LEN];
207		memcpy(buf, network->rsn_ie, network->rsn_ie_len);
208		iwe.cmd = IWEVGENIE;
209		iwe.u.data.length = network->rsn_ie_len;
210		start = iwe_stream_add_point(info, start, stop, &iwe, buf);
211	}
212
213	/* Add EXTRA: Age to display seconds since last beacon/probe response
214	 * for given network. */
215	iwe.cmd = IWEVCUSTOM;
216	p = custom;
217	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
218		      " Last beacon: %dms ago",
219		      jiffies_to_msecs(jiffies - network->last_scanned));
220	iwe.u.data.length = p - custom;
221	if (iwe.u.data.length)
222		start = iwe_stream_add_point(info, start, stop, &iwe, custom);
223
224	/* Add spectrum management information */
	/* iwe.cmd doubles as a flag: it stays -1 unless a channel flag
	 * is set, in which case the custom event is emitted below */
225	iwe.cmd = -1;
226	p = custom;
227	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Channel flags: ");
228
229	if (ieee80211_get_channel_flags(ieee, network->channel) &
230	    IEEE80211_CH_INVALID) {
231		iwe.cmd = IWEVCUSTOM;
232		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "INVALID ");
233	}
234
235	if (ieee80211_get_channel_flags(ieee, network->channel) &
236	    IEEE80211_CH_RADAR_DETECT) {
237		iwe.cmd = IWEVCUSTOM;
238		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "DFS ");
239	}
240
241	if (iwe.cmd == IWEVCUSTOM) {
242		iwe.u.data.length = p - custom;
243		start = iwe_stream_add_point(info, start, stop, &iwe, custom);
244	}
245
246	return start;
247}
248
249#define SCAN_ITEM_SIZE 128
250
251int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
252 struct iw_request_info *info,
253 union iwreq_data *wrqu, char *extra)
254{
255 struct ieee80211_network *network;
256 unsigned long flags;
257 int err = 0;
258
259 char *ev = extra;
260 char *stop = ev + wrqu->data.length;
261 int i = 0;
262 DECLARE_SSID_BUF(ssid);
263
264 IEEE80211_DEBUG_WX("Getting scan\n");
265
266 spin_lock_irqsave(&ieee->lock, flags);
267
268 list_for_each_entry(network, &ieee->network_list, list) {
269 i++;
270 if (stop - ev < SCAN_ITEM_SIZE) {
271 err = -E2BIG;
272 break;
273 }
274
275 if (ieee->scan_age == 0 ||
276 time_after(network->last_scanned + ieee->scan_age, jiffies))
277 ev = ieee80211_translate_scan(ieee, ev, stop, network,
278 info);
279 else
280 IEEE80211_DEBUG_SCAN("Not showing network '%s ("
281 "%pM)' due to age (%dms).\n",
282 print_ssid(ssid, network->ssid,
283 network->ssid_len),
284 network->bssid,
285 jiffies_to_msecs(jiffies -
286 network->
287 last_scanned));
288 }
289
290 spin_unlock_irqrestore(&ieee->lock, flags);
291
292 wrqu->data.length = ev - extra;
293 wrqu->data.flags = 0;
294
295 IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
296
297 return err;
298}
299
/*
 * SIOCSIWENCODE handler: configure, change, or disable a WEP key.
 *
 * Handles four cases: disabling encryption (per-key or all keys),
 * lazily instantiating the lib80211 WEP crypto context when host
 * crypto is in use, installing new key material from userspace, and
 * selecting the default TX key index.  The resulting security state is
 * pushed to the driver via ieee->set_security, and the port may be
 * reset afterwards (except in Managed mode — see comment below).
 *
 * Returns 0 on success, -EINVAL for a bad key index or failed port
 * reset, -ENOMEM / -EOPNOTSUPP when the WEP module cannot be set up.
 */
300int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
301			    struct iw_request_info *info,
302			    union iwreq_data *wrqu, char *keybuf)
303{
304	struct iw_point *erq = &(wrqu->encoding);
305	struct net_device *dev = ieee->dev;
306	struct ieee80211_security sec = {
307		.flags = 0
308	};
309	int i, key, key_provided, len;
310	struct lib80211_crypt_data **crypt;
311	int host_crypto = ieee->host_encrypt || ieee->host_decrypt || ieee->host_build_iv;
312	DECLARE_SSID_BUF(ssid);
313
314	IEEE80211_DEBUG_WX("SET_ENCODE\n");
315
	/* IW_ENCODE_INDEX is 1-based; 0 means "use current TX key" */
316	key = erq->flags & IW_ENCODE_INDEX;
317	if (key) {
318		if (key > WEP_KEYS)
319			return -EINVAL;
320		key--;
321		key_provided = 1;
322	} else {
323		key_provided = 0;
324		key = ieee->crypt_info.tx_keyidx;
325	}
326
327	IEEE80211_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
328			   "provided" : "default");
329
330	crypt = &ieee->crypt_info.crypt[key];
331
332	if (erq->flags & IW_ENCODE_DISABLED) {
333		if (key_provided && *crypt) {
334			IEEE80211_DEBUG_WX("Disabling encryption on key %d.\n",
335					   key);
336			lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
337		} else
338			IEEE80211_DEBUG_WX("Disabling encryption.\n");
339
340		/* Check all the keys to see if any are still configured,
341		 * and if no key index was provided, de-init them all */
342		for (i = 0; i < WEP_KEYS; i++) {
343			if (ieee->crypt_info.crypt[i] != NULL) {
344				if (key_provided)
345					break;
346				lib80211_crypt_delayed_deinit(&ieee->crypt_info,
347							      &ieee->crypt_info.crypt[i]);
348			}
349		}
350
		/* i == WEP_KEYS iff no configured key remains */
351		if (i == WEP_KEYS) {
352			sec.enabled = 0;
353			sec.encrypt = 0;
354			sec.level = SEC_LEVEL_0;
355			sec.flags |= SEC_ENABLED | SEC_LEVEL | SEC_ENCRYPT;
356		}
357
358		goto done;
359	}
360
361	sec.enabled = 1;
362	sec.encrypt = 1;
363	sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
364
365	if (*crypt != NULL && (*crypt)->ops != NULL &&
366	    strcmp((*crypt)->ops->name, "WEP") != 0) {
367		/* changing to use WEP; deinit previously used algorithm
368		 * on this key */
369		lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
370	}
371
372	if (*crypt == NULL && host_crypto) {
373		struct lib80211_crypt_data *new_crypt;
374
375		/* take WEP into use */
376		new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
377				    GFP_KERNEL);
378		if (new_crypt == NULL)
379			return -ENOMEM;
380		new_crypt->ops = lib80211_get_crypto_ops("WEP");
381		if (!new_crypt->ops) {
			/* try loading the WEP crypto module on demand */
382			request_module("lib80211_crypt_wep");
383			new_crypt->ops = lib80211_get_crypto_ops("WEP");
384		}
385
386		if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
387			new_crypt->priv = new_crypt->ops->init(key);
388
389		if (!new_crypt->ops || !new_crypt->priv) {
390			kfree(new_crypt);
391			new_crypt = NULL;
392
393			printk(KERN_WARNING "%s: could not initialize WEP: "
394			       "load module lib80211_crypt_wep\n", dev->name);
395			return -EOPNOTSUPP;
396		}
397		*crypt = new_crypt;
398	}
399
400	/* If a new key was provided, set it up */
401	if (erq->length > 0) {
	/* NOTE(review): this shadows the ssid buffer declared at function
	 * scope above; one of the two declarations is redundant. */
402#ifdef CONFIG_IEEE80211_DEBUG
403		DECLARE_SSID_BUF(ssid);
404#endif
405
		/* WEP keys are either 40-bit (5 bytes) or 104-bit (13 bytes);
		 * short input is zero-padded up to the chosen size */
406		len = erq->length <= 5 ? 5 : 13;
407		memcpy(sec.keys[key], keybuf, erq->length);
408		if (len > erq->length)
409			memset(sec.keys[key] + erq->length, 0,
410			       len - erq->length);
411		IEEE80211_DEBUG_WX("Setting key %d to '%s' (%d:%d bytes)\n",
412				   key, print_ssid(ssid, sec.keys[key], len),
413				   erq->length, len);
414		sec.key_sizes[key] = len;
415		if (*crypt)
416			(*crypt)->ops->set_key(sec.keys[key], len, NULL,
417					       (*crypt)->priv);
418		sec.flags |= (1 << key);
419		/* This ensures a key will be activated if no key is
420		 * explicitly set */
421		if (key == sec.active_key)
422			sec.flags |= SEC_ACTIVE_KEY;
423
424	} else {
425		if (host_crypto) {
426			len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
427						     NULL, (*crypt)->priv);
428			if (len == 0) {
429				/* Set a default key of all 0 */
430				IEEE80211_DEBUG_WX("Setting key %d to all "
431						   "zero.\n", key);
432				memset(sec.keys[key], 0, 13);
433				(*crypt)->ops->set_key(sec.keys[key], 13, NULL,
434						       (*crypt)->priv);
435				sec.key_sizes[key] = 13;
436				sec.flags |= (1 << key);
437			}
438		}
439		/* No key data - just set the default TX key index */
440		if (key_provided) {
441			IEEE80211_DEBUG_WX("Setting key %d to default Tx "
442					   "key.\n", key);
443			ieee->crypt_info.tx_keyidx = key;
444			sec.active_key = key;
445			sec.flags |= SEC_ACTIVE_KEY;
446		}
447	}
448	if (erq->flags & (IW_ENCODE_OPEN | IW_ENCODE_RESTRICTED)) {
449		ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
450		sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN :
451		    WLAN_AUTH_SHARED_KEY;
452		sec.flags |= SEC_AUTH_MODE;
453		IEEE80211_DEBUG_WX("Auth: %s\n",
454				   sec.auth_mode == WLAN_AUTH_OPEN ?
455				   "OPEN" : "SHARED KEY");
456	}
457
458	/* For now we just support WEP, so only set that security level...
459	 * TODO: When WPA is added this is one place that needs to change */
460	sec.flags |= SEC_LEVEL;
461	sec.level = SEC_LEVEL_1;	/* 40 and 104 bit WEP */
462	sec.encode_alg[key] = SEC_ALG_WEP;
463
464      done:
465	if (ieee->set_security)
466		ieee->set_security(dev, &sec);
467
468	/* Do not reset port if card is in Managed mode since resetting will
469	 * generate new IEEE 802.11 authentication which may end up in looping
470	 * with IEEE 802.1X. If your hardware requires a reset after WEP
471	 * configuration (for example... Prism2), implement the reset_port in
472	 * the callbacks structures used to initialize the 802.11 stack. */
473	if (ieee->reset_on_keychange &&
474	    ieee->iw_mode != IW_MODE_INFRA &&
475	    ieee->reset_port && ieee->reset_port(dev)) {
476		printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
477		return -EINVAL;
478	}
479	return 0;
480}
481
482int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
483 struct iw_request_info *info,
484 union iwreq_data *wrqu, char *keybuf)
485{
486 struct iw_point *erq = &(wrqu->encoding);
487 int len, key;
488 struct lib80211_crypt_data *crypt;
489 struct ieee80211_security *sec = &ieee->sec;
490
491 IEEE80211_DEBUG_WX("GET_ENCODE\n");
492
493 key = erq->flags & IW_ENCODE_INDEX;
494 if (key) {
495 if (key > WEP_KEYS)
496 return -EINVAL;
497 key--;
498 } else
499 key = ieee->crypt_info.tx_keyidx;
500
501 crypt = ieee->crypt_info.crypt[key];
502 erq->flags = key + 1;
503
504 if (!sec->enabled) {
505 erq->length = 0;
506 erq->flags |= IW_ENCODE_DISABLED;
507 return 0;
508 }
509
510 len = sec->key_sizes[key];
511 memcpy(keybuf, sec->keys[key], len);
512
513 erq->length = len;
514 erq->flags |= IW_ENCODE_ENABLED;
515
516 if (ieee->open_wep)
517 erq->flags |= IW_ENCODE_OPEN;
518 else
519 erq->flags |= IW_ENCODE_RESTRICTED;
520
521 return 0;
522}
523
/*
 * Wireless-extensions SIOCSIWENCODEEXT handler: install, replace or
 * remove a WEP/TKIP/CCMP key via the lib80211 software-crypto layer,
 * then push the resulting security state to the driver through
 * ieee->set_security().
 */
int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct net_device *dev = ieee->dev;
	struct iw_point *encoding = &wrqu->encoding;
	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
	int i, idx, ret = 0;
	int group_key = 0;
	const char *alg, *module;
	struct lib80211_crypto_ops *ops;
	struct lib80211_crypt_data **crypt;

	/* Accumulates the security settings handed to the driver at "done". */
	struct ieee80211_security sec = {
		.flags = 0,
	};

	/* wext key index is 1-based; 0 means "the current TX key". */
	idx = encoding->flags & IW_ENCODE_INDEX;
	if (idx) {
		if (idx < 1 || idx > WEP_KEYS)
			return -EINVAL;
		idx--;
	} else
		idx = ieee->crypt_info.tx_keyidx;

	/* Pick the crypt context slot this request operates on. */
	if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
		crypt = &ieee->crypt_info.crypt[idx];
		group_key = 1;
	} else {
		/* some Cisco APs use idx>0 for unicast in dynamic WEP */
		if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
			return -EINVAL;
		if (ieee->iw_mode == IW_MODE_INFRA)
			crypt = &ieee->crypt_info.crypt[idx];
		else
			return -EINVAL;
	}

	sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
	/* Key removal path: tear down this context, and if no key slot is
	 * left in use, drop the security level to "none". */
	if ((encoding->flags & IW_ENCODE_DISABLED) ||
	    ext->alg == IW_ENCODE_ALG_NONE) {
		if (*crypt)
			lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);

		for (i = 0; i < WEP_KEYS; i++)
			if (ieee->crypt_info.crypt[i] != NULL)
				break;

		if (i == WEP_KEYS) {
			sec.enabled = 0;
			sec.encrypt = 0;
			sec.level = SEC_LEVEL_0;
			sec.flags |= SEC_LEVEL;
		}
		goto done;
	}

	sec.enabled = 1;
	sec.encrypt = 1;

	/* If the hardware handles this crypto itself (no host-side
	 * encrypt/decrypt needed for this key type), skip setting up a
	 * lib80211 software context and just record the key material. */
	if (group_key ? !ieee->host_mc_decrypt :
	    !(ieee->host_encrypt || ieee->host_decrypt ||
	      ieee->host_encrypt_msdu))
		goto skip_host_crypt;

	/* Map the wext algorithm id to a lib80211 ops name and the module
	 * that provides it (for on-demand autoloading below). */
	switch (ext->alg) {
	case IW_ENCODE_ALG_WEP:
		alg = "WEP";
		module = "lib80211_crypt_wep";
		break;
	case IW_ENCODE_ALG_TKIP:
		alg = "TKIP";
		module = "lib80211_crypt_tkip";
		break;
	case IW_ENCODE_ALG_CCMP:
		alg = "CCMP";
		module = "lib80211_crypt_ccmp";
		break;
	default:
		IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
				   dev->name, ext->alg);
		ret = -EINVAL;
		goto done;
	}

	/* Try to autoload the crypto module if its ops aren't registered. */
	ops = lib80211_get_crypto_ops(alg);
	if (ops == NULL) {
		request_module(module);
		ops = lib80211_get_crypto_ops(alg);
	}
	if (ops == NULL) {
		IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n",
				   dev->name, ext->alg);
		ret = -EINVAL;
		goto done;
	}

	/* (Re)create the crypt context if the slot is empty or currently
	 * bound to a different algorithm.  The old context is retired via
	 * delayed deinit so in-flight frames can still use it. */
	if (*crypt == NULL || (*crypt)->ops != ops) {
		struct lib80211_crypt_data *new_crypt;

		lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);

		new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
		if (new_crypt == NULL) {
			ret = -ENOMEM;
			goto done;
		}
		/* Hold a module reference for as long as the ops are in use. */
		new_crypt->ops = ops;
		if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
			new_crypt->priv = new_crypt->ops->init(idx);
		if (new_crypt->priv == NULL) {
			kfree(new_crypt);
			ret = -EINVAL;
			goto done;
		}
		*crypt = new_crypt;
	}

	/* Program the key (and RX sequence counter) into the context. */
	if (ext->key_len > 0 && (*crypt)->ops->set_key &&
	    (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
				   (*crypt)->priv) < 0) {
		IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name);
		ret = -EINVAL;
		goto done;
	}

      skip_host_crypt:
	if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
		ieee->crypt_info.tx_keyidx = idx;
		sec.active_key = idx;
		sec.flags |= SEC_ACTIVE_KEY;
	}

	/* Mirror the key material and derived security level into sec so
	 * the driver callback sees the complete picture. */
	if (ext->alg != IW_ENCODE_ALG_NONE) {
		memcpy(sec.keys[idx], ext->key, ext->key_len);
		sec.key_sizes[idx] = ext->key_len;
		sec.flags |= (1 << idx);
		if (ext->alg == IW_ENCODE_ALG_WEP) {
			sec.encode_alg[idx] = SEC_ALG_WEP;
			sec.flags |= SEC_LEVEL;
			sec.level = SEC_LEVEL_1;
		} else if (ext->alg == IW_ENCODE_ALG_TKIP) {
			sec.encode_alg[idx] = SEC_ALG_TKIP;
			sec.flags |= SEC_LEVEL;
			sec.level = SEC_LEVEL_2;
		} else if (ext->alg == IW_ENCODE_ALG_CCMP) {
			sec.encode_alg[idx] = SEC_ALG_CCMP;
			sec.flags |= SEC_LEVEL;
			sec.level = SEC_LEVEL_3;
		}
		/* Don't set sec level for group keys. */
		if (group_key)
			sec.flags &= ~SEC_LEVEL;
	}
      done:
	if (ieee->set_security)
		ieee->set_security(ieee->dev, &sec);

	/*
	 * Do not reset port if card is in Managed mode since resetting will
	 * generate new IEEE 802.11 authentication which may end up in looping
	 * with IEEE 802.1X. If your hardware requires a reset after WEP
	 * configuration (for example... Prism2), implement the reset_port in
	 * the callbacks structures used to initialize the 802.11 stack.
	 */
	if (ieee->reset_on_keychange &&
	    ieee->iw_mode != IW_MODE_INFRA &&
	    ieee->reset_port && ieee->reset_port(dev)) {
		IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name);
		return -EINVAL;
	}

	return ret;
}
698
699int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee,
700 struct iw_request_info *info,
701 union iwreq_data *wrqu, char *extra)
702{
703 struct iw_point *encoding = &wrqu->encoding;
704 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
705 struct ieee80211_security *sec = &ieee->sec;
706 int idx, max_key_len;
707
708 max_key_len = encoding->length - sizeof(*ext);
709 if (max_key_len < 0)
710 return -EINVAL;
711
712 idx = encoding->flags & IW_ENCODE_INDEX;
713 if (idx) {
714 if (idx < 1 || idx > WEP_KEYS)
715 return -EINVAL;
716 idx--;
717 } else
718 idx = ieee->crypt_info.tx_keyidx;
719
720 if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
721 ext->alg != IW_ENCODE_ALG_WEP)
722 if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA)
723 return -EINVAL;
724
725 encoding->flags = idx + 1;
726 memset(ext, 0, sizeof(*ext));
727
728 if (!sec->enabled) {
729 ext->alg = IW_ENCODE_ALG_NONE;
730 ext->key_len = 0;
731 encoding->flags |= IW_ENCODE_DISABLED;
732 } else {
733 if (sec->encode_alg[idx] == SEC_ALG_WEP)
734 ext->alg = IW_ENCODE_ALG_WEP;
735 else if (sec->encode_alg[idx] == SEC_ALG_TKIP)
736 ext->alg = IW_ENCODE_ALG_TKIP;
737 else if (sec->encode_alg[idx] == SEC_ALG_CCMP)
738 ext->alg = IW_ENCODE_ALG_CCMP;
739 else
740 return -EINVAL;
741
742 ext->key_len = sec->key_sizes[idx];
743 memcpy(ext->key, sec->keys[idx], ext->key_len);
744 encoding->flags |= IW_ENCODE_ENABLED;
745 if (ext->key_len &&
746 (ext->alg == IW_ENCODE_ALG_TKIP ||
747 ext->alg == IW_ENCODE_ALG_CCMP))
748 ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
749
750 }
751
752 return 0;
753}
754
/* wext handler entry points exported for drivers built on this 802.11 stack */
EXPORT_SYMBOL(ieee80211_wx_set_encodeext);
EXPORT_SYMBOL(ieee80211_wx_get_encodeext);

EXPORT_SYMBOL(ieee80211_wx_get_scan);
EXPORT_SYMBOL(ieee80211_wx_set_encode);
EXPORT_SYMBOL(ieee80211_wx_get_encode);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index b0ac0ce3fb9f..47bee0ee0a7c 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -4,6 +4,7 @@ config IWLWIFI
4config IWLCORE 4config IWLCORE
5 tristate "Intel Wireless Wifi Core" 5 tristate "Intel Wireless Wifi Core"
6 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 6 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
7 select LIB80211
7 select IWLWIFI 8 select IWLWIFI
8 select MAC80211_LEDS if IWLWIFI_LEDS 9 select MAC80211_LEDS if IWLWIFI_LEDS
9 select LEDS_CLASS if IWLWIFI_LEDS 10 select LEDS_CLASS if IWLWIFI_LEDS
@@ -105,6 +106,7 @@ config IWL3945
105 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection" 106 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection"
106 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 107 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
107 select FW_LOADER 108 select FW_LOADER
109 select LIB80211
108 select IWLWIFI 110 select IWLWIFI
109 select MAC80211_LEDS if IWL3945_LEDS 111 select MAC80211_LEDS if IWL3945_LEDS
110 select LEDS_CLASS if IWL3945_LEDS 112 select LEDS_CLASS if IWL3945_LEDS
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 47aa28f6a513..0be9e6b66aa0 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -5,9 +5,10 @@ iwlcore-objs += iwl-scan.o
5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
6iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o 6iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
7iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o 7iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o
8iwlcore-$(CONFIG_IWLAGN_SPECTRUM_MEASUREMENT) += iwl-spectrum.o
8 9
9obj-$(CONFIG_IWLAGN) += iwlagn.o 10obj-$(CONFIG_IWLAGN) += iwlagn.o
10iwlagn-objs := iwl-agn.o iwl-agn-rs.o 11iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-hcmd-check.o
11 12
12iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 13iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
13iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 14iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-commands.h b/drivers/net/wireless/iwlwifi/iwl-3945-commands.h
index 817ece773643..c6f4eb54a2b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-commands.h
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -69,6 +69,12 @@
69#ifndef __iwl_3945_commands_h__ 69#ifndef __iwl_3945_commands_h__
70#define __iwl_3945_commands_h__ 70#define __iwl_3945_commands_h__
71 71
72/* uCode version contains 4 values: Major/Minor/API/Serial */
73#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
74#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
75#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
76#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
77
72enum { 78enum {
73 REPLY_ALIVE = 0x1, 79 REPLY_ALIVE = 0x1,
74 REPLY_ERROR = 0x2, 80 REPLY_ERROR = 0x2,
@@ -121,7 +127,7 @@ enum {
121 REPLY_TX_PWR_TABLE_CMD = 0x97, 127 REPLY_TX_PWR_TABLE_CMD = 0x97,
122 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */ 128 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */
123 129
124 /* Bluetooth device coexistance config command */ 130 /* Bluetooth device coexistence config command */
125 REPLY_BT_CONFIG = 0x9b, 131 REPLY_BT_CONFIG = 0x9b,
126 132
127 /* Statistics */ 133 /* Statistics */
@@ -158,7 +164,7 @@ struct iwl3945_cmd_header {
158 u8 cmd; /* Command ID: REPLY_RXON, etc. */ 164 u8 cmd; /* Command ID: REPLY_RXON, etc. */
159 u8 flags; /* IWL_CMD_* */ 165 u8 flags; /* IWL_CMD_* */
160 /* 166 /*
161 * The driver sets up the sequence number to values of its chosing. 167 * The driver sets up the sequence number to values of its choosing.
162 * uCode does not use this value, but passes it back to the driver 168 * uCode does not use this value, but passes it back to the driver
163 * when sending the response to each driver-originated command, so 169 * when sending the response to each driver-originated command, so
164 * the driver can match the response to the command. Since the values 170 * the driver can match the response to the command. Since the values
@@ -220,7 +226,7 @@ struct iwl3945_power_per_rate {
220 * 226 *
221 *****************************************************************************/ 227 *****************************************************************************/
222 228
223#define UCODE_VALID_OK __constant_cpu_to_le32(0x1) 229#define UCODE_VALID_OK cpu_to_le32(0x1)
224#define INITIALIZE_SUBTYPE (9) 230#define INITIALIZE_SUBTYPE (9)
225 231
226/* 232/*
@@ -322,42 +328,42 @@ enum {
322 328
323/* rx_config flags */ 329/* rx_config flags */
324/* band & modulation selection */ 330/* band & modulation selection */
325#define RXON_FLG_BAND_24G_MSK __constant_cpu_to_le32(1 << 0) 331#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0)
326#define RXON_FLG_CCK_MSK __constant_cpu_to_le32(1 << 1) 332#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1)
327/* auto detection enable */ 333/* auto detection enable */
328#define RXON_FLG_AUTO_DETECT_MSK __constant_cpu_to_le32(1 << 2) 334#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2)
329/* TGg protection when tx */ 335/* TGg protection when tx */
330#define RXON_FLG_TGG_PROTECT_MSK __constant_cpu_to_le32(1 << 3) 336#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3)
331/* cck short slot & preamble */ 337/* cck short slot & preamble */
332#define RXON_FLG_SHORT_SLOT_MSK __constant_cpu_to_le32(1 << 4) 338#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4)
333#define RXON_FLG_SHORT_PREAMBLE_MSK __constant_cpu_to_le32(1 << 5) 339#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5)
334/* antenna selection */ 340/* antenna selection */
335#define RXON_FLG_DIS_DIV_MSK __constant_cpu_to_le32(1 << 7) 341#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7)
336#define RXON_FLG_ANT_SEL_MSK __constant_cpu_to_le32(0x0f00) 342#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00)
337#define RXON_FLG_ANT_A_MSK __constant_cpu_to_le32(1 << 8) 343#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
338#define RXON_FLG_ANT_B_MSK __constant_cpu_to_le32(1 << 9) 344#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
339/* radar detection enable */ 345/* radar detection enable */
340#define RXON_FLG_RADAR_DETECT_MSK __constant_cpu_to_le32(1 << 12) 346#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12)
341#define RXON_FLG_TGJ_NARROW_BAND_MSK __constant_cpu_to_le32(1 << 13) 347#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13)
342/* rx response to host with 8-byte TSF 348/* rx response to host with 8-byte TSF
343* (according to ON_AIR deassertion) */ 349* (according to ON_AIR deassertion) */
344#define RXON_FLG_TSF2HOST_MSK __constant_cpu_to_le32(1 << 15) 350#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
345 351
346/* rx_config filter flags */ 352/* rx_config filter flags */
347/* accept all data frames */ 353/* accept all data frames */
348#define RXON_FILTER_PROMISC_MSK __constant_cpu_to_le32(1 << 0) 354#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0)
349/* pass control & management to host */ 355/* pass control & management to host */
350#define RXON_FILTER_CTL2HOST_MSK __constant_cpu_to_le32(1 << 1) 356#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1)
351/* accept multi-cast */ 357/* accept multi-cast */
352#define RXON_FILTER_ACCEPT_GRP_MSK __constant_cpu_to_le32(1 << 2) 358#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2)
353/* don't decrypt uni-cast frames */ 359/* don't decrypt uni-cast frames */
354#define RXON_FILTER_DIS_DECRYPT_MSK __constant_cpu_to_le32(1 << 3) 360#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3)
355/* don't decrypt multi-cast frames */ 361/* don't decrypt multi-cast frames */
356#define RXON_FILTER_DIS_GRP_DECRYPT_MSK __constant_cpu_to_le32(1 << 4) 362#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
357/* STA is associated */ 363/* STA is associated */
358#define RXON_FILTER_ASSOC_MSK __constant_cpu_to_le32(1 << 5) 364#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5)
359/* transfer to host non bssid beacons in associated state */ 365/* transfer to host non bssid beacons in associated state */
360#define RXON_FILTER_BCON_AWARE_MSK __constant_cpu_to_le32(1 << 6) 366#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
361 367
362/** 368/**
363 * REPLY_RXON = 0x10 (command, has simple generic response) 369 * REPLY_RXON = 0x10 (command, has simple generic response)
@@ -471,9 +477,9 @@ struct iwl3945_ac_qos {
471} __attribute__ ((packed)); 477} __attribute__ ((packed));
472 478
473/* QoS flags defines */ 479/* QoS flags defines */
474#define QOS_PARAM_FLG_UPDATE_EDCA_MSK __constant_cpu_to_le32(0x01) 480#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
475#define QOS_PARAM_FLG_TGN_MSK __constant_cpu_to_le32(0x02) 481#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02)
476#define QOS_PARAM_FLG_TXOP_TYPE_MSK __constant_cpu_to_le32(0x10) 482#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10)
477 483
478/* Number of Access Categories (AC) (EDCA), queues 0..3 */ 484/* Number of Access Categories (AC) (EDCA), queues 0..3 */
479#define AC_NUM 4 485#define AC_NUM 4
@@ -508,27 +514,27 @@ struct iwl3945_qosparam_cmd {
508#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ 514#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
509#define IWL_INVALID_STATION 255 515#define IWL_INVALID_STATION 255
510 516
511#define STA_FLG_TX_RATE_MSK __constant_cpu_to_le32(1 << 2); 517#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2);
512#define STA_FLG_PWR_SAVE_MSK __constant_cpu_to_le32(1 << 8); 518#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8);
513 519
514/* Use in mode field. 1: modify existing entry, 0: add new station entry */ 520/* Use in mode field. 1: modify existing entry, 0: add new station entry */
515#define STA_CONTROL_MODIFY_MSK 0x01 521#define STA_CONTROL_MODIFY_MSK 0x01
516 522
517/* key flags __le16*/ 523/* key flags __le16*/
518#define STA_KEY_FLG_ENCRYPT_MSK __constant_cpu_to_le16(0x0007) 524#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007)
519#define STA_KEY_FLG_NO_ENC __constant_cpu_to_le16(0x0000) 525#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000)
520#define STA_KEY_FLG_WEP __constant_cpu_to_le16(0x0001) 526#define STA_KEY_FLG_WEP cpu_to_le16(0x0001)
521#define STA_KEY_FLG_CCMP __constant_cpu_to_le16(0x0002) 527#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002)
522#define STA_KEY_FLG_TKIP __constant_cpu_to_le16(0x0003) 528#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003)
523 529
524#define STA_KEY_FLG_KEYID_POS 8 530#define STA_KEY_FLG_KEYID_POS 8
525#define STA_KEY_FLG_INVALID __constant_cpu_to_le16(0x0800) 531#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800)
526/* wep key is either from global key (0) or from station info array (1) */ 532/* wep key is either from global key (0) or from station info array (1) */
527#define STA_KEY_FLG_WEP_KEY_MAP_MSK __constant_cpu_to_le16(0x0008) 533#define STA_KEY_FLG_WEP_KEY_MAP_MSK cpu_to_le16(0x0008)
528 534
529/* wep key in STA: 5-bytes (0) or 13-bytes (1) */ 535/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
530#define STA_KEY_FLG_KEY_SIZE_MSK __constant_cpu_to_le16(0x1000) 536#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
531#define STA_KEY_MULTICAST_MSK __constant_cpu_to_le16(0x4000) 537#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
532 538
533/* Flags indicate whether to modify vs. don't change various station params */ 539/* Flags indicate whether to modify vs. don't change various station params */
534#define STA_MODIFY_KEY_MASK 0x01 540#define STA_MODIFY_KEY_MASK 0x01
@@ -666,14 +672,14 @@ struct iwl3945_rx_frame_hdr {
666 u8 payload[0]; 672 u8 payload[0];
667} __attribute__ ((packed)); 673} __attribute__ ((packed));
668 674
669#define RX_RES_STATUS_NO_CRC32_ERROR __constant_cpu_to_le32(1 << 0) 675#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0)
670#define RX_RES_STATUS_NO_RXE_OVERFLOW __constant_cpu_to_le32(1 << 1) 676#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1)
671 677
672#define RX_RES_PHY_FLAGS_BAND_24_MSK __constant_cpu_to_le16(1 << 0) 678#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0)
673#define RX_RES_PHY_FLAGS_MOD_CCK_MSK __constant_cpu_to_le16(1 << 1) 679#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
674#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK __constant_cpu_to_le16(1 << 2) 680#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
675#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK __constant_cpu_to_le16(1 << 3) 681#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
676#define RX_RES_PHY_FLAGS_ANTENNA_MSK __constant_cpu_to_le16(0xf0) 682#define RX_RES_PHY_FLAGS_ANTENNA_MSK cpu_to_le16(0xf0)
677 683
678#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) 684#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
679#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) 685#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
@@ -733,57 +739,57 @@ struct iwl3945_rx_frame {
733 739
734/* 1: Use Request-To-Send protocol before this frame. 740/* 1: Use Request-To-Send protocol before this frame.
735 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. */ 741 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. */
736#define TX_CMD_FLG_RTS_MSK __constant_cpu_to_le32(1 << 1) 742#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
737 743
738/* 1: Transmit Clear-To-Send to self before this frame. 744/* 1: Transmit Clear-To-Send to self before this frame.
739 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames. 745 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
740 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. */ 746 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. */
741#define TX_CMD_FLG_CTS_MSK __constant_cpu_to_le32(1 << 2) 747#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
742 748
743/* 1: Expect ACK from receiving station 749/* 1: Expect ACK from receiving station
744 * 0: Don't expect ACK (MAC header's duration field s/b 0) 750 * 0: Don't expect ACK (MAC header's duration field s/b 0)
745 * Set this for unicast frames, but not broadcast/multicast. */ 751 * Set this for unicast frames, but not broadcast/multicast. */
746#define TX_CMD_FLG_ACK_MSK __constant_cpu_to_le32(1 << 3) 752#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
747 753
748/* 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). 754/* 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
749 * Tx command's initial_rate_index indicates first rate to try; 755 * Tx command's initial_rate_index indicates first rate to try;
750 * uCode walks through table for additional Tx attempts. 756 * uCode walks through table for additional Tx attempts.
751 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field. 757 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
752 * This rate will be used for all Tx attempts; it will not be scaled. */ 758 * This rate will be used for all Tx attempts; it will not be scaled. */
753#define TX_CMD_FLG_STA_RATE_MSK __constant_cpu_to_le32(1 << 4) 759#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
754 760
755/* 1: Expect immediate block-ack. 761/* 1: Expect immediate block-ack.
756 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */ 762 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
757#define TX_CMD_FLG_IMM_BA_RSP_MASK __constant_cpu_to_le32(1 << 6) 763#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
758 764
759/* 1: Frame requires full Tx-Op protection. 765/* 1: Frame requires full Tx-Op protection.
760 * Set this if either RTS or CTS Tx Flag gets set. */ 766 * Set this if either RTS or CTS Tx Flag gets set. */
761#define TX_CMD_FLG_FULL_TXOP_PROT_MSK __constant_cpu_to_le32(1 << 7) 767#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
762 768
763/* Tx antenna selection field; used only for 3945, reserved (0) for 4965. 769/* Tx antenna selection field; used only for 3945, reserved (0) for 4965.
764 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */ 770 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
765#define TX_CMD_FLG_ANT_SEL_MSK __constant_cpu_to_le32(0xf00) 771#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
766#define TX_CMD_FLG_ANT_A_MSK __constant_cpu_to_le32(1 << 8) 772#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
767#define TX_CMD_FLG_ANT_B_MSK __constant_cpu_to_le32(1 << 9) 773#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
768 774
769/* 1: Ignore Bluetooth priority for this frame. 775/* 1: Ignore Bluetooth priority for this frame.
770 * 0: Delay Tx until Bluetooth device is done (normal usage). */ 776 * 0: Delay Tx until Bluetooth device is done (normal usage). */
771#define TX_CMD_FLG_BT_DIS_MSK __constant_cpu_to_le32(1 << 12) 777#define TX_CMD_FLG_BT_DIS_MSK cpu_to_le32(1 << 12)
772 778
773/* 1: uCode overrides sequence control field in MAC header. 779/* 1: uCode overrides sequence control field in MAC header.
774 * 0: Driver provides sequence control field in MAC header. 780 * 0: Driver provides sequence control field in MAC header.
775 * Set this for management frames, non-QOS data frames, non-unicast frames, 781 * Set this for management frames, non-QOS data frames, non-unicast frames,
776 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */ 782 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
777#define TX_CMD_FLG_SEQ_CTL_MSK __constant_cpu_to_le32(1 << 13) 783#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
778 784
779/* 1: This frame is non-last MPDU; more fragments are coming. 785/* 1: This frame is non-last MPDU; more fragments are coming.
780 * 0: Last fragment, or not using fragmentation. */ 786 * 0: Last fragment, or not using fragmentation. */
781#define TX_CMD_FLG_MORE_FRAG_MSK __constant_cpu_to_le32(1 << 14) 787#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
782 788
783/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame. 789/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
784 * 0: No TSF required in outgoing frame. 790 * 0: No TSF required in outgoing frame.
785 * Set this for transmitting beacons and probe responses. */ 791 * Set this for transmitting beacons and probe responses. */
786#define TX_CMD_FLG_TSF_MSK __constant_cpu_to_le32(1 << 16) 792#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
787 793
788/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword 794/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
789 * alignment of frame's payload data field. 795 * alignment of frame's payload data field.
@@ -791,10 +797,10 @@ struct iwl3945_rx_frame {
791 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4 797 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
792 * field (but not both). Driver must align frame data (i.e. data following 798 * field (but not both). Driver must align frame data (i.e. data following
793 * MAC header) to DWORD boundary. */ 799 * MAC header) to DWORD boundary. */
794#define TX_CMD_FLG_MH_PAD_MSK __constant_cpu_to_le32(1 << 20) 800#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
795 801
796/* HCCA-AP - disable duration overwriting. */ 802/* HCCA-AP - disable duration overwriting. */
797#define TX_CMD_FLG_DUR_MSK __constant_cpu_to_le32(1 << 25) 803#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
798 804
799/* 805/*
800 * TX command security control 806 * TX command security control
@@ -991,7 +997,7 @@ struct iwl3945_rate_scaling_cmd {
991 * 997 *
992 * 3945 and 4965 support hardware handshake with Bluetooth device on 998 * 3945 and 4965 support hardware handshake with Bluetooth device on
993 * same platform. Bluetooth device alerts wireless device when it will Tx; 999 * same platform. Bluetooth device alerts wireless device when it will Tx;
994 * wireless device can delay or kill its own Tx to accomodate. 1000 * wireless device can delay or kill its own Tx to accommodate.
995 */ 1001 */
996struct iwl3945_bt_cmd { 1002struct iwl3945_bt_cmd {
997 u8 flags; 1003 u8 flags;
@@ -1158,9 +1164,9 @@ struct iwl3945_spectrum_notification {
1158 */ 1164 */
1159#define IWL_POWER_VEC_SIZE 5 1165#define IWL_POWER_VEC_SIZE 5
1160 1166
1161#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le32(1 << 0) 1167#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le32(1 << 0)
1162#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le32(1 << 2) 1168#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le32(1 << 2)
1163#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le32(1 << 3) 1169#define IWL_POWER_PCI_PM_MSK cpu_to_le32(1 << 3)
1164struct iwl3945_powertable_cmd { 1170struct iwl3945_powertable_cmd {
1165 __le32 flags; 1171 __le32 flags;
1166 __le32 rx_data_timeout; 1172 __le32 rx_data_timeout;
@@ -1278,8 +1284,8 @@ struct iwl3945_ssid_ie {
1278} __attribute__ ((packed)); 1284} __attribute__ ((packed));
1279 1285
1280#define PROBE_OPTION_MAX 0x4 1286#define PROBE_OPTION_MAX 0x4
1281#define TX_CMD_LIFE_TIME_INFINITE __constant_cpu_to_le32(0xFFFFFFFF) 1287#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
1282#define IWL_GOOD_CRC_TH __constant_cpu_to_le16(1) 1288#define IWL_GOOD_CRC_TH cpu_to_le16(1)
1283#define IWL_MAX_SCAN_SIZE 1024 1289#define IWL_MAX_SCAN_SIZE 1024
1284 1290
1285/* 1291/*
@@ -1379,7 +1385,7 @@ struct iwl3945_scan_cmd {
1379} __attribute__ ((packed)); 1385} __attribute__ ((packed));
1380 1386
1381/* Can abort will notify by complete notification with abort status. */ 1387/* Can abort will notify by complete notification with abort status. */
1382#define CAN_ABORT_STATUS __constant_cpu_to_le32(0x1) 1388#define CAN_ABORT_STATUS cpu_to_le32(0x1)
1383/* complete notification statuses */ 1389/* complete notification statuses */
1384#define ABORT_STATUS 0x2 1390#define ABORT_STATUS 0x2
1385 1391
@@ -1572,8 +1578,8 @@ struct statistics_general {
1572 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag 1578 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
1573 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. 1579 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
1574 */ 1580 */
1575#define IWL_STATS_CONF_CLEAR_STATS __constant_cpu_to_le32(0x1) /* see above */ 1581#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
1576#define IWL_STATS_CONF_DISABLE_NOTIF __constant_cpu_to_le32(0x2)/* see above */ 1582#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
1577struct iwl3945_statistics_cmd { 1583struct iwl3945_statistics_cmd {
1578 __le32 configuration_flags; /* IWL_STATS_CONF_* */ 1584 __le32 configuration_flags; /* IWL_STATS_CONF_* */
1579} __attribute__ ((packed)); 1585} __attribute__ ((packed));
@@ -1593,8 +1599,8 @@ struct iwl3945_statistics_cmd {
1593 * appropriately so that each notification contains statistics for only the 1599 * appropriately so that each notification contains statistics for only the
1594 * one channel that has just been scanned. 1600 * one channel that has just been scanned.
1595 */ 1601 */
1596#define STATISTICS_REPLY_FLG_BAND_24G_MSK __constant_cpu_to_le32(0x2) 1602#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
1597#define STATISTICS_REPLY_FLG_FAT_MODE_MSK __constant_cpu_to_le32(0x8) 1603#define STATISTICS_REPLY_FLG_FAT_MODE_MSK cpu_to_le32(0x8)
1598struct iwl3945_notif_statistics { 1604struct iwl3945_notif_statistics {
1599 __le32 flag; 1605 __le32 flag;
1600 struct statistics_rx rx; 1606 struct statistics_rx rx;
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-core.h b/drivers/net/wireless/iwlwifi/iwl-3945-core.h
index bc12f97ba0b1..6f463555402c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-core.h
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -71,9 +71,33 @@
71#define IWL_SKU_G 0x1 71#define IWL_SKU_G 0x1
72#define IWL_SKU_A 0x2 72#define IWL_SKU_A 0x2
73 73
74/**
75 * struct iwl_3945_cfg
76 * @fw_name_pre: Firmware filename prefix. The api version and extension
77 * (.ucode) will be added to filename before loading from disk. The
78 * filename is constructed as fw_name_pre<api>.ucode.
79 * @ucode_api_max: Highest version of uCode API supported by driver.
80 * @ucode_api_min: Lowest version of uCode API supported by driver.
81 *
82 * We enable the driver to be backward compatible wrt API version. The
83 * driver specifies which APIs it supports (with @ucode_api_max being the
84 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
85 * it has a supported API version. The firmware's API version will be
86 * stored in @iwl_priv, enabling the driver to make runtime changes based
87 * on firmware version used.
88 *
89 * For example,
90 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
91 * Driver interacts with Firmware API version >= 2.
92 * } else {
93 * Driver interacts with Firmware API version 1.
94 * }
95 */
74struct iwl_3945_cfg { 96struct iwl_3945_cfg {
75 const char *name; 97 const char *name;
76 const char *fw_name; 98 const char *fw_name_pre;
99 const unsigned int ucode_api_max;
100 const unsigned int ucode_api_min;
77 unsigned int sku; 101 unsigned int sku;
78}; 102};
79 103
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debug.h b/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
index 33016fb5e9b3..85eb778f9df1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
@@ -21,7 +21,7 @@
21 * file called LICENSE. 21 * file called LICENSE.
22 * 22 *
23 * Contact Information: 23 * Contact Information:
24 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 * 26 *
27 *****************************************************************************/ 27 *****************************************************************************/
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 644bd9e08052..94ea0e60c410 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -103,7 +103,6 @@
103 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. 103 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
104 */ 104 */
105#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ 105#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
106#define IWL_EEPROM_ACCESS_DELAY 10 /* uSec */
107 106
108/* 107/*
109 * Regulatory channel usage flags in EEPROM struct iwl_eeprom_channel.flags. 108 * Regulatory channel usage flags in EEPROM struct iwl_eeprom_channel.flags.
@@ -321,6 +320,7 @@ struct iwl3945_eeprom {
321/* RSSR */ 320/* RSSR */
322#define FH_RSSR_CTRL (FH_RSSR_TABLE+0x000) 321#define FH_RSSR_CTRL (FH_RSSR_TABLE+0x000)
323#define FH_RSSR_STATUS (FH_RSSR_TABLE+0x004) 322#define FH_RSSR_STATUS (FH_RSSR_TABLE+0x004)
323#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
324/* TCSR */ 324/* TCSR */
325#define FH_TCSR(_channel) (FH_TCSR_TABLE+(_channel)*0x20) 325#define FH_TCSR(_channel) (FH_TCSR_TABLE+(_channel)*0x20)
326#define FH_TCSR_CONFIG(_channel) (FH_TCSR(_channel)+0x00) 326#define FH_TCSR_CONFIG(_channel) (FH_TCSR(_channel)+0x00)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-io.h b/drivers/net/wireless/iwlwifi/iwl-3945-io.h
index b3fe48de3ae7..2440fd664dd5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-io.h
@@ -21,7 +21,7 @@
21 * file called LICENSE. 21 * file called LICENSE.
22 * 22 *
23 * Contact Information: 23 * Contact Information:
24 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 * 26 *
27 *****************************************************************************/ 27 *****************************************************************************/
@@ -53,7 +53,7 @@
53 * _iwl3945_read32.) 53 * _iwl3945_read32.)
54 * 54 *
55 * These declarations are *extremely* useful in quickly isolating code deltas 55 * These declarations are *extremely* useful in quickly isolating code deltas
56 * which result in misconfiguring of the hardware I/O. In combination with 56 * which result in misconfiguration of the hardware I/O. In combination with
57 * git-bisect and the IO debug level you can quickly determine the specific 57 * git-bisect and the IO debug level you can quickly determine the specific
58 * commit which breaks the IO sequence to the hardware. 58 * commit which breaks the IO sequence to the hardware.
59 * 59 *
@@ -93,7 +93,7 @@ static inline int _iwl3945_poll_bit(struct iwl3945_priv *priv, u32 addr,
93 do { 93 do {
94 if ((_iwl3945_read32(priv, addr) & mask) == (bits & mask)) 94 if ((_iwl3945_read32(priv, addr) & mask) == (bits & mask))
95 return i; 95 return i;
96 mdelay(10); 96 udelay(10);
97 i += 10; 97 i += 10;
98 } while (i < timeout); 98 } while (i < timeout);
99 99
@@ -107,7 +107,7 @@ static inline int __iwl3945_poll_bit(const char *f, u32 l,
107 int ret = _iwl3945_poll_bit(priv, addr, bits, mask, timeout); 107 int ret = _iwl3945_poll_bit(priv, addr, bits, mask, timeout);
108 IWL_DEBUG_IO("poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n", 108 IWL_DEBUG_IO("poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
109 addr, bits, mask, 109 addr, bits, mask,
110 unlikely(ret == -ETIMEDOUT)?"timeout":"", f, l); 110 unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
111 return ret; 111 return ret;
112} 112}
113#define iwl3945_poll_bit(priv, addr, bits, mask, timeout) \ 113#define iwl3945_poll_bit(priv, addr, bits, mask, timeout) \
@@ -271,16 +271,7 @@ static inline void iwl3945_write_reg_buf(struct iwl3945_priv *priv,
271static inline int _iwl3945_poll_direct_bit(struct iwl3945_priv *priv, 271static inline int _iwl3945_poll_direct_bit(struct iwl3945_priv *priv,
272 u32 addr, u32 mask, int timeout) 272 u32 addr, u32 mask, int timeout)
273{ 273{
274 int i = 0; 274 return _iwl3945_poll_bit(priv, addr, mask, mask, timeout);
275
276 do {
277 if ((_iwl3945_read_direct32(priv, addr) & mask) == mask)
278 return i;
279 mdelay(10);
280 i += 10;
281 } while (i < timeout);
282
283 return -ETIMEDOUT;
284} 275}
285 276
286#ifdef CONFIG_IWL3945_DEBUG 277#ifdef CONFIG_IWL3945_DEBUG
@@ -307,6 +298,7 @@ static inline int __iwl3945_poll_direct_bit(const char *f, u32 l,
307static inline u32 _iwl3945_read_prph(struct iwl3945_priv *priv, u32 reg) 298static inline u32 _iwl3945_read_prph(struct iwl3945_priv *priv, u32 reg)
308{ 299{
309 _iwl3945_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); 300 _iwl3945_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
301 rmb();
310 return _iwl3945_read_direct32(priv, HBUS_TARG_PRPH_RDAT); 302 return _iwl3945_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
311} 303}
312#ifdef CONFIG_IWL3945_DEBUG 304#ifdef CONFIG_IWL3945_DEBUG
@@ -328,6 +320,7 @@ static inline void _iwl3945_write_prph(struct iwl3945_priv *priv,
328{ 320{
329 _iwl3945_write_direct32(priv, HBUS_TARG_PRPH_WADDR, 321 _iwl3945_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
330 ((addr & 0x0000FFFF) | (3 << 24))); 322 ((addr & 0x0000FFFF) | (3 << 24)));
323 wmb();
331 _iwl3945_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val); 324 _iwl3945_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
332} 325}
333#ifdef CONFIG_IWL3945_DEBUG 326#ifdef CONFIG_IWL3945_DEBUG
@@ -389,12 +382,14 @@ static inline void iwl3945_clear_bits_prph(struct iwl3945_priv
389static inline u32 iwl3945_read_targ_mem(struct iwl3945_priv *priv, u32 addr) 382static inline u32 iwl3945_read_targ_mem(struct iwl3945_priv *priv, u32 addr)
390{ 383{
391 iwl3945_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr); 384 iwl3945_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
385 rmb();
392 return iwl3945_read_direct32(priv, HBUS_TARG_MEM_RDAT); 386 return iwl3945_read_direct32(priv, HBUS_TARG_MEM_RDAT);
393} 387}
394 388
395static inline void iwl3945_write_targ_mem(struct iwl3945_priv *priv, u32 addr, u32 val) 389static inline void iwl3945_write_targ_mem(struct iwl3945_priv *priv, u32 addr, u32 val)
396{ 390{
397 iwl3945_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); 391 iwl3945_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
392 wmb();
398 iwl3945_write_direct32(priv, HBUS_TARG_MEM_WDAT, val); 393 iwl3945_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
399} 394}
400 395
@@ -402,6 +397,7 @@ static inline void iwl3945_write_targ_mem_buf(struct iwl3945_priv *priv, u32 add
402 u32 len, u32 *values) 397 u32 len, u32 *values)
403{ 398{
404 iwl3945_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); 399 iwl3945_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
400 wmb();
405 for (; 0 < len; len -= sizeof(u32), values++) 401 for (; 0 < len; len -= sizeof(u32), values++)
406 iwl3945_write_direct32(priv, HBUS_TARG_MEM_WDAT, *values); 402 iwl3945_write_direct32(priv, HBUS_TARG_MEM_WDAT, *values);
407} 403}
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index 705c65bed9fd..4c638909a7db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -19,7 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 2fbd126c1347..749ac035fd6a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -19,7 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 6fc5e7361f26..9b60a0c5de5f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -19,7 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
@@ -63,6 +63,9 @@ struct iwl3945_rs_sta {
63 u8 ibss_sta_added; 63 u8 ibss_sta_added;
64 struct timer_list rate_scale_flush; 64 struct timer_list rate_scale_flush;
65 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT]; 65 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT];
66#ifdef CONFIG_MAC80211_DEBUGFS
67 struct dentry *rs_sta_dbgfs_stats_table_file;
68#endif
66 69
67 /* used to be in sta_info */ 70 /* used to be in sta_info */
68 int last_txrate_idx; 71 int last_txrate_idx;
@@ -114,9 +117,11 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
114}; 117};
115 118
116#define IWL_RATE_MAX_WINDOW 62 119#define IWL_RATE_MAX_WINDOW 62
117#define IWL_RATE_FLUSH (3*HZ/10) 120#define IWL_RATE_FLUSH (3*HZ)
118#define IWL_RATE_WIN_FLUSH (HZ/2) 121#define IWL_RATE_WIN_FLUSH (HZ/2)
119#define IWL_RATE_HIGH_TH 11520 122#define IWL_RATE_HIGH_TH 11520
123#define IWL_SUCCESS_UP_TH 8960
124#define IWL_SUCCESS_DOWN_TH 10880
120#define IWL_RATE_MIN_FAILURE_TH 8 125#define IWL_RATE_MIN_FAILURE_TH 8
121#define IWL_RATE_MIN_SUCCESS_TH 8 126#define IWL_RATE_MIN_SUCCESS_TH 8
122#define IWL_RATE_DECREASE_TH 1920 127#define IWL_RATE_DECREASE_TH 1920
@@ -203,6 +208,7 @@ static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
203 208
204#define IWL_RATE_FLUSH_MAX 5000 /* msec */ 209#define IWL_RATE_FLUSH_MAX 5000 /* msec */
205#define IWL_RATE_FLUSH_MIN 50 /* msec */ 210#define IWL_RATE_FLUSH_MIN 50 /* msec */
211#define IWL_AVERAGE_PACKETS 1500
206 212
207static void iwl3945_bg_rate_scale_flush(unsigned long data) 213static void iwl3945_bg_rate_scale_flush(unsigned long data)
208{ 214{
@@ -217,8 +223,6 @@ static void iwl3945_bg_rate_scale_flush(unsigned long data)
217 223
218 spin_lock_irqsave(&rs_sta->lock, flags); 224 spin_lock_irqsave(&rs_sta->lock, flags);
219 225
220 rs_sta->flush_pending = 0;
221
222 /* Number of packets Rx'd since last time this timer ran */ 226 /* Number of packets Rx'd since last time this timer ran */
223 packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1; 227 packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
224 228
@@ -227,7 +231,6 @@ static void iwl3945_bg_rate_scale_flush(unsigned long data)
227 if (unflushed) { 231 if (unflushed) {
228 duration = 232 duration =
229 jiffies_to_msecs(jiffies - rs_sta->last_partial_flush); 233 jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
230/* duration = jiffies_to_msecs(rs_sta->flush_time); */
231 234
232 IWL_DEBUG_RATE("Tx'd %d packets in %dms\n", 235 IWL_DEBUG_RATE("Tx'd %d packets in %dms\n",
233 packet_count, duration); 236 packet_count, duration);
@@ -239,9 +242,11 @@ static void iwl3945_bg_rate_scale_flush(unsigned long data)
239 pps = 0; 242 pps = 0;
240 243
241 if (pps) { 244 if (pps) {
242 duration = IWL_RATE_FLUSH_MAX / pps; 245 duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
243 if (duration < IWL_RATE_FLUSH_MIN) 246 if (duration < IWL_RATE_FLUSH_MIN)
244 duration = IWL_RATE_FLUSH_MIN; 247 duration = IWL_RATE_FLUSH_MIN;
248 else if (duration > IWL_RATE_FLUSH_MAX)
249 duration = IWL_RATE_FLUSH_MAX;
245 } else 250 } else
246 duration = IWL_RATE_FLUSH_MAX; 251 duration = IWL_RATE_FLUSH_MAX;
247 252
@@ -254,8 +259,10 @@ static void iwl3945_bg_rate_scale_flush(unsigned long data)
254 rs_sta->flush_time); 259 rs_sta->flush_time);
255 260
256 rs_sta->last_partial_flush = jiffies; 261 rs_sta->last_partial_flush = jiffies;
262 } else {
263 rs_sta->flush_time = IWL_RATE_FLUSH;
264 rs_sta->flush_pending = 0;
257 } 265 }
258
259 /* If there weren't any unflushed entries, we don't schedule the timer 266 /* If there weren't any unflushed entries, we don't schedule the timer
260 * to run again */ 267 * to run again */
261 268
@@ -275,17 +282,18 @@ static void iwl3945_bg_rate_scale_flush(unsigned long data)
275 */ 282 */
276static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta, 283static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
277 struct iwl3945_rate_scale_data *window, 284 struct iwl3945_rate_scale_data *window,
278 int success, int retries) 285 int success, int retries, int index)
279{ 286{
280 unsigned long flags; 287 unsigned long flags;
288 s32 fail_count;
281 289
282 if (!retries) { 290 if (!retries) {
283 IWL_DEBUG_RATE("leave: retries == 0 -- should be at least 1\n"); 291 IWL_DEBUG_RATE("leave: retries == 0 -- should be at least 1\n");
284 return; 292 return;
285 } 293 }
286 294
295 spin_lock_irqsave(&rs_sta->lock, flags);
287 while (retries--) { 296 while (retries--) {
288 spin_lock_irqsave(&rs_sta->lock, flags);
289 297
290 /* If we have filled up the window then subtract one from the 298 /* If we have filled up the window then subtract one from the
291 * success counter if the high-bit is counting toward 299 * success counter if the high-bit is counting toward
@@ -313,14 +321,25 @@ static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
313 /* Tag this window as having been updated */ 321 /* Tag this window as having been updated */
314 window->stamp = jiffies; 322 window->stamp = jiffies;
315 323
316 spin_unlock_irqrestore(&rs_sta->lock, flags);
317 } 324 }
325
326 fail_count = window->counter - window->success_counter;
327 if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
328 (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
329 window->average_tpt = ((window->success_ratio *
330 rs_sta->expected_tpt[index] + 64) / 128);
331 else
332 window->average_tpt = IWL_INV_TPT;
333
334 spin_unlock_irqrestore(&rs_sta->lock, flags);
335
318} 336}
319 337
320static void rs_rate_init(void *priv, struct ieee80211_supported_band *sband, 338static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
321 struct ieee80211_sta *sta, void *priv_sta) 339 struct ieee80211_sta *sta, void *priv_sta)
322{ 340{
323 struct iwl3945_rs_sta *rs_sta = priv_sta; 341 struct iwl3945_rs_sta *rs_sta = priv_sta;
342 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r;
324 int i; 343 int i;
325 344
326 IWL_DEBUG_RATE("enter\n"); 345 IWL_DEBUG_RATE("enter\n");
@@ -330,16 +349,21 @@ static void rs_rate_init(void *priv, struct ieee80211_supported_band *sband,
330 * previous packets? Need to have IEEE 802.1X auth succeed immediately 349 * previous packets? Need to have IEEE 802.1X auth succeed immediately
331 * after assoc.. */ 350 * after assoc.. */
332 351
333 for (i = IWL_RATE_COUNT - 1; i >= 0; i--) { 352 for (i = sband->n_bitrates - 1; i >= 0; i--) {
334 if (sta->supp_rates[sband->band] & (1 << i)) { 353 if (sta->supp_rates[sband->band] & (1 << i)) {
335 rs_sta->last_txrate_idx = i; 354 rs_sta->last_txrate_idx = i;
336 break; 355 break;
337 } 356 }
338 } 357 }
339 358
359 priv->sta_supp_rates = sta->supp_rates[sband->band];
340 /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */ 360 /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */
341 if (sband->band == IEEE80211_BAND_5GHZ) 361 if (sband->band == IEEE80211_BAND_5GHZ) {
342 rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 362 rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
363 priv->sta_supp_rates = priv->sta_supp_rates <<
364 IWL_FIRST_OFDM_RATE;
365 }
366
343 367
344 IWL_DEBUG_RATE("leave\n"); 368 IWL_DEBUG_RATE("leave\n");
345} 369}
@@ -355,12 +379,6 @@ static void rs_free(void *priv)
355 return; 379 return;
356} 380}
357 381
358static void rs_clear(void *priv)
359{
360 return;
361}
362
363
364static void *rs_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) 382static void *rs_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
365{ 383{
366 struct iwl3945_rs_sta *rs_sta; 384 struct iwl3945_rs_sta *rs_sta;
@@ -422,34 +440,6 @@ static void rs_free_sta(void *priv, struct ieee80211_sta *sta,
422} 440}
423 441
424 442
425/*
426 * get ieee prev rate from rate scale table.
427 * for A and B mode we need to overright prev
428 * value
429 */
430static int rs_adjust_next_rate(struct iwl3945_priv *priv, int rate)
431{
432 int next_rate = iwl3945_get_prev_ieee_rate(rate);
433
434 switch (priv->band) {
435 case IEEE80211_BAND_5GHZ:
436 if (rate == IWL_RATE_12M_INDEX)
437 next_rate = IWL_RATE_9M_INDEX;
438 else if (rate == IWL_RATE_6M_INDEX)
439 next_rate = IWL_RATE_6M_INDEX;
440 break;
441/* XXX cannot be invoked in current mac80211 so not a regression
442 case MODE_IEEE80211B:
443 if (rate == IWL_RATE_11M_INDEX_TABLE)
444 next_rate = IWL_RATE_5M_INDEX_TABLE;
445 break;
446 */
447 default:
448 break;
449 }
450
451 return next_rate;
452}
453/** 443/**
454 * rs_tx_status - Update rate control values based on Tx results 444 * rs_tx_status - Update rate control values based on Tx results
455 * 445 *
@@ -460,7 +450,7 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
460 struct ieee80211_sta *sta, void *priv_sta, 450 struct ieee80211_sta *sta, void *priv_sta,
461 struct sk_buff *skb) 451 struct sk_buff *skb)
462{ 452{
463 u8 retries, current_count; 453 s8 retries = 0, current_count;
464 int scale_rate_index, first_index, last_index; 454 int scale_rate_index, first_index, last_index;
465 unsigned long flags; 455 unsigned long flags;
466 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_rate; 456 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_rate;
@@ -469,8 +459,9 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
469 459
470 IWL_DEBUG_RATE("enter\n"); 460 IWL_DEBUG_RATE("enter\n");
471 461
472 retries = info->status.retry_count; 462 retries = info->status.rates[0].count;
473 first_index = sband->bitrates[info->tx_rate_idx].hw_value; 463
464 first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
474 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) { 465 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) {
475 IWL_DEBUG_RATE("leave: Rate out of bounds: %d\n", first_index); 466 IWL_DEBUG_RATE("leave: Rate out of bounds: %d\n", first_index);
476 return; 467 return;
@@ -496,13 +487,13 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
496 * at which the frame was finally transmitted (or failed if no 487 * at which the frame was finally transmitted (or failed if no
497 * ACK) 488 * ACK)
498 */ 489 */
499 while (retries > 0) { 490 while (retries > 1) {
500 if (retries < priv->retry_rate) { 491 if ((retries - 1) < priv->retry_rate) {
501 current_count = retries; 492 current_count = (retries - 1);
502 last_index = scale_rate_index; 493 last_index = scale_rate_index;
503 } else { 494 } else {
504 current_count = priv->retry_rate; 495 current_count = priv->retry_rate;
505 last_index = rs_adjust_next_rate(priv, 496 last_index = iwl3945_rs_next_rate(priv,
506 scale_rate_index); 497 scale_rate_index);
507 } 498 }
508 499
@@ -510,15 +501,13 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
510 * as was used for it (per current_count) */ 501 * as was used for it (per current_count) */
511 iwl3945_collect_tx_data(rs_sta, 502 iwl3945_collect_tx_data(rs_sta,
512 &rs_sta->win[scale_rate_index], 503 &rs_sta->win[scale_rate_index],
513 0, current_count); 504 0, current_count, scale_rate_index);
514 IWL_DEBUG_RATE("Update rate %d for %d retries.\n", 505 IWL_DEBUG_RATE("Update rate %d for %d retries.\n",
515 scale_rate_index, current_count); 506 scale_rate_index, current_count);
516 507
517 retries -= current_count; 508 retries -= current_count;
518 509
519 if (retries) 510 scale_rate_index = last_index;
520 scale_rate_index =
521 rs_adjust_next_rate(priv, scale_rate_index);
522 } 511 }
523 512
524 513
@@ -529,7 +518,7 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
529 "success" : "failure"); 518 "success" : "failure");
530 iwl3945_collect_tx_data(rs_sta, 519 iwl3945_collect_tx_data(rs_sta,
531 &rs_sta->win[last_index], 520 &rs_sta->win[last_index],
532 info->flags & IEEE80211_TX_STAT_ACK, 1); 521 info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
533 522
534 /* We updated the rate scale window -- if its been more than 523 /* We updated the rate scale window -- if its been more than
535 * flush_time since the last run, schedule the flush 524 * flush_time since the last run, schedule the flush
@@ -537,9 +526,10 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
537 spin_lock_irqsave(&rs_sta->lock, flags); 526 spin_lock_irqsave(&rs_sta->lock, flags);
538 527
539 if (!rs_sta->flush_pending && 528 if (!rs_sta->flush_pending &&
540 time_after(jiffies, rs_sta->last_partial_flush + 529 time_after(jiffies, rs_sta->last_flush +
541 rs_sta->flush_time)) { 530 rs_sta->flush_time)) {
542 531
532 rs_sta->last_partial_flush = jiffies;
543 rs_sta->flush_pending = 1; 533 rs_sta->flush_pending = 1;
544 mod_timer(&rs_sta->rate_scale_flush, 534 mod_timer(&rs_sta->rate_scale_flush,
545 jiffies + rs_sta->flush_time); 535 jiffies + rs_sta->flush_time);
@@ -630,10 +620,11 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
630 * rate table and must reference the driver allocated rate table 620 * rate table and must reference the driver allocated rate table
631 * 621 *
632 */ 622 */
633static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband, 623static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
634 struct ieee80211_sta *sta, void *priv_sta, 624 void *priv_sta, struct ieee80211_tx_rate_control *txrc)
635 struct sk_buff *skb, struct rate_selection *sel)
636{ 625{
626 struct ieee80211_supported_band *sband = txrc->sband;
627 struct sk_buff *skb = txrc->skb;
637 u8 low = IWL_RATE_INVALID; 628 u8 low = IWL_RATE_INVALID;
638 u8 high = IWL_RATE_INVALID; 629 u8 high = IWL_RATE_INVALID;
639 u16 high_low; 630 u16 high_low;
@@ -649,7 +640,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
649 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 640 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
650 u16 fc, rate_mask; 641 u16 fc, rate_mask;
651 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r; 642 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_r;
652 DECLARE_MAC_BUF(mac); 643 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
653 644
654 IWL_DEBUG_RATE("enter\n"); 645 IWL_DEBUG_RATE("enter\n");
655 646
@@ -660,7 +651,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
660 is_multicast_ether_addr(hdr->addr1) || 651 is_multicast_ether_addr(hdr->addr1) ||
661 !sta || !priv_sta) { 652 !sta || !priv_sta) {
662 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 653 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
663 sel->rate_idx = rate_lowest_index(sband, sta); 654 info->control.rates[0].idx = rate_lowest_index(sband, sta);
664 return; 655 return;
665 } 656 }
666 657
@@ -675,8 +666,8 @@ static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
675 u8 sta_id = iwl3945_hw_find_station(priv, hdr->addr1); 666 u8 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
676 667
677 if (sta_id == IWL_INVALID_STATION) { 668 if (sta_id == IWL_INVALID_STATION) {
678 IWL_DEBUG_RATE("LQ: ADD station %s\n", 669 IWL_DEBUG_RATE("LQ: ADD station %pm\n",
679 print_mac(mac, hdr->addr1)); 670 hdr->addr1);
680 sta_id = iwl3945_add_station(priv, 671 sta_id = iwl3945_add_station(priv,
681 hdr->addr1, 0, CMD_ASYNC); 672 hdr->addr1, 0, CMD_ASYNC);
682 } 673 }
@@ -686,8 +677,13 @@ static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
686 677
687 spin_lock_irqsave(&rs_sta->lock, flags); 678 spin_lock_irqsave(&rs_sta->lock, flags);
688 679
680 /* for recent assoc, choose best rate regarding
681 * to rssi value
682 */
689 if (rs_sta->start_rate != IWL_RATE_INVALID) { 683 if (rs_sta->start_rate != IWL_RATE_INVALID) {
690 index = rs_sta->start_rate; 684 if (rs_sta->start_rate < index &&
685 (rate_mask & (1 << rs_sta->start_rate)))
686 index = rs_sta->start_rate;
691 rs_sta->start_rate = IWL_RATE_INVALID; 687 rs_sta->start_rate = IWL_RATE_INVALID;
692 } 688 }
693 689
@@ -697,7 +693,6 @@ static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
697 693
698 if (((fail_count <= IWL_RATE_MIN_FAILURE_TH) && 694 if (((fail_count <= IWL_RATE_MIN_FAILURE_TH) &&
699 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) { 695 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
700 window->average_tpt = IWL_INV_TPT;
701 spin_unlock_irqrestore(&rs_sta->lock, flags); 696 spin_unlock_irqrestore(&rs_sta->lock, flags);
702 697
703 IWL_DEBUG_RATE("Invalid average_tpt on rate %d: " 698 IWL_DEBUG_RATE("Invalid average_tpt on rate %d: "
@@ -711,8 +706,6 @@ static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
711 706
712 } 707 }
713 708
714 window->average_tpt = ((window->success_ratio *
715 rs_sta->expected_tpt[index] + 64) / 128);
716 current_tpt = window->average_tpt; 709 current_tpt = window->average_tpt;
717 710
718 high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask, 711 high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
@@ -760,13 +753,15 @@ static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
760 } 753 }
761 } 754 }
762 755
763 if ((window->success_ratio > IWL_RATE_HIGH_TH) || 756 if (scale_action == -1) {
764 (current_tpt > window->average_tpt)) { 757 if (window->success_ratio > IWL_SUCCESS_DOWN_TH)
765 IWL_DEBUG_RATE("No action -- success_ratio [%d] > HIGH_TH or " 758 scale_action = 0;
766 "current_tpt [%d] > average_tpt [%d]\n", 759 } else if (scale_action == 1) {
767 window->success_ratio, 760 if (window->success_ratio < IWL_SUCCESS_UP_TH) {
768 current_tpt, window->average_tpt); 761 IWL_DEBUG_RATE("No action -- success_ratio [%d] < "
769 scale_action = 0; 762 "SUCCESS UP\n", window->success_ratio);
763 scale_action = 0;
764 }
770 } 765 }
771 766
772 switch (scale_action) { 767 switch (scale_action) {
@@ -793,24 +788,83 @@ static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
793 788
794 rs_sta->last_txrate_idx = index; 789 rs_sta->last_txrate_idx = index;
795 if (sband->band == IEEE80211_BAND_5GHZ) 790 if (sband->band == IEEE80211_BAND_5GHZ)
796 sel->rate_idx = rs_sta->last_txrate_idx - IWL_FIRST_OFDM_RATE; 791 info->control.rates[0].idx = rs_sta->last_txrate_idx -
792 IWL_FIRST_OFDM_RATE;
797 else 793 else
798 sel->rate_idx = rs_sta->last_txrate_idx; 794 info->control.rates[0].idx = rs_sta->last_txrate_idx;
799 795
800 IWL_DEBUG_RATE("leave: %d\n", index); 796 IWL_DEBUG_RATE("leave: %d\n", index);
801} 797}
802 798
799#ifdef CONFIG_MAC80211_DEBUGFS
800static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
801{
802 file->private_data = inode->i_private;
803 return 0;
804}
805
806static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
807 char __user *user_buf,
808 size_t count, loff_t *ppos)
809{
810 char buff[1024];
811 int desc = 0;
812 int j;
813 struct iwl3945_rs_sta *lq_sta = file->private_data;
814
815 desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
816 "rate=0x%X flush time %d\n",
817 lq_sta->tx_packets,
818 lq_sta->last_txrate_idx,
819 lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
820 for (j = 0; j < IWL_RATE_COUNT; j++) {
821 desc += sprintf(buff+desc,
822 "counter=%d success=%d %%=%d\n",
823 lq_sta->win[j].counter,
824 lq_sta->win[j].success_counter,
825 lq_sta->win[j].success_ratio);
826 }
827 return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
828}
829
830static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
831 .read = iwl3945_sta_dbgfs_stats_table_read,
832 .open = iwl3945_open_file_generic,
833};
834
835static void iwl3945_add_debugfs(void *priv, void *priv_sta,
836 struct dentry *dir)
837{
838 struct iwl3945_rs_sta *lq_sta = priv_sta;
839
840 lq_sta->rs_sta_dbgfs_stats_table_file =
841 debugfs_create_file("rate_stats_table", 0600, dir,
842 lq_sta, &rs_sta_dbgfs_stats_table_ops);
843
844}
845
846static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
847{
848 struct iwl3945_rs_sta *lq_sta = priv_sta;
849 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
850}
851#endif
852
803static struct rate_control_ops rs_ops = { 853static struct rate_control_ops rs_ops = {
804 .module = NULL, 854 .module = NULL,
805 .name = RS_NAME, 855 .name = RS_NAME,
806 .tx_status = rs_tx_status, 856 .tx_status = rs_tx_status,
807 .get_rate = rs_get_rate, 857 .get_rate = rs_get_rate,
808 .rate_init = rs_rate_init, 858 .rate_init = rs_rate_init,
809 .clear = rs_clear,
810 .alloc = rs_alloc, 859 .alloc = rs_alloc,
811 .free = rs_free, 860 .free = rs_free,
812 .alloc_sta = rs_alloc_sta, 861 .alloc_sta = rs_alloc_sta,
813 .free_sta = rs_free_sta, 862 .free_sta = rs_free_sta,
863#ifdef CONFIG_MAC80211_DEBUGFS
864 .add_sta_debugfs = iwl3945_add_debugfs,
865 .remove_sta_debugfs = iwl3945_remove_debugfs,
866#endif
867
814}; 868};
815 869
816void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) 870void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
@@ -827,13 +881,12 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
827 rcu_read_lock(); 881 rcu_read_lock();
828 882
829 sta = ieee80211_find_sta(hw, priv->stations[sta_id].sta.sta.addr); 883 sta = ieee80211_find_sta(hw, priv->stations[sta_id].sta.sta.addr);
830 psta = (void *) sta->drv_priv; 884 if (!sta) {
831 if (!sta || !psta) {
832 IWL_DEBUG_RATE("leave - no private rate data!\n");
833 rcu_read_unlock(); 885 rcu_read_unlock();
834 return; 886 return;
835 } 887 }
836 888
889 psta = (void *) sta->drv_priv;
837 rs_sta = psta->rs_sta; 890 rs_sta = psta->rs_sta;
838 891
839 spin_lock_irqsave(&rs_sta->lock, flags); 892 spin_lock_irqsave(&rs_sta->lock, flags);
@@ -857,7 +910,6 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
857 break; 910 break;
858 } 911 }
859 912
860 rcu_read_unlock();
861 spin_unlock_irqrestore(&rs_sta->lock, flags); 913 spin_unlock_irqrestore(&rs_sta->lock, flags);
862 914
863 rssi = priv->last_rx_rssi; 915 rssi = priv->last_rx_rssi;
@@ -871,6 +923,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
871 IWL_DEBUG_RATE("leave: rssi %d assign rate index: " 923 IWL_DEBUG_RATE("leave: rssi %d assign rate index: "
872 "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate, 924 "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
873 iwl3945_rates[rs_sta->start_rate].plcp); 925 iwl3945_rates[rs_sta->start_rate].plcp);
926 rcu_read_unlock();
874} 927}
875 928
876int iwl3945_rate_control_register(void) 929int iwl3945_rate_control_register(void)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
index 98b17ae6ef24..b5a66135dedd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
@@ -19,7 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 7ca5627cc078..8fdb34222c0a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -19,7 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
@@ -200,7 +200,7 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
200 * priv->eeprom is used to determine if antenna AUX/MAIN are reversed 200 * priv->eeprom is used to determine if antenna AUX/MAIN are reversed
201 * priv->antenna specifies the antenna diversity mode: 201 * priv->antenna specifies the antenna diversity mode:
202 * 202 *
203 * IWL_ANTENNA_DIVERISTY - NIC selects best antenna by itself 203 * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
204 * IWL_ANTENNA_MAIN - Force MAIN antenna 204 * IWL_ANTENNA_MAIN - Force MAIN antenna
205 * IWL_ANTENNA_AUX - Force AUX antenna 205 * IWL_ANTENNA_AUX - Force AUX antenna
206 */ 206 */
@@ -261,6 +261,37 @@ static inline const char *iwl3945_get_tx_fail_reason(u32 status)
261} 261}
262#endif 262#endif
263 263
264/*
265 * get ieee prev rate from rate scale table.
266 * for A and B mode we need to overright prev
267 * value
268 */
269int iwl3945_rs_next_rate(struct iwl3945_priv *priv, int rate)
270{
271 int next_rate = iwl3945_get_prev_ieee_rate(rate);
272
273 switch (priv->band) {
274 case IEEE80211_BAND_5GHZ:
275 if (rate == IWL_RATE_12M_INDEX)
276 next_rate = IWL_RATE_9M_INDEX;
277 else if (rate == IWL_RATE_6M_INDEX)
278 next_rate = IWL_RATE_6M_INDEX;
279 break;
280 case IEEE80211_BAND_2GHZ:
281 if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) &&
282 iwl3945_is_associated(priv)) {
283 if (rate == IWL_RATE_11M_INDEX)
284 next_rate = IWL_RATE_5M_INDEX;
285 }
286 break;
287
288 default:
289 break;
290 }
291
292 return next_rate;
293}
294
264 295
265/** 296/**
266 * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd 297 * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
@@ -308,6 +339,7 @@ static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
308 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 339 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
309 u32 status = le32_to_cpu(tx_resp->status); 340 u32 status = le32_to_cpu(tx_resp->status);
310 int rate_idx; 341 int rate_idx;
342 int fail;
311 343
312 if ((index >= txq->q.n_bd) || (iwl3945_x2_queue_used(&txq->q, index) == 0)) { 344 if ((index >= txq->q.n_bd) || (iwl3945_x2_queue_used(&txq->q, index) == 0)) {
313 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d " 345 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
@@ -318,9 +350,18 @@ static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
318 } 350 }
319 351
320 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); 352 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
321 memset(&info->status, 0, sizeof(info->status)); 353 ieee80211_tx_info_clear_status(info);
354
355 /* Fill the MRR chain with some info about on-chip retransmissions */
356 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
357 if (info->band == IEEE80211_BAND_5GHZ)
358 rate_idx -= IWL_FIRST_OFDM_RATE;
359
360 fail = tx_resp->failure_frame;
361
362 info->status.rates[0].idx = rate_idx;
363 info->status.rates[0].count = fail + 1; /* add final attempt */
322 364
323 info->status.retry_count = tx_resp->failure_frame;
324 /* tx_status->rts_retry_count = tx_resp->failure_rts; */ 365 /* tx_status->rts_retry_count = tx_resp->failure_rts; */
325 info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ? 366 info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
326 IEEE80211_TX_STAT_ACK : 0; 367 IEEE80211_TX_STAT_ACK : 0;
@@ -329,10 +370,6 @@ static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
329 txq_id, iwl3945_get_tx_fail_reason(status), status, 370 txq_id, iwl3945_get_tx_fail_reason(status), status,
330 tx_resp->rate, tx_resp->failure_frame); 371 tx_resp->rate, tx_resp->failure_frame);
331 372
332 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
333 if (info->band == IEEE80211_BAND_5GHZ)
334 rate_idx -= IWL_FIRST_OFDM_RATE;
335 info->tx_rate_idx = rate_idx;
336 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); 373 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
337 iwl3945_tx_queue_reclaim(priv, txq_id, index); 374 iwl3945_tx_queue_reclaim(priv, txq_id, index);
338 375
@@ -756,13 +793,19 @@ int iwl3945_hw_txq_free_tfd(struct iwl3945_priv *priv, struct iwl3945_tx_queue *
756 793
757u8 iwl3945_hw_find_station(struct iwl3945_priv *priv, const u8 *addr) 794u8 iwl3945_hw_find_station(struct iwl3945_priv *priv, const u8 *addr)
758{ 795{
759 int i; 796 int i, start = IWL_AP_ID;
760 int ret = IWL_INVALID_STATION; 797 int ret = IWL_INVALID_STATION;
761 unsigned long flags; 798 unsigned long flags;
762 DECLARE_MAC_BUF(mac); 799
800 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
801 (priv->iw_mode == NL80211_IFTYPE_AP))
802 start = IWL_STA_ID;
803
804 if (is_broadcast_ether_addr(addr))
805 return priv->hw_setting.bcast_sta_id;
763 806
764 spin_lock_irqsave(&priv->sta_lock, flags); 807 spin_lock_irqsave(&priv->sta_lock, flags);
765 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) 808 for (i = start; i < priv->hw_setting.max_stations; i++)
766 if ((priv->stations[i].used) && 809 if ((priv->stations[i].used) &&
767 (!compare_ether_addr 810 (!compare_ether_addr
768 (priv->stations[i].sta.sta.addr, addr))) { 811 (priv->stations[i].sta.sta.addr, addr))) {
@@ -770,8 +813,8 @@ u8 iwl3945_hw_find_station(struct iwl3945_priv *priv, const u8 *addr)
770 goto out; 813 goto out;
771 } 814 }
772 815
773 IWL_DEBUG_INFO("can not find STA %s (total %d)\n", 816 IWL_DEBUG_INFO("can not find STA %pM (total %d)\n",
774 print_mac(mac, addr), priv->num_stations); 817 addr, priv->num_stations);
775 out: 818 out:
776 spin_unlock_irqrestore(&priv->sta_lock, flags); 819 spin_unlock_irqrestore(&priv->sta_lock, flags);
777 return ret; 820 return ret;
@@ -1060,9 +1103,8 @@ int iwl3945_hw_nic_init(struct iwl3945_priv *priv)
1060 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 1103 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1061 1104
1062 iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 1105 iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1063 rc = iwl3945_poll_bit(priv, CSR_GP_CNTRL, 1106 rc = iwl3945_poll_direct_bit(priv, CSR_GP_CNTRL,
1064 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 1107 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1065 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1066 if (rc < 0) { 1108 if (rc < 0) {
1067 spin_unlock_irqrestore(&priv->lock, flags); 1109 spin_unlock_irqrestore(&priv->lock, flags);
1068 IWL_DEBUG_INFO("Failed to init the card\n"); 1110 IWL_DEBUG_INFO("Failed to init the card\n");
@@ -1243,8 +1285,7 @@ int iwl3945_hw_nic_stop_master(struct iwl3945_priv *priv)
1243 IWL_DEBUG_INFO("Card in power save, master is already " 1285 IWL_DEBUG_INFO("Card in power save, master is already "
1244 "stopped\n"); 1286 "stopped\n");
1245 else { 1287 else {
1246 rc = iwl3945_poll_bit(priv, CSR_RESET, 1288 rc = iwl3945_poll_direct_bit(priv, CSR_RESET,
1247 CSR_RESET_REG_FLAG_MASTER_DISABLED,
1248 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 1289 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
1249 if (rc < 0) { 1290 if (rc < 0) {
1250 spin_unlock_irqrestore(&priv->lock, flags); 1291 spin_unlock_irqrestore(&priv->lock, flags);
@@ -1269,9 +1310,8 @@ int iwl3945_hw_nic_reset(struct iwl3945_priv *priv)
1269 1310
1270 iwl3945_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 1311 iwl3945_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1271 1312
1272 rc = iwl3945_poll_bit(priv, CSR_GP_CNTRL, 1313 iwl3945_poll_direct_bit(priv, CSR_GP_CNTRL,
1273 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 1314 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1274 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1275 1315
1276 rc = iwl3945_grab_nic_access(priv); 1316 rc = iwl3945_grab_nic_access(priv);
1277 if (!rc) { 1317 if (!rc) {
@@ -1830,7 +1870,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl3945_priv *priv)
1830 ref_temp = (s16)priv->eeprom.groups[ch_info->group_index]. 1870 ref_temp = (s16)priv->eeprom.groups[ch_info->group_index].
1831 temperature; 1871 temperature;
1832 1872
1833 /* get power index adjustment based on curr and factory 1873 /* get power index adjustment based on current and factory
1834 * temps */ 1874 * temps */
1835 delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature, 1875 delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
1836 ref_temp); 1876 ref_temp);
@@ -2268,7 +2308,8 @@ int iwl3945_hw_rxq_stop(struct iwl3945_priv *priv)
2268 } 2308 }
2269 2309
2270 iwl3945_write_direct32(priv, FH_RCSR_CONFIG(0), 0); 2310 iwl3945_write_direct32(priv, FH_RCSR_CONFIG(0), 0);
2271 rc = iwl3945_poll_direct_bit(priv, FH_RSSR_STATUS, (1 << 24), 1000); 2311 rc = iwl3945_poll_direct_bit(priv, FH_RSSR_STATUS,
2312 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
2272 if (rc < 0) 2313 if (rc < 0)
2273 IWL_ERROR("Can't stop Rx DMA.\n"); 2314 IWL_ERROR("Can't stop Rx DMA.\n");
2274 2315
@@ -2337,7 +2378,8 @@ int iwl3945_init_hw_rate_table(struct iwl3945_priv *priv)
2337 iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0); 2378 iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
2338 table[index].try_cnt = priv->retry_rate; 2379 table[index].try_cnt = priv->retry_rate;
2339 prev_index = iwl3945_get_prev_ieee_rate(i); 2380 prev_index = iwl3945_get_prev_ieee_rate(i);
2340 table[index].next_rate_index = iwl3945_rates[prev_index].table_rs_index; 2381 table[index].next_rate_index =
2382 iwl3945_rates[prev_index].table_rs_index;
2341 } 2383 }
2342 2384
2343 switch (priv->band) { 2385 switch (priv->band) {
@@ -2345,11 +2387,14 @@ int iwl3945_init_hw_rate_table(struct iwl3945_priv *priv)
2345 IWL_DEBUG_RATE("Select A mode rate scale\n"); 2387 IWL_DEBUG_RATE("Select A mode rate scale\n");
2346 /* If one of the following CCK rates is used, 2388 /* If one of the following CCK rates is used,
2347 * have it fall back to the 6M OFDM rate */ 2389 * have it fall back to the 6M OFDM rate */
2348 for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) 2390 for (i = IWL_RATE_1M_INDEX_TABLE;
2349 table[i].next_rate_index = iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index; 2391 i <= IWL_RATE_11M_INDEX_TABLE; i++)
2392 table[i].next_rate_index =
2393 iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
2350 2394
2351 /* Don't fall back to CCK rates */ 2395 /* Don't fall back to CCK rates */
2352 table[IWL_RATE_12M_INDEX_TABLE].next_rate_index = IWL_RATE_9M_INDEX_TABLE; 2396 table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
2397 IWL_RATE_9M_INDEX_TABLE;
2353 2398
2354 /* Don't drop out of OFDM rates */ 2399 /* Don't drop out of OFDM rates */
2355 table[IWL_RATE_6M_INDEX_TABLE].next_rate_index = 2400 table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
@@ -2360,11 +2405,20 @@ int iwl3945_init_hw_rate_table(struct iwl3945_priv *priv)
2360 IWL_DEBUG_RATE("Select B/G mode rate scale\n"); 2405 IWL_DEBUG_RATE("Select B/G mode rate scale\n");
2361 /* If an OFDM rate is used, have it fall back to the 2406 /* If an OFDM rate is used, have it fall back to the
2362 * 1M CCK rates */ 2407 * 1M CCK rates */
2363 for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE; i++)
2364 table[i].next_rate_index = iwl3945_rates[IWL_FIRST_CCK_RATE].table_rs_index;
2365 2408
2366 /* CCK shouldn't fall back to OFDM... */ 2409 if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) &&
2367 table[IWL_RATE_11M_INDEX_TABLE].next_rate_index = IWL_RATE_5M_INDEX_TABLE; 2410 iwl3945_is_associated(priv)) {
2411
2412 index = IWL_FIRST_CCK_RATE;
2413 for (i = IWL_RATE_6M_INDEX_TABLE;
2414 i <= IWL_RATE_54M_INDEX_TABLE; i++)
2415 table[i].next_rate_index =
2416 iwl3945_rates[index].table_rs_index;
2417
2418 index = IWL_RATE_11M_INDEX_TABLE;
2419 /* CCK shouldn't fall back to OFDM... */
2420 table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
2421 }
2368 break; 2422 break;
2369 2423
2370 default: 2424 default:
@@ -2428,7 +2482,6 @@ unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv,
2428 2482
2429 frame_size = iwl3945_fill_beacon_frame(priv, 2483 frame_size = iwl3945_fill_beacon_frame(priv,
2430 tx_beacon_cmd->frame, 2484 tx_beacon_cmd->frame,
2431 iwl3945_broadcast_addr,
2432 sizeof(frame->u) - sizeof(*tx_beacon_cmd)); 2485 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
2433 2486
2434 BUG_ON(frame_size > MAX_MPDU_SIZE); 2487 BUG_ON(frame_size > MAX_MPDU_SIZE);
@@ -2467,13 +2520,17 @@ void iwl3945_hw_cancel_deferred_work(struct iwl3945_priv *priv)
2467 2520
2468static struct iwl_3945_cfg iwl3945_bg_cfg = { 2521static struct iwl_3945_cfg iwl3945_bg_cfg = {
2469 .name = "3945BG", 2522 .name = "3945BG",
2470 .fw_name = "iwlwifi-3945" IWL3945_UCODE_API ".ucode", 2523 .fw_name_pre = IWL3945_FW_PRE,
2524 .ucode_api_max = IWL3945_UCODE_API_MAX,
2525 .ucode_api_min = IWL3945_UCODE_API_MIN,
2471 .sku = IWL_SKU_G, 2526 .sku = IWL_SKU_G,
2472}; 2527};
2473 2528
2474static struct iwl_3945_cfg iwl3945_abg_cfg = { 2529static struct iwl_3945_cfg iwl3945_abg_cfg = {
2475 .name = "3945ABG", 2530 .name = "3945ABG",
2476 .fw_name = "iwlwifi-3945" IWL3945_UCODE_API ".ucode", 2531 .fw_name_pre = IWL3945_FW_PRE,
2532 .ucode_api_max = IWL3945_UCODE_API_MAX,
2533 .ucode_api_min = IWL3945_UCODE_API_MIN,
2477 .sku = IWL_SKU_A|IWL_SKU_G, 2534 .sku = IWL_SKU_A|IWL_SKU_G,
2478}; 2535};
2479 2536
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index bdd32475b99c..2c0ddc5110c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -19,7 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
@@ -50,11 +50,15 @@ extern struct pci_device_id iwl3945_hw_card_ids[];
50#include "iwl-3945-debug.h" 50#include "iwl-3945-debug.h"
51#include "iwl-3945-led.h" 51#include "iwl-3945-led.h"
52 52
53/* Change firmware file name, using "-" and incrementing number, 53/* Highest firmware API version supported */
54 * *only* when uCode interface or architecture changes so that it 54#define IWL3945_UCODE_API_MAX 2
55 * is not compatible with earlier drivers. 55
56 * This number will also appear in << 8 position of 1st dword of uCode file */ 56/* Lowest firmware API version supported */
57#define IWL3945_UCODE_API "-1" 57#define IWL3945_UCODE_API_MIN 1
58
59#define IWL3945_FW_PRE "iwlwifi-3945-"
60#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
61#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
58 62
59/* Default noise level to report when noise measurement is not available. 63/* Default noise level to report when noise measurement is not available.
60 * This may be because we're: 64 * This may be because we're:
@@ -401,12 +405,6 @@ struct iwl3945_rx_queue {
401 405
402#define SCAN_INTERVAL 100 406#define SCAN_INTERVAL 100
403 407
404#define MAX_A_CHANNELS 252
405#define MIN_A_CHANNELS 7
406
407#define MAX_B_CHANNELS 14
408#define MIN_B_CHANNELS 1
409
410#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ 408#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
411#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */ 409#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
412#define STATUS_INT_ENABLED 2 410#define STATUS_INT_ENABLED 2
@@ -472,7 +470,6 @@ union iwl3945_qos_capabity {
472 470
473/* QoS structures */ 471/* QoS structures */
474struct iwl3945_qos_info { 472struct iwl3945_qos_info {
475 int qos_enable;
476 int qos_active; 473 int qos_active;
477 union iwl3945_qos_capabity qos_cap; 474 union iwl3945_qos_capabity qos_cap;
478 struct iwl3945_qosparam_cmd def_qos_parm; 475 struct iwl3945_qosparam_cmd def_qos_parm;
@@ -505,7 +502,7 @@ struct fw_desc {
505 502
506/* uCode file layout */ 503/* uCode file layout */
507struct iwl3945_ucode { 504struct iwl3945_ucode {
508 __le32 ver; /* major/minor/subminor */ 505 __le32 ver; /* major/minor/API/serial */
509 __le32 inst_size; /* bytes of runtime instructions */ 506 __le32 inst_size; /* bytes of runtime instructions */
510 __le32 data_size; /* bytes of runtime data */ 507 __le32 data_size; /* bytes of runtime data */
511 __le32 init_size; /* bytes of initialization instructions */ 508 __le32 init_size; /* bytes of initialization instructions */
@@ -587,8 +584,7 @@ extern int iwl3945_send_cmd_pdu(struct iwl3945_priv *priv, u8 id, u16 len,
587extern int __must_check iwl3945_send_cmd(struct iwl3945_priv *priv, 584extern int __must_check iwl3945_send_cmd(struct iwl3945_priv *priv,
588 struct iwl3945_host_cmd *cmd); 585 struct iwl3945_host_cmd *cmd);
589extern unsigned int iwl3945_fill_beacon_frame(struct iwl3945_priv *priv, 586extern unsigned int iwl3945_fill_beacon_frame(struct iwl3945_priv *priv,
590 struct ieee80211_hdr *hdr, 587 struct ieee80211_hdr *hdr,int left);
591 const u8 *dest, int left);
592extern int iwl3945_rx_queue_update_write_ptr(struct iwl3945_priv *priv, 588extern int iwl3945_rx_queue_update_write_ptr(struct iwl3945_priv *priv,
593 struct iwl3945_rx_queue *q); 589 struct iwl3945_rx_queue *q);
594extern int iwl3945_send_statistics_request(struct iwl3945_priv *priv); 590extern int iwl3945_send_statistics_request(struct iwl3945_priv *priv);
@@ -762,6 +758,8 @@ struct iwl3945_priv {
762 void __iomem *hw_base; 758 void __iomem *hw_base;
763 759
764 /* uCode images, save to reload in case of failure */ 760 /* uCode images, save to reload in case of failure */
761 u32 ucode_ver; /* ucode version, copy of
762 iwl3945_ucode.ver */
765 struct fw_desc ucode_code; /* runtime inst */ 763 struct fw_desc ucode_code; /* runtime inst */
766 struct fw_desc ucode_data; /* runtime data original */ 764 struct fw_desc ucode_data; /* runtime data original */
767 struct fw_desc ucode_data_backup; /* runtime data save/restore */ 765 struct fw_desc ucode_data_backup; /* runtime data save/restore */
@@ -804,6 +802,8 @@ struct iwl3945_priv {
804 u16 active_rate; 802 u16 active_rate;
805 u16 active_rate_basic; 803 u16 active_rate_basic;
806 804
805 u32 sta_supp_rates;
806
807 u8 call_post_assoc_from_beacon; 807 u8 call_post_assoc_from_beacon;
808 /* Rate scaling data */ 808 /* Rate scaling data */
809 s8 data_retry_limit; 809 s8 data_retry_limit;
@@ -828,8 +828,6 @@ struct iwl3945_priv {
828 unsigned long last_statistics_time; 828 unsigned long last_statistics_time;
829 829
830 /* context information */ 830 /* context information */
831 u8 essid[IW_ESSID_MAX_SIZE];
832 u8 essid_len;
833 u16 rates_mask; 831 u16 rates_mask;
834 832
835 u32 power_mode; 833 u32 power_mode;
@@ -888,7 +886,6 @@ struct iwl3945_priv {
888 struct work_struct report_work; 886 struct work_struct report_work;
889 struct work_struct request_scan; 887 struct work_struct request_scan;
890 struct work_struct beacon_update; 888 struct work_struct beacon_update;
891 struct work_struct set_monitor;
892 889
893 struct tasklet_struct irq_tasklet; 890 struct tasklet_struct irq_tasklet;
894 891
@@ -903,9 +900,6 @@ struct iwl3945_priv {
903 s8 user_txpower_limit; 900 s8 user_txpower_limit;
904 s8 max_channel_txpower_limit; 901 s8 max_channel_txpower_limit;
905 902
906#ifdef CONFIG_PM
907 u32 pm_state[16];
908#endif
909 903
910#ifdef CONFIG_IWL3945_DEBUG 904#ifdef CONFIG_IWL3945_DEBUG
911 /* debugging info */ 905 /* debugging info */
@@ -954,6 +948,8 @@ static inline int is_channel_ibss(const struct iwl3945_channel_info *ch)
954extern const struct iwl3945_channel_info *iwl3945_get_channel_info( 948extern const struct iwl3945_channel_info *iwl3945_get_channel_info(
955 const struct iwl3945_priv *priv, enum ieee80211_band band, u16 channel); 949 const struct iwl3945_priv *priv, enum ieee80211_band band, u16 channel);
956 950
951extern int iwl3945_rs_next_rate(struct iwl3945_priv *priv, int rate);
952
957/* Requires full declaration of iwl3945_priv before including */ 953/* Requires full declaration of iwl3945_priv before including */
958#include "iwl-3945-io.h" 954#include "iwl-3945-io.h"
959 955
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index f4793a609443..6649f7b55650 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -71,7 +71,7 @@
71 71
72#include "iwl-fh.h" 72#include "iwl-fh.h"
73 73
74/* EERPROM */ 74/* EEPROM */
75#define IWL4965_EEPROM_IMG_SIZE 1024 75#define IWL4965_EEPROM_IMG_SIZE 1024
76 76
77/* 77/*
@@ -84,12 +84,6 @@
84#define IWL_CMD_FIFO_NUM 4 84#define IWL_CMD_FIFO_NUM 4
85#define IWL49_FIRST_AMPDU_QUEUE 7 85#define IWL49_FIRST_AMPDU_QUEUE 7
86 86
87/* Tx rates */
88#define IWL_CCK_RATES 4
89#define IWL_OFDM_RATES 8
90#define IWL_HT_RATES 16
91#define IWL_MAX_RATES (IWL_CCK_RATES+IWL_OFDM_RATES+IWL_HT_RATES)
92
93/* Time constants */ 87/* Time constants */
94#define SHORT_SLOT_TIME 9 88#define SHORT_SLOT_TIME 9
95#define LONG_SLOT_TIME 20 89#define LONG_SLOT_TIME 20
@@ -111,7 +105,6 @@
111#define PCI_CFG_CMD_REG_INT_DIS_MSK 0x04 105#define PCI_CFG_CMD_REG_INT_DIS_MSK 0x04
112#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) 106#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
113 107
114#define TFD_QUEUE_SIZE_MAX (256)
115 108
116#define IWL_NUM_SCAN_RATES (2) 109#define IWL_NUM_SCAN_RATES (2)
117 110
@@ -287,13 +280,13 @@ static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
287 * that target txpower. 280 * that target txpower.
288 * 281 *
289 * 282 *
290 * 3) Determine (EEPROM) calibration subband for the target channel, by 283 * 3) Determine (EEPROM) calibration sub band for the target channel, by
291 * comparing against first and last channels in each subband 284 * comparing against first and last channels in each sub band
292 * (see struct iwl4965_eeprom_calib_subband_info). 285 * (see struct iwl4965_eeprom_calib_subband_info).
293 * 286 *
294 * 287 *
295 * 4) Linearly interpolate (EEPROM) factory calibration measurement sets, 288 * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
296 * referencing the 2 factory-measured (sample) channels within the subband. 289 * referencing the 2 factory-measured (sample) channels within the sub band.
297 * 290 *
298 * Interpolation is based on difference between target channel's frequency 291 * Interpolation is based on difference between target channel's frequency
299 * and the sample channels' frequencies. Since channel numbers are based 292 * and the sample channels' frequencies. Since channel numbers are based
@@ -301,7 +294,7 @@ static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
301 * to interpolating based on channel number differences. 294 * to interpolating based on channel number differences.
302 * 295 *
303 * Note that the sample channels may or may not be the channels at the 296 * Note that the sample channels may or may not be the channels at the
304 * edges of the subband. The target channel may be "outside" of the 297 * edges of the sub band. The target channel may be "outside" of the
305 * span of the sampled channels. 298 * span of the sampled channels.
306 * 299 *
307 * Driver may choose the pair (for 2 Tx chains) of measurements (see 300 * Driver may choose the pair (for 2 Tx chains) of measurements (see
@@ -345,7 +338,7 @@ static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
345 * "4965 temperature calculation". 338 * "4965 temperature calculation".
346 * 339 *
347 * If current temperature is higher than factory temperature, driver must 340 * If current temperature is higher than factory temperature, driver must
348 * increase gain (lower gain table index), and vice versa. 341 * increase gain (lower gain table index), and vice verse.
349 * 342 *
350 * Temperature affects gain differently for different channels: 343 * Temperature affects gain differently for different channels:
351 * 344 *
@@ -815,125 +808,14 @@ enum {
815 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array 808 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
816 * in DRAM containing 256 Transmit Frame Descriptors (TFDs). 809 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
817 */ 810 */
818#define IWL49_MAX_WIN_SIZE 64
819#define IWL49_QUEUE_SIZE 256
820#define IWL49_NUM_FIFOS 7 811#define IWL49_NUM_FIFOS 7
821#define IWL49_CMD_FIFO_NUM 4 812#define IWL49_CMD_FIFO_NUM 4
822#define IWL49_NUM_QUEUES 16 813#define IWL49_NUM_QUEUES 16
823#define IWL49_NUM_AMPDU_QUEUES 8 814#define IWL49_NUM_AMPDU_QUEUES 8
824 815
825/**
826 * struct iwl_tfd_frame_data
827 *
828 * Describes up to 2 buffers containing (contiguous) portions of a Tx frame.
829 * Each buffer must be on dword boundary.
830 * Up to 10 iwl_tfd_frame_data structures, describing up to 20 buffers,
831 * may be filled within a TFD (iwl_tfd_frame).
832 *
833 * Bit fields in tb1_addr:
834 * 31- 0: Tx buffer 1 address bits [31:0]
835 *
836 * Bit fields in val1:
837 * 31-16: Tx buffer 2 address bits [15:0]
838 * 15- 4: Tx buffer 1 length (bytes)
839 * 3- 0: Tx buffer 1 address bits [32:32]
840 *
841 * Bit fields in val2:
842 * 31-20: Tx buffer 2 length (bytes)
843 * 19- 0: Tx buffer 2 address bits [35:16]
844 */
845struct iwl_tfd_frame_data {
846 __le32 tb1_addr;
847
848 __le32 val1;
849 /* __le32 ptb1_32_35:4; */
850#define IWL_tb1_addr_hi_POS 0
851#define IWL_tb1_addr_hi_LEN 4
852#define IWL_tb1_addr_hi_SYM val1
853 /* __le32 tb_len1:12; */
854#define IWL_tb1_len_POS 4
855#define IWL_tb1_len_LEN 12
856#define IWL_tb1_len_SYM val1
857 /* __le32 ptb2_0_15:16; */
858#define IWL_tb2_addr_lo16_POS 16
859#define IWL_tb2_addr_lo16_LEN 16
860#define IWL_tb2_addr_lo16_SYM val1
861
862 __le32 val2;
863 /* __le32 ptb2_16_35:20; */
864#define IWL_tb2_addr_hi20_POS 0
865#define IWL_tb2_addr_hi20_LEN 20
866#define IWL_tb2_addr_hi20_SYM val2
867 /* __le32 tb_len2:12; */
868#define IWL_tb2_len_POS 20
869#define IWL_tb2_len_LEN 12
870#define IWL_tb2_len_SYM val2
871} __attribute__ ((packed));
872
873 816
874/** 817/**
875 * struct iwl_tfd_frame 818 * struct iwl4965_schedq_bc_tbl
876 *
877 * Transmit Frame Descriptor (TFD)
878 *
879 * 4965 supports up to 16 Tx queues resident in host DRAM.
880 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
881 * Both driver and device share these circular buffers, each of which must be
882 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes for 4965.
883 *
884 * Driver must indicate the physical address of the base of each
885 * circular buffer via the 4965's FH_MEM_CBBC_QUEUE registers.
886 *
887 * Each TFD contains pointer/size information for up to 20 data buffers
888 * in host DRAM. These buffers collectively contain the (one) frame described
889 * by the TFD. Each buffer must be a single contiguous block of memory within
890 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
891 * of (4K - 4). The 4965 concatenates all of a TFD's buffers into a single
892 * Tx frame, up to 8 KBytes in size.
893 *
894 * Bit fields in the control dword (val0):
895 * 31-30: # dwords (0-3) of padding required at end of frame for 16-byte bound
896 * 29: reserved
897 * 28-24: # Transmit Buffer Descriptors in TFD
898 * 23- 0: reserved
899 *
900 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
901 */
902struct iwl_tfd_frame {
903 __le32 val0;
904 /* __le32 rsvd1:24; */
905 /* __le32 num_tbs:5; */
906#define IWL_num_tbs_POS 24
907#define IWL_num_tbs_LEN 5
908#define IWL_num_tbs_SYM val0
909 /* __le32 rsvd2:1; */
910 /* __le32 padding:2; */
911 struct iwl_tfd_frame_data pa[10];
912 __le32 reserved;
913} __attribute__ ((packed));
914
915
916/**
917 * struct iwl4965_queue_byte_cnt_entry
918 *
919 * Byte Count Table Entry
920 *
921 * Bit fields:
922 * 15-12: reserved
923 * 11- 0: total to-be-transmitted byte count of frame (does not include command)
924 */
925struct iwl4965_queue_byte_cnt_entry {
926 __le16 val;
927 /* __le16 byte_cnt:12; */
928#define IWL_byte_cnt_POS 0
929#define IWL_byte_cnt_LEN 12
930#define IWL_byte_cnt_SYM val
931 /* __le16 rsvd:4; */
932} __attribute__ ((packed));
933
934
935/**
936 * struct iwl4965_sched_queue_byte_cnt_tbl
937 * 819 *
938 * Byte Count table 820 * Byte Count table
939 * 821 *
@@ -947,71 +829,12 @@ struct iwl4965_queue_byte_cnt_entry {
947 * count table for the chosen Tx queue. If the TFD index is 0-63, the driver 829 * count table for the chosen Tx queue. If the TFD index is 0-63, the driver
948 * must duplicate the byte count entry in corresponding index 256-319. 830 * must duplicate the byte count entry in corresponding index 256-319.
949 * 831 *
950 * "dont_care" padding puts each byte count table on a 1024-byte boundary; 832 * padding puts each byte count table on a 1024-byte boundary;
951 * 4965 assumes tables are separated by 1024 bytes. 833 * 4965 assumes tables are separated by 1024 bytes.
952 */ 834 */
953struct iwl4965_sched_queue_byte_cnt_tbl { 835struct iwl4965_scd_bc_tbl {
954 struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL49_QUEUE_SIZE + 836 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
955 IWL49_MAX_WIN_SIZE]; 837 u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
956 u8 dont_care[1024 -
957 (IWL49_QUEUE_SIZE + IWL49_MAX_WIN_SIZE) *
958 sizeof(__le16)];
959} __attribute__ ((packed));
960
961
962/**
963 * struct iwl4965_shared - handshake area for Tx and Rx
964 *
965 * For convenience in allocating memory, this structure combines 2 areas of
966 * DRAM which must be shared between driver and 4965. These do not need to
967 * be combined, if better allocation would result from keeping them separate:
968 *
969 * 1) The Tx byte count tables occupy 1024 bytes each (16 KBytes total for
970 * 16 queues). Driver uses SCD_DRAM_BASE_ADDR to tell 4965 where to find
971 * the first of these tables. 4965 assumes tables are 1024 bytes apart.
972 *
973 * 2) The Rx status (val0 and val1) occupies only 8 bytes. Driver uses
974 * FH_RSCSR_CHNL0_STTS_WPTR_REG to tell 4965 where to find this area.
975 * Driver reads val0 to determine the latest Receive Buffer Descriptor (RBD)
976 * that has been filled by the 4965.
977 *
978 * Bit fields val0:
979 * 31-12: Not used
980 * 11- 0: Index of last filled Rx buffer descriptor (4965 writes, driver reads)
981 *
982 * Bit fields val1:
983 * 31- 0: Not used
984 */
985struct iwl4965_shared {
986 struct iwl4965_sched_queue_byte_cnt_tbl
987 queues_byte_cnt_tbls[IWL49_NUM_QUEUES];
988 __le32 rb_closed;
989
990 /* __le32 rb_closed_stts_rb_num:12; */
991#define IWL_rb_closed_stts_rb_num_POS 0
992#define IWL_rb_closed_stts_rb_num_LEN 12
993#define IWL_rb_closed_stts_rb_num_SYM rb_closed
994 /* __le32 rsrv1:4; */
995 /* __le32 rb_closed_stts_rx_frame_num:12; */
996#define IWL_rb_closed_stts_rx_frame_num_POS 16
997#define IWL_rb_closed_stts_rx_frame_num_LEN 12
998#define IWL_rb_closed_stts_rx_frame_num_SYM rb_closed
999 /* __le32 rsrv2:4; */
1000
1001 __le32 frm_finished;
1002 /* __le32 frame_finished_stts_rb_num:12; */
1003#define IWL_frame_finished_stts_rb_num_POS 0
1004#define IWL_frame_finished_stts_rb_num_LEN 12
1005#define IWL_frame_finished_stts_rb_num_SYM frm_finished
1006 /* __le32 rsrv3:4; */
1007 /* __le32 frame_finished_stts_rx_frame_num:12; */
1008#define IWL_frame_finished_stts_rx_frame_num_POS 16
1009#define IWL_frame_finished_stts_rx_frame_num_LEN 12
1010#define IWL_frame_finished_stts_rx_frame_num_SYM frm_finished
1011 /* __le32 rsrv4:4; */
1012
1013 __le32 padding1; /* so that allocation will be aligned to 16B */
1014 __le32 padding2;
1015} __attribute__ ((packed)); 838} __attribute__ ((packed));
1016 839
1017#endif /* __iwl4965_4965_hw_h__ */ 840#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9838de5f4369..5a72bc0377de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -19,7 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
@@ -48,18 +48,21 @@
48static int iwl4965_send_tx_power(struct iwl_priv *priv); 48static int iwl4965_send_tx_power(struct iwl_priv *priv);
49static int iwl4965_hw_get_temperature(const struct iwl_priv *priv); 49static int iwl4965_hw_get_temperature(const struct iwl_priv *priv);
50 50
51/* Change firmware file name, using "-" and incrementing number, 51/* Highest firmware API version supported */
52 * *only* when uCode interface or architecture changes so that it 52#define IWL4965_UCODE_API_MAX 2
53 * is not compatible with earlier drivers. 53
54 * This number will also appear in << 8 position of 1st dword of uCode file */ 54/* Lowest firmware API version supported */
55#define IWL4965_UCODE_API "-2" 55#define IWL4965_UCODE_API_MIN 2
56
57#define IWL4965_FW_PRE "iwlwifi-4965-"
58#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
59#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
56 60
57 61
58/* module parameters */ 62/* module parameters */
59static struct iwl_mod_params iwl4965_mod_params = { 63static struct iwl_mod_params iwl4965_mod_params = {
60 .num_of_queues = IWL49_NUM_QUEUES, 64 .num_of_queues = IWL49_NUM_QUEUES,
61 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES, 65 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
62 .enable_qos = 1,
63 .amsdu_size_8K = 1, 66 .amsdu_size_8K = 1,
64 .restart_fw = 1, 67 .restart_fw = 1,
65 /* the rest are 0 by default */ 68 /* the rest are 0 by default */
@@ -246,7 +249,7 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
246 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, 249 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
247 priv->ucode_data.len); 250 priv->ucode_data.len);
248 251
249 /* Inst bytecount must be last to set up, bit 31 signals uCode 252 /* Inst byte count must be last to set up, bit 31 signals uCode
250 * that all new ptr/size info is in place */ 253 * that all new ptr/size info is in place */
251 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, 254 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
252 priv->ucode_code.len | BSM_DRAM_INST_LOAD); 255 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
@@ -318,31 +321,13 @@ static int is_fat_channel(__le32 rxon_flags)
318/* 321/*
319 * EEPROM handlers 322 * EEPROM handlers
320 */ 323 */
321 324static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv)
322static int iwl4965_eeprom_check_version(struct iwl_priv *priv)
323{ 325{
324 u16 eeprom_ver; 326 return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
325 u16 calib_ver;
326
327 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
328
329 calib_ver = iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
330
331 if (eeprom_ver < EEPROM_4965_EEPROM_VERSION ||
332 calib_ver < EEPROM_4965_TX_POWER_VERSION)
333 goto err;
334
335 return 0;
336err:
337 IWL_ERROR("Unsuported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
338 eeprom_ver, EEPROM_4965_EEPROM_VERSION,
339 calib_ver, EEPROM_4965_TX_POWER_VERSION);
340 return -EINVAL;
341
342} 327}
343 328
344/* 329/*
345 * Activate/Deactivat Tx DMA/FIFO channels according tx fifos mask 330 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
346 * must be called under priv->lock and mac access 331 * must be called under priv->lock and mac access
347 */ 332 */
348static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask) 333static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
@@ -366,9 +351,8 @@ static int iwl4965_apm_init(struct iwl_priv *priv)
366 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 351 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
367 352
368 /* wait for clock stabilization */ 353 /* wait for clock stabilization */
369 ret = iwl_poll_bit(priv, CSR_GP_CNTRL, 354 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
370 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 355 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
371 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
372 if (ret < 0) { 356 if (ret < 0) {
373 IWL_DEBUG_INFO("Failed to init the card\n"); 357 IWL_DEBUG_INFO("Failed to init the card\n");
374 goto out; 358 goto out;
@@ -414,7 +398,7 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
414 398
415 /* L1 is enabled by BIOS */ 399 /* L1 is enabled by BIOS */
416 if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN) 400 if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
417 /* diable L0S disabled L1A enabled */ 401 /* disable L0S disabled L1A enabled */
418 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 402 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
419 else 403 else
420 /* L0S enabled L1A disabled */ 404 /* L0S enabled L1A disabled */
@@ -442,7 +426,6 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
442 426
443static int iwl4965_apm_stop_master(struct iwl_priv *priv) 427static int iwl4965_apm_stop_master(struct iwl_priv *priv)
444{ 428{
445 int ret = 0;
446 unsigned long flags; 429 unsigned long flags;
447 430
448 spin_lock_irqsave(&priv->lock, flags); 431 spin_lock_irqsave(&priv->lock, flags);
@@ -450,17 +433,13 @@ static int iwl4965_apm_stop_master(struct iwl_priv *priv)
450 /* set stop master bit */ 433 /* set stop master bit */
451 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 434 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
452 435
453 ret = iwl_poll_bit(priv, CSR_RESET, 436 iwl_poll_direct_bit(priv, CSR_RESET,
454 CSR_RESET_REG_FLAG_MASTER_DISABLED, 437 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
455 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
456 if (ret < 0)
457 goto out;
458 438
459out:
460 spin_unlock_irqrestore(&priv->lock, flags); 439 spin_unlock_irqrestore(&priv->lock, flags);
461 IWL_DEBUG_INFO("stop master\n"); 440 IWL_DEBUG_INFO("stop master\n");
462 441
463 return ret; 442 return 0;
464} 443}
465 444
466static void iwl4965_apm_stop(struct iwl_priv *priv) 445static void iwl4965_apm_stop(struct iwl_priv *priv)
@@ -496,11 +475,9 @@ static int iwl4965_apm_reset(struct iwl_priv *priv)
496 475
497 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 476 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
498 477
499 ret = iwl_poll_bit(priv, CSR_RESET, 478 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
500 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 479 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
501 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25); 480 if (ret < 0)
502
503 if (ret)
504 goto out; 481 goto out;
505 482
506 udelay(10); 483 udelay(10);
@@ -537,10 +514,10 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
537 struct iwl_chain_noise_data *data = &(priv->chain_noise_data); 514 struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
538 515
539 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { 516 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
540 struct iwl4965_calibration_cmd cmd; 517 struct iwl_calib_diff_gain_cmd cmd;
541 518
542 memset(&cmd, 0, sizeof(cmd)); 519 memset(&cmd, 0, sizeof(cmd));
543 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; 520 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
544 cmd.diff_gain_a = 0; 521 cmd.diff_gain_a = 0;
545 cmd.diff_gain_b = 0; 522 cmd.diff_gain_b = 0;
546 cmd.diff_gain_c = 0; 523 cmd.diff_gain_c = 0;
@@ -587,11 +564,11 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
587 564
588 /* Differential gain gets sent to uCode only once */ 565 /* Differential gain gets sent to uCode only once */
589 if (!data->radio_write) { 566 if (!data->radio_write) {
590 struct iwl4965_calibration_cmd cmd; 567 struct iwl_calib_diff_gain_cmd cmd;
591 data->radio_write = 1; 568 data->radio_write = 1;
592 569
593 memset(&cmd, 0, sizeof(cmd)); 570 memset(&cmd, 0, sizeof(cmd));
594 cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD; 571 cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
595 cmd.diff_gain_a = data->delta_gain_code[0]; 572 cmd.diff_gain_a = data->delta_gain_code[0];
596 cmd.diff_gain_b = data->delta_gain_code[1]; 573 cmd.diff_gain_b = data->delta_gain_code[1];
597 cmd.diff_gain_c = data->delta_gain_code[2]; 574 cmd.diff_gain_c = data->delta_gain_code[2];
@@ -619,10 +596,10 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
619static void iwl4965_rts_tx_cmd_flag(struct ieee80211_tx_info *info, 596static void iwl4965_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
620 __le32 *tx_flags) 597 __le32 *tx_flags)
621{ 598{
622 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { 599 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
623 *tx_flags |= TX_CMD_FLG_RTS_MSK; 600 *tx_flags |= TX_CMD_FLG_RTS_MSK;
624 *tx_flags &= ~TX_CMD_FLG_CTS_MSK; 601 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
625 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { 602 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
626 *tx_flags &= ~TX_CMD_FLG_RTS_MSK; 603 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
627 *tx_flags |= TX_CMD_FLG_CTS_MSK; 604 *tx_flags |= TX_CMD_FLG_CTS_MSK;
628 } 605 }
@@ -643,7 +620,7 @@ static void iwl4965_bg_txpower_work(struct work_struct *work)
643 620
644 mutex_lock(&priv->mutex); 621 mutex_lock(&priv->mutex);
645 622
646 /* Regardless of if we are assocaited, we must reconfigure the 623 /* Regardless of if we are associated, we must reconfigure the
647 * TX power since frames can be sent on non-radar channels while 624 * TX power since frames can be sent on non-radar channels while
648 * not associated */ 625 * not associated */
649 iwl4965_send_tx_power(priv); 626 iwl4965_send_tx_power(priv);
@@ -679,7 +656,7 @@ static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
679 int txq_id = txq->q.id; 656 int txq_id = txq->q.id;
680 657
681 /* Find out whether to activate Tx queue */ 658 /* Find out whether to activate Tx queue */
682 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0; 659 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
683 660
684 /* Set up and activate */ 661 /* Set up and activate */
685 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), 662 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
@@ -709,9 +686,10 @@ static const u16 default_queue_to_tx_fifo[] = {
709static int iwl4965_alive_notify(struct iwl_priv *priv) 686static int iwl4965_alive_notify(struct iwl_priv *priv)
710{ 687{
711 u32 a; 688 u32 a;
712 int i = 0;
713 unsigned long flags; 689 unsigned long flags;
714 int ret; 690 int ret;
691 int i, chan;
692 u32 reg_val;
715 693
716 spin_lock_irqsave(&priv->lock, flags); 694 spin_lock_irqsave(&priv->lock, flags);
717 695
@@ -733,8 +711,18 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
733 711
734 /* Tel 4965 where to find Tx byte count tables */ 712 /* Tel 4965 where to find Tx byte count tables */
735 iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR, 713 iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
736 (priv->shared_phys + 714 priv->scd_bc_tbls.dma >> 10);
737 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10); 715
716 /* Enable DMA channel */
717 for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
718 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
719 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
720 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
721
722 /* Update FH chicken bits */
723 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
724 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
725 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
738 726
739 /* Disable chain mode for all queues */ 727 /* Disable chain mode for all queues */
740 iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0); 728 iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
@@ -766,7 +754,7 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
766 (1 << priv->hw_params.max_txq_num) - 1); 754 (1 << priv->hw_params.max_txq_num) - 1);
767 755
768 /* Activate all Tx DMA/FIFO channels */ 756 /* Activate all Tx DMA/FIFO channels */
769 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7)); 757 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));
770 758
771 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 759 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
772 760
@@ -822,7 +810,9 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
822 } 810 }
823 811
824 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; 812 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
825 priv->hw_params.first_ampdu_q = IWL49_FIRST_AMPDU_QUEUE; 813 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
814 priv->hw_params.scd_bc_tbls_size =
815 IWL49_NUM_QUEUES * sizeof(struct iwl4965_scd_bc_tbl);
826 priv->hw_params.max_stations = IWL4965_STATION_COUNT; 816 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
827 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; 817 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
828 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE; 818 priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
@@ -1650,36 +1640,6 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1650} 1640}
1651#endif 1641#endif
1652 1642
1653static int iwl4965_shared_mem_rx_idx(struct iwl_priv *priv)
1654{
1655 struct iwl4965_shared *s = priv->shared_virt;
1656 return le32_to_cpu(s->rb_closed) & 0xFFF;
1657}
1658
1659static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
1660{
1661 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
1662 sizeof(struct iwl4965_shared),
1663 &priv->shared_phys);
1664 if (!priv->shared_virt)
1665 return -ENOMEM;
1666
1667 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
1668
1669 priv->rb_closed_offset = offsetof(struct iwl4965_shared, rb_closed);
1670
1671 return 0;
1672}
1673
1674static void iwl4965_free_shared_mem(struct iwl_priv *priv)
1675{
1676 if (priv->shared_virt)
1677 pci_free_consistent(priv->pci_dev,
1678 sizeof(struct iwl4965_shared),
1679 priv->shared_virt,
1680 priv->shared_phys);
1681}
1682
1683/** 1643/**
1684 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 1644 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
1685 */ 1645 */
@@ -1687,21 +1647,22 @@ static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
1687 struct iwl_tx_queue *txq, 1647 struct iwl_tx_queue *txq,
1688 u16 byte_cnt) 1648 u16 byte_cnt)
1689{ 1649{
1690 int len; 1650 struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
1691 int txq_id = txq->q.id; 1651 int txq_id = txq->q.id;
1692 struct iwl4965_shared *shared_data = priv->shared_virt; 1652 int write_ptr = txq->q.write_ptr;
1653 int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
1654 __le16 bc_ent;
1693 1655
1694 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 1656 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
1695 1657
1658 bc_ent = cpu_to_le16(len & 0xFFF);
1696 /* Set up byte count within first 256 entries */ 1659 /* Set up byte count within first 256 entries */
1697 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. 1660 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
1698 tfd_offset[txq->q.write_ptr], byte_cnt, len);
1699 1661
1700 /* If within first 64 entries, duplicate at end */ 1662 /* If within first 64 entries, duplicate at end */
1701 if (txq->q.write_ptr < IWL49_MAX_WIN_SIZE) 1663 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
1702 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. 1664 scd_bc_tbl[txq_id].
1703 tfd_offset[IWL49_QUEUE_SIZE + txq->q.write_ptr], 1665 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
1704 byte_cnt, len);
1705} 1666}
1706 1667
1707/** 1668/**
@@ -1956,7 +1917,7 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1956 ra_tid = BUILD_RAxTID(sta_id, tid); 1917 ra_tid = BUILD_RAxTID(sta_id, tid);
1957 1918
1958 /* Modify device's station table to Tx this TID */ 1919 /* Modify device's station table to Tx this TID */
1959 iwl_sta_modify_enable_tid_tx(priv, sta_id, tid); 1920 iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
1960 1921
1961 spin_lock_irqsave(&priv->lock, flags); 1922 spin_lock_irqsave(&priv->lock, flags);
1962 ret = iwl_grab_nic_access(priv); 1923 ret = iwl_grab_nic_access(priv);
@@ -2037,7 +1998,7 @@ static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
2037} 1998}
2038 1999
2039/** 2000/**
2040 * iwl4965_tx_status_reply_tx - Handle Tx rspnse for frames in aggregation queue 2001 * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
2041 */ 2002 */
2042static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, 2003static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2043 struct iwl_ht_agg *agg, 2004 struct iwl_ht_agg *agg,
@@ -2059,7 +2020,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2059 agg->rate_n_flags = rate_n_flags; 2020 agg->rate_n_flags = rate_n_flags;
2060 agg->bitmap = 0; 2021 agg->bitmap = 0;
2061 2022
2062 /* # frames attempted by Tx command */ 2023 /* num frames attempted by Tx command */
2063 if (agg->frame_count == 1) { 2024 if (agg->frame_count == 1) {
2064 /* Only one frame was attempted; no block-ack will arrive */ 2025 /* Only one frame was attempted; no block-ack will arrive */
2065 status = le16_to_cpu(frame_status[0].status); 2026 status = le16_to_cpu(frame_status[0].status);
@@ -2070,9 +2031,9 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2070 agg->frame_count, agg->start_idx, idx); 2031 agg->frame_count, agg->start_idx, idx);
2071 2032
2072 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); 2033 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
2073 info->status.retry_count = tx_resp->failure_frame; 2034 info->status.rates[0].count = tx_resp->failure_frame + 1;
2074 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 2035 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
2075 info->flags |= iwl_is_tx_success(status)? 2036 info->flags |= iwl_is_tx_success(status) ?
2076 IEEE80211_TX_STAT_ACK : 0; 2037 IEEE80211_TX_STAT_ACK : 0;
2077 iwl_hwrate_to_tx_control(priv, rate_n_flags, info); 2038 iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
2078 /* FIXME: code repetition end */ 2039 /* FIXME: code repetition end */
@@ -2158,12 +2119,13 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2158 int txq_id = SEQ_TO_QUEUE(sequence); 2119 int txq_id = SEQ_TO_QUEUE(sequence);
2159 int index = SEQ_TO_INDEX(sequence); 2120 int index = SEQ_TO_INDEX(sequence);
2160 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 2121 struct iwl_tx_queue *txq = &priv->txq[txq_id];
2122 struct ieee80211_hdr *hdr;
2161 struct ieee80211_tx_info *info; 2123 struct ieee80211_tx_info *info;
2162 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 2124 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
2163 u32 status = le32_to_cpu(tx_resp->u.status); 2125 u32 status = le32_to_cpu(tx_resp->u.status);
2164 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION; 2126 int tid = MAX_TID_COUNT;
2165 __le16 fc; 2127 int sta_id;
2166 struct ieee80211_hdr *hdr; 2128 int freed;
2167 u8 *qc = NULL; 2129 u8 *qc = NULL;
2168 2130
2169 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 2131 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
@@ -2178,8 +2140,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2178 memset(&info->status, 0, sizeof(info->status)); 2140 memset(&info->status, 0, sizeof(info->status));
2179 2141
2180 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index); 2142 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
2181 fc = hdr->frame_control; 2143 if (ieee80211_is_data_qos(hdr->frame_control)) {
2182 if (ieee80211_is_data_qos(fc)) {
2183 qc = ieee80211_get_qos_ctl(hdr); 2144 qc = ieee80211_get_qos_ctl(hdr);
2184 tid = qc[0] & 0xf; 2145 tid = qc[0] & 0xf;
2185 } 2146 }
@@ -2194,8 +2155,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2194 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp); 2155 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
2195 struct iwl_ht_agg *agg = NULL; 2156 struct iwl_ht_agg *agg = NULL;
2196 2157
2197 if (!qc) 2158 WARN_ON(!qc);
2198 return;
2199 2159
2200 agg = &priv->stations[sta_id].tid[tid].agg; 2160 agg = &priv->stations[sta_id].tid[tid].agg;
2201 2161
@@ -2206,54 +2166,49 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2206 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 2166 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
2207 2167
2208 if (txq->q.read_ptr != (scd_ssn & 0xff)) { 2168 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
2209 int freed, ampdu_q;
2210 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); 2169 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2211 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn " 2170 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
2212 "%d index %d\n", scd_ssn , index); 2171 "%d index %d\n", scd_ssn , index);
2213 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2172 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2214 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 2173 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2215 2174
2216 if (iwl_queue_space(&txq->q) > txq->q.low_mark && 2175 if (priv->mac80211_registered &&
2217 txq_id >= 0 && priv->mac80211_registered && 2176 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
2218 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) { 2177 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
2219 /* calculate mac80211 ampdu sw queue to wake */
2220 ampdu_q = txq_id - IWL49_FIRST_AMPDU_QUEUE +
2221 priv->hw->queues;
2222 if (agg->state == IWL_AGG_OFF) 2178 if (agg->state == IWL_AGG_OFF)
2223 ieee80211_wake_queue(priv->hw, txq_id); 2179 ieee80211_wake_queue(priv->hw, txq_id);
2224 else 2180 else
2225 ieee80211_wake_queue(priv->hw, ampdu_q); 2181 ieee80211_wake_queue(priv->hw,
2182 txq->swq_id);
2226 } 2183 }
2227 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
2228 } 2184 }
2229 } else { 2185 } else {
2230 info->status.retry_count = tx_resp->failure_frame; 2186 info->status.rates[0].count = tx_resp->failure_frame + 1;
2231 info->flags |= 2187 info->flags |= iwl_is_tx_success(status) ?
2232 iwl_is_tx_success(status) ? IEEE80211_TX_STAT_ACK : 0; 2188 IEEE80211_TX_STAT_ACK : 0;
2233 iwl_hwrate_to_tx_control(priv, 2189 iwl_hwrate_to_tx_control(priv,
2234 le32_to_cpu(tx_resp->rate_n_flags), 2190 le32_to_cpu(tx_resp->rate_n_flags),
2235 info); 2191 info);
2236 2192
2237 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags " 2193 IWL_DEBUG_TX_REPLY("TXQ %d status %s (0x%08x) "
2238 "0x%x retries %d\n", txq_id, 2194 "rate_n_flags 0x%x retries %d\n",
2239 iwl_get_tx_fail_reason(status), 2195 txq_id,
2240 status, le32_to_cpu(tx_resp->rate_n_flags), 2196 iwl_get_tx_fail_reason(status), status,
2241 tx_resp->failure_frame); 2197 le32_to_cpu(tx_resp->rate_n_flags),
2198 tx_resp->failure_frame);
2242 2199
2243 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); 2200 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2244 2201 if (qc && likely(sta_id != IWL_INVALID_STATION))
2245 if (index != -1) {
2246 int freed = iwl_tx_queue_reclaim(priv, txq_id, index);
2247 if (tid != MAX_TID_COUNT)
2248 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 2202 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2249 if (iwl_queue_space(&txq->q) > txq->q.low_mark && 2203
2250 (txq_id >= 0) && priv->mac80211_registered) 2204 if (priv->mac80211_registered &&
2205 (iwl_queue_space(&txq->q) > txq->q.low_mark))
2251 ieee80211_wake_queue(priv->hw, txq_id); 2206 ieee80211_wake_queue(priv->hw, txq_id);
2252 if (tid != MAX_TID_COUNT)
2253 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
2254 }
2255 } 2207 }
2256 2208
2209 if (qc && likely(sta_id != IWL_INVALID_STATION))
2210 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
2211
2257 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 2212 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
2258 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); 2213 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
2259} 2214}
@@ -2328,9 +2283,6 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2328 2283
2329static struct iwl_lib_ops iwl4965_lib = { 2284static struct iwl_lib_ops iwl4965_lib = {
2330 .set_hw_params = iwl4965_hw_set_hw_params, 2285 .set_hw_params = iwl4965_hw_set_hw_params,
2331 .alloc_shared_mem = iwl4965_alloc_shared_mem,
2332 .free_shared_mem = iwl4965_free_shared_mem,
2333 .shared_mem_rx_idx = iwl4965_shared_mem_rx_idx,
2334 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, 2286 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
2335 .txq_set_sched = iwl4965_txq_set_sched, 2287 .txq_set_sched = iwl4965_txq_set_sched,
2336 .txq_agg_enable = iwl4965_txq_agg_enable, 2288 .txq_agg_enable = iwl4965_txq_agg_enable,
@@ -2347,7 +2299,7 @@ static struct iwl_lib_ops iwl4965_lib = {
2347 .reset = iwl4965_apm_reset, 2299 .reset = iwl4965_apm_reset,
2348 .stop = iwl4965_apm_stop, 2300 .stop = iwl4965_apm_stop,
2349 .config = iwl4965_nic_config, 2301 .config = iwl4965_nic_config,
2350 .set_pwr_src = iwl4965_set_pwr_src, 2302 .set_pwr_src = iwl_set_pwr_src,
2351 }, 2303 },
2352 .eeprom_ops = { 2304 .eeprom_ops = {
2353 .regulatory_bands = { 2305 .regulatory_bands = {
@@ -2362,11 +2314,11 @@ static struct iwl_lib_ops iwl4965_lib = {
2362 .verify_signature = iwlcore_eeprom_verify_signature, 2314 .verify_signature = iwlcore_eeprom_verify_signature,
2363 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 2315 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
2364 .release_semaphore = iwlcore_eeprom_release_semaphore, 2316 .release_semaphore = iwlcore_eeprom_release_semaphore,
2365 .check_version = iwl4965_eeprom_check_version, 2317 .calib_version = iwl4965_eeprom_calib_version,
2366 .query_addr = iwlcore_eeprom_query_addr, 2318 .query_addr = iwlcore_eeprom_query_addr,
2367 }, 2319 },
2368 .send_tx_power = iwl4965_send_tx_power, 2320 .send_tx_power = iwl4965_send_tx_power,
2369 .update_chain_flags = iwl4965_update_chain_flags, 2321 .update_chain_flags = iwl_update_chain_flags,
2370 .temperature = iwl4965_temperature_calib, 2322 .temperature = iwl4965_temperature_calib,
2371}; 2323};
2372 2324
@@ -2378,15 +2330,19 @@ static struct iwl_ops iwl4965_ops = {
2378 2330
2379struct iwl_cfg iwl4965_agn_cfg = { 2331struct iwl_cfg iwl4965_agn_cfg = {
2380 .name = "4965AGN", 2332 .name = "4965AGN",
2381 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode", 2333 .fw_name_pre = IWL4965_FW_PRE,
2334 .ucode_api_max = IWL4965_UCODE_API_MAX,
2335 .ucode_api_min = IWL4965_UCODE_API_MIN,
2382 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 2336 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
2383 .eeprom_size = IWL4965_EEPROM_IMG_SIZE, 2337 .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
2338 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2339 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
2384 .ops = &iwl4965_ops, 2340 .ops = &iwl4965_ops,
2385 .mod_params = &iwl4965_mod_params, 2341 .mod_params = &iwl4965_mod_params,
2386}; 2342};
2387 2343
2388/* Module firmware */ 2344/* Module firmware */
2389MODULE_FIRMWARE("iwlwifi-4965" IWL4965_UCODE_API ".ucode"); 2345MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
2390 2346
2391module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444); 2347module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
2392MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 2348MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
@@ -2394,7 +2350,7 @@ module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
2394MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); 2350MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
2395module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444); 2351module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
2396MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); 2352MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
2397module_param_named(debug, iwl4965_mod_params.debug, int, 0444); 2353module_param_named(debug, iwl4965_mod_params.debug, uint, 0444);
2398MODULE_PARM_DESC(debug, "debug output mask"); 2354MODULE_PARM_DESC(debug, "debug output mask");
2399module_param_named( 2355module_param_named(
2400 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444); 2356 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444);
@@ -2402,9 +2358,6 @@ MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
2402 2358
2403module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444); 2359module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
2404MODULE_PARM_DESC(queues_num, "number of hw queues."); 2360MODULE_PARM_DESC(queues_num, "number of hw queues.");
2405/* QoS */
2406module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
2407MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
2408/* 11n */ 2361/* 11n */
2409module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, 0444); 2362module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, 0444);
2410MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); 2363MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index c479ee211c5c..82c3859ce0f8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -73,69 +73,27 @@
73#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND) 73#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND)
74#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND) 74#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
75 75
76/* EERPROM */ 76/* EEPROM */
77#define IWL_5000_EEPROM_IMG_SIZE 2048 77#define IWL_5000_EEPROM_IMG_SIZE 2048
78 78
79
80#define IWL50_MAX_WIN_SIZE 64
81#define IWL50_QUEUE_SIZE 256
82#define IWL50_CMD_FIFO_NUM 7 79#define IWL50_CMD_FIFO_NUM 7
83#define IWL50_NUM_QUEUES 20 80#define IWL50_NUM_QUEUES 20
84#define IWL50_NUM_AMPDU_QUEUES 10 81#define IWL50_NUM_AMPDU_QUEUES 10
85#define IWL50_FIRST_AMPDU_QUEUE 10 82#define IWL50_FIRST_AMPDU_QUEUE 10
86 83
87#define IWL_sta_id_POS 12
88#define IWL_sta_id_LEN 4
89#define IWL_sta_id_SYM val
90
91/* Fixed (non-configurable) rx data from phy */ 84/* Fixed (non-configurable) rx data from phy */
92 85
93/* Base physical address of iwl5000_shared is provided to SCD_DRAM_BASE_ADDR 86/**
94 * and &iwl5000_shared.val0 is provided to FH_RSCSR_CHNL0_STTS_WPTR_REG */ 87 * struct iwl5000_schedq_bc_tbl scheduler byte count table
95struct iwl5000_sched_queue_byte_cnt_tbl { 88 * base physical address of iwl5000_shared
96 struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL50_QUEUE_SIZE + 89 * is provided to SCD_DRAM_BASE_ADDR
97 IWL50_MAX_WIN_SIZE]; 90 * @tfd_offset 0-12 - tx command byte count
98} __attribute__ ((packed)); 91 * 12-16 - station index
99 92 */
100struct iwl5000_shared { 93struct iwl5000_scd_bc_tbl {
101 struct iwl5000_sched_queue_byte_cnt_tbl 94 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
102 queues_byte_cnt_tbls[IWL50_NUM_QUEUES];
103 __le32 rb_closed;
104
105 /* __le32 rb_closed_stts_rb_num:12; */
106#define IWL_rb_closed_stts_rb_num_POS 0
107#define IWL_rb_closed_stts_rb_num_LEN 12
108#define IWL_rb_closed_stts_rb_num_SYM rb_closed
109 /* __le32 rsrv1:4; */
110 /* __le32 rb_closed_stts_rx_frame_num:12; */
111#define IWL_rb_closed_stts_rx_frame_num_POS 16
112#define IWL_rb_closed_stts_rx_frame_num_LEN 12
113#define IWL_rb_closed_stts_rx_frame_num_SYM rb_closed
114 /* __le32 rsrv2:4; */
115
116 __le32 frm_finished;
117 /* __le32 frame_finished_stts_rb_num:12; */
118#define IWL_frame_finished_stts_rb_num_POS 0
119#define IWL_frame_finished_stts_rb_num_LEN 12
120#define IWL_frame_finished_stts_rb_num_SYM frm_finished
121 /* __le32 rsrv3:4; */
122 /* __le32 frame_finished_stts_rx_frame_num:12; */
123#define IWL_frame_finished_stts_rx_frame_num_POS 16
124#define IWL_frame_finished_stts_rx_frame_num_LEN 12
125#define IWL_frame_finished_stts_rx_frame_num_SYM frm_finished
126 /* __le32 rsrv4:4; */
127
128 __le32 padding1; /* so that allocation will be aligned to 16B */
129 __le32 padding2;
130} __attribute__ ((packed)); 95} __attribute__ ((packed));
131 96
132/* calibrations defined for 5000 */
133/* defines the order in which results should be sent to the runtime uCode */
134enum iwl5000_calib {
135 IWL5000_CALIB_LO,
136 IWL5000_CALIB_TX_IQ,
137 IWL5000_CALIB_TX_IQ_PERD,
138};
139 97
140#endif /* __iwl_5000_hw_h__ */ 98#endif /* __iwl_5000_hw_h__ */
141 99
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 5155b8a760a7..66d053d28a74 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -44,7 +44,21 @@
44#include "iwl-helpers.h" 44#include "iwl-helpers.h"
45#include "iwl-5000-hw.h" 45#include "iwl-5000-hw.h"
46 46
47#define IWL5000_UCODE_API "-1" 47/* Highest firmware API version supported */
48#define IWL5000_UCODE_API_MAX 1
49#define IWL5150_UCODE_API_MAX 1
50
51/* Lowest firmware API version supported */
52#define IWL5000_UCODE_API_MIN 1
53#define IWL5150_UCODE_API_MIN 1
54
55#define IWL5000_FW_PRE "iwlwifi-5000-"
56#define _IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE #api ".ucode"
57#define IWL5000_MODULE_FIRMWARE(api) _IWL5000_MODULE_FIRMWARE(api)
58
59#define IWL5150_FW_PRE "iwlwifi-5150-"
60#define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode"
61#define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api)
48 62
49static const u16 iwl5000_default_queue_to_tx_fifo[] = { 63static const u16 iwl5000_default_queue_to_tx_fifo[] = {
50 IWL_TX_FIFO_AC3, 64 IWL_TX_FIFO_AC3,
@@ -59,7 +73,6 @@ static const u16 iwl5000_default_queue_to_tx_fifo[] = {
59/* FIXME: same implementation as 4965 */ 73/* FIXME: same implementation as 4965 */
60static int iwl5000_apm_stop_master(struct iwl_priv *priv) 74static int iwl5000_apm_stop_master(struct iwl_priv *priv)
61{ 75{
62 int ret = 0;
63 unsigned long flags; 76 unsigned long flags;
64 77
65 spin_lock_irqsave(&priv->lock, flags); 78 spin_lock_irqsave(&priv->lock, flags);
@@ -67,17 +80,13 @@ static int iwl5000_apm_stop_master(struct iwl_priv *priv)
67 /* set stop master bit */ 80 /* set stop master bit */
68 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 81 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
69 82
70 ret = iwl_poll_bit(priv, CSR_RESET, 83 iwl_poll_direct_bit(priv, CSR_RESET,
71 CSR_RESET_REG_FLAG_MASTER_DISABLED,
72 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 84 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
73 if (ret < 0)
74 goto out;
75 85
76out:
77 spin_unlock_irqrestore(&priv->lock, flags); 86 spin_unlock_irqrestore(&priv->lock, flags);
78 IWL_DEBUG_INFO("stop master\n"); 87 IWL_DEBUG_INFO("stop master\n");
79 88
80 return ret; 89 return 0;
81} 90}
82 91
83 92
@@ -92,7 +101,7 @@ static int iwl5000_apm_init(struct iwl_priv *priv)
92 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, 101 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
93 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 102 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
94 103
95 /* Set FH wait treshold to maximum (HW error during stress W/A) */ 104 /* Set FH wait threshold to maximum (HW error during stress W/A) */
96 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); 105 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
97 106
98 /* enable HAP INTA to move device L1a -> L0s */ 107 /* enable HAP INTA to move device L1a -> L0s */
@@ -106,9 +115,8 @@ static int iwl5000_apm_init(struct iwl_priv *priv)
106 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 115 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
107 116
108 /* wait for clock stabilization */ 117 /* wait for clock stabilization */
109 ret = iwl_poll_bit(priv, CSR_GP_CNTRL, 118 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
110 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 119 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
111 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
112 if (ret < 0) { 120 if (ret < 0) {
113 IWL_DEBUG_INFO("Failed to init the card\n"); 121 IWL_DEBUG_INFO("Failed to init the card\n");
114 return ret; 122 return ret;
@@ -132,7 +140,7 @@ static int iwl5000_apm_init(struct iwl_priv *priv)
132 return ret; 140 return ret;
133} 141}
134 142
135/* FIXME: this is indentical to 4965 */ 143/* FIXME: this is identical to 4965 */
136static void iwl5000_apm_stop(struct iwl_priv *priv) 144static void iwl5000_apm_stop(struct iwl_priv *priv)
137{ 145{
138 unsigned long flags; 146 unsigned long flags;
@@ -175,9 +183,8 @@ static int iwl5000_apm_reset(struct iwl_priv *priv)
175 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 183 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
176 184
177 /* wait for clock stabilization */ 185 /* wait for clock stabilization */
178 ret = iwl_poll_bit(priv, CSR_GP_CNTRL, 186 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
179 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 187 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
180 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
181 if (ret < 0) { 188 if (ret < 0) {
182 IWL_DEBUG_INFO("Failed to init the card\n"); 189 IWL_DEBUG_INFO("Failed to init the card\n");
183 goto out; 190 goto out;
@@ -217,7 +224,7 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
217 224
218 /* L1 is enabled by BIOS */ 225 /* L1 is enabled by BIOS */
219 if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN) 226 if ((link & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
220 /* diable L0S disabled L1A enabled */ 227 /* disable L0S disabled L1A enabled */
221 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 228 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
222 else 229 else
223 /* L0S enabled L1A disabled */ 230 /* L0S enabled L1A disabled */
@@ -291,30 +298,17 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
291 return (address & ADDRESS_MSK) + (offset << 1); 298 return (address & ADDRESS_MSK) + (offset << 1);
292} 299}
293 300
294static int iwl5000_eeprom_check_version(struct iwl_priv *priv) 301static u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv)
295{ 302{
296 u16 eeprom_ver;
297 struct iwl_eeprom_calib_hdr { 303 struct iwl_eeprom_calib_hdr {
298 u8 version; 304 u8 version;
299 u8 pa_type; 305 u8 pa_type;
300 u16 voltage; 306 u16 voltage;
301 } *hdr; 307 } *hdr;
302 308
303 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
304
305 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv, 309 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
306 EEPROM_5000_CALIB_ALL); 310 EEPROM_5000_CALIB_ALL);
307 311 return hdr->version;
308 if (eeprom_ver < EEPROM_5000_EEPROM_VERSION ||
309 hdr->version < EEPROM_5000_TX_POWER_VERSION)
310 goto err;
311
312 return 0;
313err:
314 IWL_ERROR("Unsuported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
315 eeprom_ver, EEPROM_5000_EEPROM_VERSION,
316 hdr->version, EEPROM_5000_TX_POWER_VERSION);
317 return -EINVAL;
318 312
319} 313}
320 314
@@ -348,10 +342,14 @@ static void iwl5000_gain_computation(struct iwl_priv *priv,
348 data->delta_gain_code[1], data->delta_gain_code[2]); 342 data->delta_gain_code[1], data->delta_gain_code[2]);
349 343
350 if (!data->radio_write) { 344 if (!data->radio_write) {
351 struct iwl5000_calibration_chain_noise_gain_cmd cmd; 345 struct iwl_calib_chain_noise_gain_cmd cmd;
346
352 memset(&cmd, 0, sizeof(cmd)); 347 memset(&cmd, 0, sizeof(cmd));
353 348
354 cmd.op_code = IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD; 349 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
350 cmd.hdr.first_group = 0;
351 cmd.hdr.groups_num = 1;
352 cmd.hdr.data_valid = 1;
355 cmd.delta_gain_1 = data->delta_gain_code[1]; 353 cmd.delta_gain_1 = data->delta_gain_code[1];
356 cmd.delta_gain_2 = data->delta_gain_code[2]; 354 cmd.delta_gain_2 = data->delta_gain_code[2];
357 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD, 355 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
@@ -373,14 +371,19 @@ static void iwl5000_gain_computation(struct iwl_priv *priv,
373static void iwl5000_chain_noise_reset(struct iwl_priv *priv) 371static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
374{ 372{
375 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 373 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
374 int ret;
376 375
377 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) { 376 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
378 struct iwl5000_calibration_chain_noise_reset_cmd cmd; 377 struct iwl_calib_chain_noise_reset_cmd cmd;
379
380 memset(&cmd, 0, sizeof(cmd)); 378 memset(&cmd, 0, sizeof(cmd));
381 cmd.op_code = IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD; 379
382 if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 380 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
383 sizeof(cmd), &cmd)) 381 cmd.hdr.first_group = 0;
382 cmd.hdr.groups_num = 1;
383 cmd.hdr.data_valid = 1;
384 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
385 sizeof(cmd), &cmd);
386 if (ret)
384 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n"); 387 IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
385 data->state = IWL_CHAIN_NOISE_ACCUMULATE; 388 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
386 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n"); 389 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
@@ -390,8 +393,8 @@ static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
390static void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info, 393static void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
391 __le32 *tx_flags) 394 __le32 *tx_flags)
392{ 395{
393 if ((info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) || 396 if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
394 (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT)) 397 (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
395 *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK; 398 *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
396 else 399 else
397 *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK; 400 *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
@@ -426,31 +429,41 @@ static const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
426 return &priv->eeprom[address]; 429 return &priv->eeprom[address];
427} 430}
428 431
432static s32 iwl5150_get_ct_threshold(struct iwl_priv *priv)
433{
434 const s32 volt2temp_coef = -5;
435 u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
436 EEPROM_5000_TEMPERATURE);
437 /* offset = temperate - voltage / coef */
438 s32 offset = temp_calib[0] - temp_calib[1] / volt2temp_coef;
439 s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD) - offset;
440 return threshold * volt2temp_coef;
441}
442
429/* 443/*
430 * Calibration 444 * Calibration
431 */ 445 */
432static int iwl5000_send_Xtal_calib(struct iwl_priv *priv) 446static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
433{ 447{
448 struct iwl_calib_xtal_freq_cmd cmd;
434 u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL); 449 u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
435 450
436 struct iwl5000_calibration cal_cmd = { 451 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
437 .op_code = IWL5000_PHY_CALIBRATE_CRYSTAL_FRQ_CMD, 452 cmd.hdr.first_group = 0;
438 .data = { 453 cmd.hdr.groups_num = 1;
439 (u8)xtal_calib[0], 454 cmd.hdr.data_valid = 1;
440 (u8)xtal_calib[1], 455 cmd.cap_pin1 = (u8)xtal_calib[0];
441 } 456 cmd.cap_pin2 = (u8)xtal_calib[1];
442 }; 457 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
443 458 (u8 *)&cmd, sizeof(cmd));
444 return iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
445 sizeof(cal_cmd), &cal_cmd);
446} 459}
447 460
448static int iwl5000_send_calib_cfg(struct iwl_priv *priv) 461static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
449{ 462{
450 struct iwl5000_calib_cfg_cmd calib_cfg_cmd; 463 struct iwl_calib_cfg_cmd calib_cfg_cmd;
451 struct iwl_host_cmd cmd = { 464 struct iwl_host_cmd cmd = {
452 .id = CALIBRATION_CFG_CMD, 465 .id = CALIBRATION_CFG_CMD,
453 .len = sizeof(struct iwl5000_calib_cfg_cmd), 466 .len = sizeof(struct iwl_calib_cfg_cmd),
454 .data = &calib_cfg_cmd, 467 .data = &calib_cfg_cmd,
455 }; 468 };
456 469
@@ -467,7 +480,7 @@ static void iwl5000_rx_calib_result(struct iwl_priv *priv,
467 struct iwl_rx_mem_buffer *rxb) 480 struct iwl_rx_mem_buffer *rxb)
468{ 481{
469 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 482 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
470 struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw; 483 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
471 int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK; 484 int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK;
472 int index; 485 int index;
473 486
@@ -478,14 +491,20 @@ static void iwl5000_rx_calib_result(struct iwl_priv *priv,
478 * uCode. iwl_send_calib_results sends them in a row according to their 491 * uCode. iwl_send_calib_results sends them in a row according to their
479 * index. We sort them here */ 492 * index. We sort them here */
480 switch (hdr->op_code) { 493 switch (hdr->op_code) {
481 case IWL5000_PHY_CALIBRATE_LO_CMD: 494 case IWL_PHY_CALIBRATE_DC_CMD:
482 index = IWL5000_CALIB_LO; 495 index = IWL_CALIB_DC;
496 break;
497 case IWL_PHY_CALIBRATE_LO_CMD:
498 index = IWL_CALIB_LO;
483 break; 499 break;
484 case IWL5000_PHY_CALIBRATE_TX_IQ_CMD: 500 case IWL_PHY_CALIBRATE_TX_IQ_CMD:
485 index = IWL5000_CALIB_TX_IQ; 501 index = IWL_CALIB_TX_IQ;
486 break; 502 break;
487 case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD: 503 case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
488 index = IWL5000_CALIB_TX_IQ_PERD; 504 index = IWL_CALIB_TX_IQ_PERD;
505 break;
506 case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
507 index = IWL_CALIB_BASE_BAND;
489 break; 508 break;
490 default: 509 default:
491 IWL_ERROR("Unknown calibration notification %d\n", 510 IWL_ERROR("Unknown calibration notification %d\n",
@@ -535,7 +554,7 @@ static int iwl5000_load_section(struct iwl_priv *priv,
535 554
536 iwl_write_direct32(priv, 555 iwl_write_direct32(priv,
537 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), 556 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
538 (iwl_get_dma_hi_address(phy_addr) 557 (iwl_get_dma_hi_addr(phy_addr)
539 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); 558 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
540 559
541 iwl_write_direct32(priv, 560 iwl_write_direct32(priv,
@@ -547,7 +566,7 @@ static int iwl5000_load_section(struct iwl_priv *priv,
547 iwl_write_direct32(priv, 566 iwl_write_direct32(priv,
548 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 567 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
549 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 568 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
550 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL | 569 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
551 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 570 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
552 571
553 iwl_release_nic_access(priv); 572 iwl_release_nic_access(priv);
@@ -561,14 +580,13 @@ static int iwl5000_load_given_ucode(struct iwl_priv *priv,
561{ 580{
562 int ret = 0; 581 int ret = 0;
563 582
564 ret = iwl5000_load_section( 583 ret = iwl5000_load_section(priv, inst_image, RTC_INST_LOWER_BOUND);
565 priv, inst_image, RTC_INST_LOWER_BOUND);
566 if (ret) 584 if (ret)
567 return ret; 585 return ret;
568 586
569 IWL_DEBUG_INFO("INST uCode section being loaded...\n"); 587 IWL_DEBUG_INFO("INST uCode section being loaded...\n");
570 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 588 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
571 priv->ucode_write_complete, 5 * HZ); 589 priv->ucode_write_complete, 5 * HZ);
572 if (ret == -ERESTARTSYS) { 590 if (ret == -ERESTARTSYS) {
573 IWL_ERROR("Could not load the INST uCode section due " 591 IWL_ERROR("Could not load the INST uCode section due "
574 "to interrupt\n"); 592 "to interrupt\n");
@@ -682,7 +700,7 @@ static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
682 int tx_fifo_id, int scd_retry) 700 int tx_fifo_id, int scd_retry)
683{ 701{
684 int txq_id = txq->q.id; 702 int txq_id = txq->q.id;
685 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0; 703 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
686 704
687 iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id), 705 iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
688 (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) | 706 (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
@@ -710,9 +728,10 @@ static int iwl5000_send_wimax_coex(struct iwl_priv *priv)
710static int iwl5000_alive_notify(struct iwl_priv *priv) 728static int iwl5000_alive_notify(struct iwl_priv *priv)
711{ 729{
712 u32 a; 730 u32 a;
713 int i = 0;
714 unsigned long flags; 731 unsigned long flags;
715 int ret; 732 int ret;
733 int i, chan;
734 u32 reg_val;
716 735
717 spin_lock_irqsave(&priv->lock, flags); 736 spin_lock_irqsave(&priv->lock, flags);
718 737
@@ -734,11 +753,21 @@ static int iwl5000_alive_notify(struct iwl_priv *priv)
734 iwl_write_targ_mem(priv, a, 0); 753 iwl_write_targ_mem(priv, a, 0);
735 754
736 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR, 755 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
737 (priv->shared_phys + 756 priv->scd_bc_tbls.dma >> 10);
738 offsetof(struct iwl5000_shared, queues_byte_cnt_tbls)) >> 10); 757
758 /* Enable DMA channel */
759 for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
760 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
761 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
762 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
763
764 /* Update FH chicken bits */
765 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
766 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
767 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
768
739 iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, 769 iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
740 IWL50_SCD_QUEUECHAIN_SEL_ALL( 770 IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
741 priv->hw_params.max_txq_num));
742 iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0); 771 iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);
743 772
744 /* initiate the queues */ 773 /* initiate the queues */
@@ -765,6 +794,7 @@ static int iwl5000_alive_notify(struct iwl_priv *priv)
765 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7)); 794 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
766 795
767 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 796 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
797
768 /* map qos queues to fifos one-to-one */ 798 /* map qos queues to fifos one-to-one */
769 for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) { 799 for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
770 int ac = iwl5000_default_queue_to_tx_fifo[i]; 800 int ac = iwl5000_default_queue_to_tx_fifo[i];
@@ -784,10 +814,8 @@ static int iwl5000_alive_notify(struct iwl_priv *priv)
784 814
785 iwl5000_send_wimax_coex(priv); 815 iwl5000_send_wimax_coex(priv);
786 816
787 iwl5000_send_Xtal_calib(priv); 817 iwl5000_set_Xtal_calib(priv);
788 818 iwl_send_calib_results(priv);
789 if (priv->ucode_type == UCODE_RT)
790 iwl_send_calib_results(priv);
791 819
792 return 0; 820 return 0;
793} 821}
@@ -802,7 +830,9 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
802 } 830 }
803 831
804 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; 832 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
805 priv->hw_params.first_ampdu_q = IWL50_FIRST_AMPDU_QUEUE; 833 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
834 priv->hw_params.scd_bc_tbls_size =
835 IWL50_NUM_QUEUES * sizeof(struct iwl5000_scd_bc_tbl);
806 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 836 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
807 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 837 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
808 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE; 838 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
@@ -814,10 +844,14 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
814 844
815 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 845 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
816 case CSR_HW_REV_TYPE_5100: 846 case CSR_HW_REV_TYPE_5100:
847 priv->hw_params.tx_chains_num = 1;
848 priv->hw_params.rx_chains_num = 2;
849 priv->hw_params.valid_tx_ant = ANT_B;
850 priv->hw_params.valid_rx_ant = ANT_AB;
851 break;
817 case CSR_HW_REV_TYPE_5150: 852 case CSR_HW_REV_TYPE_5150:
818 priv->hw_params.tx_chains_num = 1; 853 priv->hw_params.tx_chains_num = 1;
819 priv->hw_params.rx_chains_num = 2; 854 priv->hw_params.rx_chains_num = 2;
820 /* FIXME: move to ANT_A, ANT_B, ANT_C enum */
821 priv->hw_params.valid_tx_ant = ANT_A; 855 priv->hw_params.valid_tx_ant = ANT_A;
822 priv->hw_params.valid_rx_ant = ANT_AB; 856 priv->hw_params.valid_rx_ant = ANT_AB;
823 break; 857 break;
@@ -840,43 +874,36 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
840 case CSR_HW_REV_TYPE_5150: 874 case CSR_HW_REV_TYPE_5150:
841 /* 5150 wants in Kelvin */ 875 /* 5150 wants in Kelvin */
842 priv->hw_params.ct_kill_threshold = 876 priv->hw_params.ct_kill_threshold =
843 CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD); 877 iwl5150_get_ct_threshold(priv);
844 break; 878 break;
845 } 879 }
846 880
847 return 0; 881 /* Set initial calibration set */
848} 882 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
849 883 case CSR_HW_REV_TYPE_5100:
850static int iwl5000_alloc_shared_mem(struct iwl_priv *priv) 884 case CSR_HW_REV_TYPE_5300:
851{ 885 case CSR_HW_REV_TYPE_5350:
852 priv->shared_virt = pci_alloc_consistent(priv->pci_dev, 886 priv->hw_params.calib_init_cfg =
853 sizeof(struct iwl5000_shared), 887 BIT(IWL_CALIB_XTAL) |
854 &priv->shared_phys); 888 BIT(IWL_CALIB_LO) |
855 if (!priv->shared_virt) 889 BIT(IWL_CALIB_TX_IQ) |
856 return -ENOMEM; 890 BIT(IWL_CALIB_TX_IQ_PERD) |
891 BIT(IWL_CALIB_BASE_BAND);
892 break;
893 case CSR_HW_REV_TYPE_5150:
894 priv->hw_params.calib_init_cfg =
895 BIT(IWL_CALIB_DC) |
896 BIT(IWL_CALIB_LO) |
897 BIT(IWL_CALIB_TX_IQ) |
898 BIT(IWL_CALIB_BASE_BAND);
857 899
858 memset(priv->shared_virt, 0, sizeof(struct iwl5000_shared)); 900 break;
901 }
859 902
860 priv->rb_closed_offset = offsetof(struct iwl5000_shared, rb_closed);
861 903
862 return 0; 904 return 0;
863} 905}
864 906
865static void iwl5000_free_shared_mem(struct iwl_priv *priv)
866{
867 if (priv->shared_virt)
868 pci_free_consistent(priv->pci_dev,
869 sizeof(struct iwl5000_shared),
870 priv->shared_virt,
871 priv->shared_phys);
872}
873
874static int iwl5000_shared_mem_rx_idx(struct iwl_priv *priv)
875{
876 struct iwl5000_shared *s = priv->shared_virt;
877 return le32_to_cpu(s->rb_closed) & 0xFFF;
878}
879
880/** 907/**
881 * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 908 * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
882 */ 909 */
@@ -884,16 +911,18 @@ static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
884 struct iwl_tx_queue *txq, 911 struct iwl_tx_queue *txq,
885 u16 byte_cnt) 912 u16 byte_cnt)
886{ 913{
887 struct iwl5000_shared *shared_data = priv->shared_virt; 914 struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
915 int write_ptr = txq->q.write_ptr;
888 int txq_id = txq->q.id; 916 int txq_id = txq->q.id;
889 u8 sec_ctl = 0; 917 u8 sec_ctl = 0;
890 u8 sta = 0; 918 u8 sta_id = 0;
891 int len; 919 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
920 __le16 bc_ent;
892 921
893 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 922 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
894 923
895 if (txq_id != IWL_CMD_QUEUE_NUM) { 924 if (txq_id != IWL_CMD_QUEUE_NUM) {
896 sta = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id; 925 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
897 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl; 926 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
898 927
899 switch (sec_ctl & TX_CMD_SEC_MSK) { 928 switch (sec_ctl & TX_CMD_SEC_MSK) {
@@ -909,40 +938,35 @@ static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
909 } 938 }
910 } 939 }
911 940
912 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. 941 bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
913 tfd_offset[txq->q.write_ptr], byte_cnt, len);
914 942
915 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. 943 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
916 tfd_offset[txq->q.write_ptr], sta_id, sta);
917 944
918 if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) { 945 if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
919 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. 946 scd_bc_tbl[txq_id].
920 tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr], 947 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
921 byte_cnt, len);
922 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
923 tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
924 sta_id, sta);
925 }
926} 948}
927 949
928static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, 950static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
929 struct iwl_tx_queue *txq) 951 struct iwl_tx_queue *txq)
930{ 952{
953 struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
931 int txq_id = txq->q.id; 954 int txq_id = txq->q.id;
932 struct iwl5000_shared *shared_data = priv->shared_virt; 955 int read_ptr = txq->q.read_ptr;
933 u8 sta = 0; 956 u8 sta_id = 0;
957 __le16 bc_ent;
958
959 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
934 960
935 if (txq_id != IWL_CMD_QUEUE_NUM) 961 if (txq_id != IWL_CMD_QUEUE_NUM)
936 sta = txq->cmd[txq->q.read_ptr]->cmd.tx.sta_id; 962 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
937 963
938 shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr]. 964 bc_ent = cpu_to_le16(1 | (sta_id << 12));
939 val = cpu_to_le16(1 | (sta << 12)); 965 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
940 966
941 if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) { 967 if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
942 shared_data->queues_byte_cnt_tbls[txq_id]. 968 scd_bc_tbl[txq_id].
943 tfd_offset[IWL50_QUEUE_SIZE + txq->q.read_ptr]. 969 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
944 val = cpu_to_le16(1 | (sta << 12));
945 }
946} 970}
947 971
948static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, 972static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
@@ -996,7 +1020,7 @@ static int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
996 ra_tid = BUILD_RAxTID(sta_id, tid); 1020 ra_tid = BUILD_RAxTID(sta_id, tid);
997 1021
998 /* Modify device's station table to Tx this TID */ 1022 /* Modify device's station table to Tx this TID */
999 iwl_sta_modify_enable_tid_tx(priv, sta_id, tid); 1023 iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
1000 1024
1001 spin_lock_irqsave(&priv->lock, flags); 1025 spin_lock_irqsave(&priv->lock, flags);
1002 ret = iwl_grab_nic_access(priv); 1026 ret = iwl_grab_nic_access(priv);
@@ -1089,7 +1113,7 @@ static u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
1089 1113
1090 1114
1091/* 1115/*
1092 * Activate/Deactivat Tx DMA/FIFO channels according tx fifos mask 1116 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
1093 * must be called under priv->lock and mac access 1117 * must be called under priv->lock and mac access
1094 */ 1118 */
1095static void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask) 1119static void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
@@ -1136,10 +1160,10 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1136 agg->frame_count, agg->start_idx, idx); 1160 agg->frame_count, agg->start_idx, idx);
1137 1161
1138 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); 1162 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
1139 info->status.retry_count = tx_resp->failure_frame; 1163 info->status.rates[0].count = tx_resp->failure_frame + 1;
1140 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 1164 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1141 info->flags |= iwl_is_tx_success(status)? 1165 info->flags |= iwl_is_tx_success(status) ?
1142 IEEE80211_TX_STAT_ACK : 0; 1166 IEEE80211_TX_STAT_ACK : 0;
1143 iwl_hwrate_to_tx_control(priv, rate_n_flags, info); 1167 iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
1144 1168
1145 /* FIXME: code repetition end */ 1169 /* FIXME: code repetition end */
@@ -1225,9 +1249,9 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1225 struct ieee80211_tx_info *info; 1249 struct ieee80211_tx_info *info;
1226 struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 1250 struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
1227 u32 status = le16_to_cpu(tx_resp->status.status); 1251 u32 status = le16_to_cpu(tx_resp->status.status);
1228 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION; 1252 int tid;
1229 struct ieee80211_hdr *hdr; 1253 int sta_id;
1230 u8 *qc = NULL; 1254 int freed;
1231 1255
1232 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { 1256 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
1233 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d " 1257 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
@@ -1240,25 +1264,13 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1240 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]); 1264 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
1241 memset(&info->status, 0, sizeof(info->status)); 1265 memset(&info->status, 0, sizeof(info->status));
1242 1266
1243 hdr = iwl_tx_queue_get_hdr(priv, txq_id, index); 1267 tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
1244 if (ieee80211_is_data_qos(hdr->frame_control)) { 1268 sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
1245 qc = ieee80211_get_qos_ctl(hdr);
1246 tid = qc[0] & 0xf;
1247 }
1248
1249 sta_id = iwl_get_ra_sta_id(priv, hdr);
1250 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
1251 IWL_ERROR("Station not known\n");
1252 return;
1253 }
1254 1269
1255 if (txq->sched_retry) { 1270 if (txq->sched_retry) {
1256 const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp); 1271 const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp);
1257 struct iwl_ht_agg *agg = NULL; 1272 struct iwl_ht_agg *agg = NULL;
1258 1273
1259 if (!qc)
1260 return;
1261
1262 agg = &priv->stations[sta_id].tid[tid].agg; 1274 agg = &priv->stations[sta_id].tid[tid].agg;
1263 1275
1264 iwl5000_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); 1276 iwl5000_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
@@ -1268,58 +1280,58 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1268 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 1280 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1269 1281
1270 if (txq->q.read_ptr != (scd_ssn & 0xff)) { 1282 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
1271 int freed, ampdu_q;
1272 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); 1283 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
1273 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn " 1284 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim "
1274 "%d index %d\n", scd_ssn , index); 1285 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
1286 scd_ssn , index, txq_id, txq->swq_id);
1287
1275 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 1288 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1276 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 1289 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1277 1290
1278 if (iwl_queue_space(&txq->q) > txq->q.low_mark && 1291 if (priv->mac80211_registered &&
1279 txq_id >= 0 && priv->mac80211_registered && 1292 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1280 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) { 1293 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
1281 /* calculate mac80211 ampdu sw queue to wake */
1282 ampdu_q = txq_id - IWL50_FIRST_AMPDU_QUEUE +
1283 priv->hw->queues;
1284 if (agg->state == IWL_AGG_OFF) 1294 if (agg->state == IWL_AGG_OFF)
1285 ieee80211_wake_queue(priv->hw, txq_id); 1295 ieee80211_wake_queue(priv->hw, txq_id);
1286 else 1296 else
1287 ieee80211_wake_queue(priv->hw, ampdu_q); 1297 ieee80211_wake_queue(priv->hw,
1298 txq->swq_id);
1288 } 1299 }
1289 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1290 } 1300 }
1291 } else { 1301 } else {
1292 info->status.retry_count = tx_resp->failure_frame; 1302 BUG_ON(txq_id != txq->swq_id);
1293 info->flags = 1303
1294 iwl_is_tx_success(status) ? IEEE80211_TX_STAT_ACK : 0; 1304 info->status.rates[0].count = tx_resp->failure_frame + 1;
1305 info->flags |= iwl_is_tx_success(status) ?
1306 IEEE80211_TX_STAT_ACK : 0;
1295 iwl_hwrate_to_tx_control(priv, 1307 iwl_hwrate_to_tx_control(priv,
1296 le32_to_cpu(tx_resp->rate_n_flags), 1308 le32_to_cpu(tx_resp->rate_n_flags),
1297 info); 1309 info);
1298 1310
1299 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags " 1311 IWL_DEBUG_TX_REPLY("TXQ %d status %s (0x%08x) rate_n_flags "
1300 "0x%x retries %d\n", txq_id, 1312 "0x%x retries %d\n",
1301 iwl_get_tx_fail_reason(status), 1313 txq_id,
1302 status, le32_to_cpu(tx_resp->rate_n_flags), 1314 iwl_get_tx_fail_reason(status), status,
1303 tx_resp->failure_frame); 1315 le32_to_cpu(tx_resp->rate_n_flags),
1316 tx_resp->failure_frame);
1304 1317
1305 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); 1318 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1306 if (index != -1) { 1319 if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
1307 int freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1308 if (tid != MAX_TID_COUNT)
1309 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; 1320 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1310 if (iwl_queue_space(&txq->q) > txq->q.low_mark && 1321
1311 (txq_id >= 0) && priv->mac80211_registered) 1322 if (priv->mac80211_registered &&
1323 (iwl_queue_space(&txq->q) > txq->q.low_mark))
1312 ieee80211_wake_queue(priv->hw, txq_id); 1324 ieee80211_wake_queue(priv->hw, txq_id);
1313 if (tid != MAX_TID_COUNT)
1314 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1315 }
1316 } 1325 }
1317 1326
1327 if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
1328 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1329
1318 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 1330 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
1319 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); 1331 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
1320} 1332}
1321 1333
1322/* Currently 5000 is the supperset of everything */ 1334/* Currently 5000 is the superset of everything */
1323static u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len) 1335static u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len)
1324{ 1336{
1325 return len; 1337 return len;
@@ -1466,9 +1478,6 @@ static struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
1466 1478
1467static struct iwl_lib_ops iwl5000_lib = { 1479static struct iwl_lib_ops iwl5000_lib = {
1468 .set_hw_params = iwl5000_hw_set_hw_params, 1480 .set_hw_params = iwl5000_hw_set_hw_params,
1469 .alloc_shared_mem = iwl5000_alloc_shared_mem,
1470 .free_shared_mem = iwl5000_free_shared_mem,
1471 .shared_mem_rx_idx = iwl5000_shared_mem_rx_idx,
1472 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 1481 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
1473 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 1482 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
1474 .txq_set_sched = iwl5000_txq_set_sched, 1483 .txq_set_sched = iwl5000_txq_set_sched,
@@ -1482,13 +1491,13 @@ static struct iwl_lib_ops iwl5000_lib = {
1482 .alive_notify = iwl5000_alive_notify, 1491 .alive_notify = iwl5000_alive_notify,
1483 .send_tx_power = iwl5000_send_tx_power, 1492 .send_tx_power = iwl5000_send_tx_power,
1484 .temperature = iwl5000_temperature, 1493 .temperature = iwl5000_temperature,
1485 .update_chain_flags = iwl4965_update_chain_flags, 1494 .update_chain_flags = iwl_update_chain_flags,
1486 .apm_ops = { 1495 .apm_ops = {
1487 .init = iwl5000_apm_init, 1496 .init = iwl5000_apm_init,
1488 .reset = iwl5000_apm_reset, 1497 .reset = iwl5000_apm_reset,
1489 .stop = iwl5000_apm_stop, 1498 .stop = iwl5000_apm_stop,
1490 .config = iwl5000_nic_config, 1499 .config = iwl5000_nic_config,
1491 .set_pwr_src = iwl4965_set_pwr_src, 1500 .set_pwr_src = iwl_set_pwr_src,
1492 }, 1501 },
1493 .eeprom_ops = { 1502 .eeprom_ops = {
1494 .regulatory_bands = { 1503 .regulatory_bands = {
@@ -1503,7 +1512,7 @@ static struct iwl_lib_ops iwl5000_lib = {
1503 .verify_signature = iwlcore_eeprom_verify_signature, 1512 .verify_signature = iwlcore_eeprom_verify_signature,
1504 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 1513 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
1505 .release_semaphore = iwlcore_eeprom_release_semaphore, 1514 .release_semaphore = iwlcore_eeprom_release_semaphore,
1506 .check_version = iwl5000_eeprom_check_version, 1515 .calib_version = iwl5000_eeprom_calib_version,
1507 .query_addr = iwl5000_eeprom_query_addr, 1516 .query_addr = iwl5000_eeprom_query_addr,
1508 }, 1517 },
1509}; 1518};
@@ -1517,7 +1526,6 @@ static struct iwl_ops iwl5000_ops = {
1517static struct iwl_mod_params iwl50_mod_params = { 1526static struct iwl_mod_params iwl50_mod_params = {
1518 .num_of_queues = IWL50_NUM_QUEUES, 1527 .num_of_queues = IWL50_NUM_QUEUES,
1519 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 1528 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1520 .enable_qos = 1,
1521 .amsdu_size_8K = 1, 1529 .amsdu_size_8K = 1,
1522 .restart_fw = 1, 1530 .restart_fw = 1,
1523 /* the rest are 0 by default */ 1531 /* the rest are 0 by default */
@@ -1526,50 +1534,84 @@ static struct iwl_mod_params iwl50_mod_params = {
1526 1534
1527struct iwl_cfg iwl5300_agn_cfg = { 1535struct iwl_cfg iwl5300_agn_cfg = {
1528 .name = "5300AGN", 1536 .name = "5300AGN",
1529 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode", 1537 .fw_name_pre = IWL5000_FW_PRE,
1538 .ucode_api_max = IWL5000_UCODE_API_MAX,
1539 .ucode_api_min = IWL5000_UCODE_API_MIN,
1530 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 1540 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1531 .ops = &iwl5000_ops, 1541 .ops = &iwl5000_ops,
1532 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1542 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1543 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1544 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1533 .mod_params = &iwl50_mod_params, 1545 .mod_params = &iwl50_mod_params,
1534}; 1546};
1535 1547
1536struct iwl_cfg iwl5100_bg_cfg = { 1548struct iwl_cfg iwl5100_bg_cfg = {
1537 .name = "5100BG", 1549 .name = "5100BG",
1538 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode", 1550 .fw_name_pre = IWL5000_FW_PRE,
1551 .ucode_api_max = IWL5000_UCODE_API_MAX,
1552 .ucode_api_min = IWL5000_UCODE_API_MIN,
1539 .sku = IWL_SKU_G, 1553 .sku = IWL_SKU_G,
1540 .ops = &iwl5000_ops, 1554 .ops = &iwl5000_ops,
1541 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1555 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1556 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1557 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1542 .mod_params = &iwl50_mod_params, 1558 .mod_params = &iwl50_mod_params,
1543}; 1559};
1544 1560
1545struct iwl_cfg iwl5100_abg_cfg = { 1561struct iwl_cfg iwl5100_abg_cfg = {
1546 .name = "5100ABG", 1562 .name = "5100ABG",
1547 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode", 1563 .fw_name_pre = IWL5000_FW_PRE,
1564 .ucode_api_max = IWL5000_UCODE_API_MAX,
1565 .ucode_api_min = IWL5000_UCODE_API_MIN,
1548 .sku = IWL_SKU_A|IWL_SKU_G, 1566 .sku = IWL_SKU_A|IWL_SKU_G,
1549 .ops = &iwl5000_ops, 1567 .ops = &iwl5000_ops,
1550 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1568 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1569 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1570 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1551 .mod_params = &iwl50_mod_params, 1571 .mod_params = &iwl50_mod_params,
1552}; 1572};
1553 1573
1554struct iwl_cfg iwl5100_agn_cfg = { 1574struct iwl_cfg iwl5100_agn_cfg = {
1555 .name = "5100AGN", 1575 .name = "5100AGN",
1556 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode", 1576 .fw_name_pre = IWL5000_FW_PRE,
1577 .ucode_api_max = IWL5000_UCODE_API_MAX,
1578 .ucode_api_min = IWL5000_UCODE_API_MIN,
1557 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 1579 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1558 .ops = &iwl5000_ops, 1580 .ops = &iwl5000_ops,
1559 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1581 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1582 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1583 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1560 .mod_params = &iwl50_mod_params, 1584 .mod_params = &iwl50_mod_params,
1561}; 1585};
1562 1586
1563struct iwl_cfg iwl5350_agn_cfg = { 1587struct iwl_cfg iwl5350_agn_cfg = {
1564 .name = "5350AGN", 1588 .name = "5350AGN",
1565 .fw_name = "iwlwifi-5000" IWL5000_UCODE_API ".ucode", 1589 .fw_name_pre = IWL5000_FW_PRE,
1590 .ucode_api_max = IWL5000_UCODE_API_MAX,
1591 .ucode_api_min = IWL5000_UCODE_API_MIN,
1592 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1593 .ops = &iwl5000_ops,
1594 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1595 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1596 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1597 .mod_params = &iwl50_mod_params,
1598};
1599
1600struct iwl_cfg iwl5150_agn_cfg = {
1601 .name = "5150AGN",
1602 .fw_name_pre = IWL5150_FW_PRE,
1603 .ucode_api_max = IWL5150_UCODE_API_MAX,
1604 .ucode_api_min = IWL5150_UCODE_API_MIN,
1566 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 1605 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1567 .ops = &iwl5000_ops, 1606 .ops = &iwl5000_ops,
1568 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1607 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1608 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1609 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1569 .mod_params = &iwl50_mod_params, 1610 .mod_params = &iwl50_mod_params,
1570}; 1611};
1571 1612
1572MODULE_FIRMWARE("iwlwifi-5000" IWL5000_UCODE_API ".ucode"); 1613MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
1614MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
1573 1615
1574module_param_named(disable50, iwl50_mod_params.disable, int, 0444); 1616module_param_named(disable50, iwl50_mod_params.disable, int, 0444);
1575MODULE_PARM_DESC(disable50, 1617MODULE_PARM_DESC(disable50,
@@ -1577,12 +1619,10 @@ MODULE_PARM_DESC(disable50,
1577module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, 0444); 1619module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, 0444);
1578MODULE_PARM_DESC(swcrypto50, 1620MODULE_PARM_DESC(swcrypto50,
1579 "using software crypto engine (default 0 [hardware])\n"); 1621 "using software crypto engine (default 0 [hardware])\n");
1580module_param_named(debug50, iwl50_mod_params.debug, int, 0444); 1622module_param_named(debug50, iwl50_mod_params.debug, uint, 0444);
1581MODULE_PARM_DESC(debug50, "50XX debug output mask"); 1623MODULE_PARM_DESC(debug50, "50XX debug output mask");
1582module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, 0444); 1624module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, 0444);
1583MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series"); 1625MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
1584module_param_named(qos_enable50, iwl50_mod_params.enable_qos, int, 0444);
1585MODULE_PARM_DESC(qos_enable50, "enable all 50XX QoS functionality");
1586module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, 0444); 1626module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, 0444);
1587MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality"); 1627MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality");
1588module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K, int, 0444); 1628module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K, int, 0444);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd-check.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd-check.c
new file mode 100644
index 000000000000..b8137eeae1db
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd-check.c
@@ -0,0 +1,108 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <net/mac80211.h>
31#include "iwl-dev.h"
32#include "iwl-debug.h"
33#include "iwl-commands.h"
34
35
36/**
37 * iwl_check_rxon_cmd - validate RXON structure is valid
38 *
39 * NOTE: This is really only useful during development and can eventually
40 * be #ifdef'd out once the driver is stable and folks aren't actively
41 * making changes
42 */
43int iwl_agn_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
44{
45 int error = 0;
46 int counter = 1;
47
48 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
49 error |= le32_to_cpu(rxon->flags &
50 (RXON_FLG_TGJ_NARROW_BAND_MSK |
51 RXON_FLG_RADAR_DETECT_MSK));
52 if (error)
53 IWL_WARNING("check 24G fields %d | %d\n",
54 counter++, error);
55 } else {
56 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
57 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
58 if (error)
59 IWL_WARNING("check 52 fields %d | %d\n",
60 counter++, error);
61 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
62 if (error)
63 IWL_WARNING("check 52 CCK %d | %d\n",
64 counter++, error);
65 }
66 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
67 if (error)
68 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
69
70 /* make sure basic rates 6Mbps and 1Mbps are supported */
71 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
72 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
73 if (error)
74 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
75
76 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
77 if (error)
78 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
79
80 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
81 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
82 if (error)
83 IWL_WARNING("check CCK and short slot %d | %d\n",
84 counter++, error);
85
86 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
87 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
88 if (error)
89 IWL_WARNING("check CCK & auto detect %d | %d\n",
90 counter++, error);
91
92 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
93 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
94 if (error)
95 IWL_WARNING("check TGG and auto detect %d | %d\n",
96 counter++, error);
97
98 if (error)
99 IWL_WARNING("Tuning to channel %d\n",
100 le16_to_cpu(rxon->channel));
101
102 if (error) {
103 IWL_ERROR("Not a valid iwl4965_rxon_assoc_cmd field values\n");
104 return -1;
105 }
106 return 0;
107}
108
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index e2a58e477036..f3f17929ca0b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -19,7 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
@@ -38,7 +38,6 @@
38#include "iwl-dev.h" 38#include "iwl-dev.h"
39#include "iwl-sta.h" 39#include "iwl-sta.h"
40#include "iwl-core.h" 40#include "iwl-core.h"
41#include "iwl-helpers.h"
42 41
43#define RS_NAME "iwl-agn-rs" 42#define RS_NAME "iwl-agn-rs"
44 43
@@ -188,7 +187,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
188 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits 187 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
189 * "G" is the only table that supports CCK (the first 4 rates). 188 * "G" is the only table that supports CCK (the first 4 rates).
190 */ 189 */
191/*FIXME:RS:need to spearate tables for MIMO2/MIMO3*/ 190/*FIXME:RS:need to separate tables for MIMO2/MIMO3*/
192static s32 expected_tpt_A[IWL_RATE_COUNT] = { 191static s32 expected_tpt_A[IWL_RATE_COUNT] = {
193 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186 192 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186
194}; 193};
@@ -281,10 +280,9 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
281 u32 time_diff; 280 u32 time_diff;
282 s32 index; 281 s32 index;
283 struct iwl_traffic_load *tl = NULL; 282 struct iwl_traffic_load *tl = NULL;
284 __le16 fc = hdr->frame_control;
285 u8 tid; 283 u8 tid;
286 284
287 if (ieee80211_is_data_qos(fc)) { 285 if (ieee80211_is_data_qos(hdr->frame_control)) {
288 u8 *qc = ieee80211_get_qos_ctl(hdr); 286 u8 *qc = ieee80211_get_qos_ctl(hdr);
289 tid = qc[0] & 0xf; 287 tid = qc[0] & 0xf;
290 } else 288 } else
@@ -357,11 +355,9 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
357 struct iwl_lq_sta *lq_data, u8 tid, 355 struct iwl_lq_sta *lq_data, u8 tid,
358 struct ieee80211_sta *sta) 356 struct ieee80211_sta *sta)
359{ 357{
360 DECLARE_MAC_BUF(mac);
361
362 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { 358 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
363 IWL_DEBUG_HT("Starting Tx agg: STA: %s tid: %d\n", 359 IWL_DEBUG_HT("Starting Tx agg: STA: %pM tid: %d\n",
364 print_mac(mac, sta->addr), tid); 360 sta->addr, tid);
365 ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid); 361 ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid);
366 } 362 }
367} 363}
@@ -775,7 +771,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
775 int status; 771 int status;
776 u8 retries; 772 u8 retries;
777 int rs_index, index = 0; 773 int rs_index, index = 0;
778 struct iwl_lq_sta *lq_sta; 774 struct iwl_lq_sta *lq_sta = priv_sta;
779 struct iwl_link_quality_cmd *table; 775 struct iwl_link_quality_cmd *table;
780 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 776 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
781 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 777 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
@@ -787,12 +783,12 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
787 struct iwl_scale_tbl_info tbl_type; 783 struct iwl_scale_tbl_info tbl_type;
788 struct iwl_scale_tbl_info *curr_tbl, *search_tbl; 784 struct iwl_scale_tbl_info *curr_tbl, *search_tbl;
789 u8 active_index = 0; 785 u8 active_index = 0;
790 __le16 fc = hdr->frame_control;
791 s32 tpt = 0; 786 s32 tpt = 0;
792 787
793 IWL_DEBUG_RATE_LIMIT("get frame ack response, update rate scale window\n"); 788 IWL_DEBUG_RATE_LIMIT("get frame ack response, update rate scale window\n");
794 789
795 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) 790 if (!ieee80211_is_data(hdr->frame_control) ||
791 is_multicast_ether_addr(hdr->addr1))
796 return; 792 return;
797 793
798 /* This packet was aggregated but doesn't carry rate scale info */ 794 /* This packet was aggregated but doesn't carry rate scale info */
@@ -800,13 +796,11 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
800 !(info->flags & IEEE80211_TX_STAT_AMPDU)) 796 !(info->flags & IEEE80211_TX_STAT_AMPDU))
801 return; 797 return;
802 798
803 retries = info->status.retry_count; 799 retries = info->status.rates[0].count - 1;
804 800
805 if (retries > 15) 801 if (retries > 15)
806 retries = 15; 802 retries = 15;
807 803
808 lq_sta = (struct iwl_lq_sta *)priv_sta;
809
810 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) && 804 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
811 !lq_sta->ibss_sta_added) 805 !lq_sta->ibss_sta_added)
812 goto out; 806 goto out;
@@ -832,21 +826,20 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
832 if (priv->band == IEEE80211_BAND_5GHZ) 826 if (priv->band == IEEE80211_BAND_5GHZ)
833 rs_index -= IWL_FIRST_OFDM_RATE; 827 rs_index -= IWL_FIRST_OFDM_RATE;
834 828
835 if ((info->tx_rate_idx < 0) || 829 if ((info->status.rates[0].idx < 0) ||
836 (tbl_type.is_SGI ^ 830 (tbl_type.is_SGI != !!(info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)) ||
837 !!(info->flags & IEEE80211_TX_CTL_SHORT_GI)) || 831 (tbl_type.is_fat != !!(info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
838 (tbl_type.is_fat ^ 832 (tbl_type.is_dup != !!(info->status.rates[0].flags & IEEE80211_TX_RC_DUP_DATA)) ||
839 !!(info->flags & IEEE80211_TX_CTL_40_MHZ_WIDTH)) || 833 (tbl_type.ant_type != info->antenna_sel_tx) ||
840 (tbl_type.is_dup ^ 834 (!!(tx_rate & RATE_MCS_HT_MSK) != !!(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) ||
841 !!(info->flags & IEEE80211_TX_CTL_DUP_DATA)) || 835 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
842 (tbl_type.ant_type ^ info->antenna_sel_tx) ||
843 (!!(tx_rate & RATE_MCS_HT_MSK) ^
844 !!(info->flags & IEEE80211_TX_CTL_OFDM_HT)) ||
845 (!!(tx_rate & RATE_MCS_GF_MSK) ^
846 !!(info->flags & IEEE80211_TX_CTL_GREEN_FIELD)) ||
847 (hw->wiphy->bands[priv->band]->bitrates[rs_index].bitrate != 836 (hw->wiphy->bands[priv->band]->bitrates[rs_index].bitrate !=
848 hw->wiphy->bands[info->band]->bitrates[info->tx_rate_idx].bitrate)) { 837 hw->wiphy->bands[info->band]->bitrates[info->status.rates[0].idx].bitrate)) {
849 IWL_DEBUG_RATE("initial rate does not match 0x%x\n", tx_rate); 838 IWL_DEBUG_RATE("initial rate does not match 0x%x\n", tx_rate);
839 /* the last LQ command could failed so the LQ in ucode not
840 * the same in driver sync up
841 */
842 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
850 goto out; 843 goto out;
851 } 844 }
852 845
@@ -1135,11 +1128,10 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1135 s32 rate; 1128 s32 rate;
1136 s8 is_green = lq_sta->is_green; 1129 s8 is_green = lq_sta->is_green;
1137 1130
1138 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) || 1131 if (!conf->ht.enabled || !sta->ht_cap.ht_supported)
1139 !sta->ht_info.ht_supported)
1140 return -1; 1132 return -1;
1141 1133
1142 if (((sta->ht_info.cap & IEEE80211_HT_CAP_SM_PS) >> 2) 1134 if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
1143 == WLAN_HT_CAP_SM_PS_STATIC) 1135 == WLAN_HT_CAP_SM_PS_STATIC)
1144 return -1; 1136 return -1;
1145 1137
@@ -1203,8 +1195,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
1203 u8 is_green = lq_sta->is_green; 1195 u8 is_green = lq_sta->is_green;
1204 s32 rate; 1196 s32 rate;
1205 1197
1206 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) || 1198 if (!conf->ht.enabled || !sta->ht_cap.ht_supported)
1207 !sta->ht_info.ht_supported)
1208 return -1; 1199 return -1;
1209 1200
1210 IWL_DEBUG_RATE("LQ: try to switch to SISO\n"); 1201 IWL_DEBUG_RATE("LQ: try to switch to SISO\n");
@@ -1684,7 +1675,6 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1684 int high_tpt = IWL_INVALID_VALUE; 1675 int high_tpt = IWL_INVALID_VALUE;
1685 u32 fail_count; 1676 u32 fail_count;
1686 s8 scale_action = 0; 1677 s8 scale_action = 0;
1687 __le16 fc;
1688 u16 rate_mask; 1678 u16 rate_mask;
1689 u8 update_lq = 0; 1679 u8 update_lq = 0;
1690 struct iwl_scale_tbl_info *tbl, *tbl1; 1680 struct iwl_scale_tbl_info *tbl, *tbl1;
@@ -1699,13 +1689,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
1699 1689
1700 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n"); 1690 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n");
1701 1691
1702 fc = hdr->frame_control; 1692 /* Send management frames and broadcast/multicast data using
1703 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) { 1693 * lowest rate. */
1704 /* Send management frames and broadcast/multicast data using 1694 /* TODO: this could probably be improved.. */
1705 * lowest rate. */ 1695 if (!ieee80211_is_data(hdr->frame_control) ||
1706 /* TODO: this could probably be improved.. */ 1696 is_multicast_ether_addr(hdr->addr1))
1707 return; 1697 return;
1708 }
1709 1698
1710 if (!sta || !lq_sta) 1699 if (!sta || !lq_sta)
1711 return; 1700 return;
@@ -2003,9 +1992,8 @@ lq_update:
2003 * stay with best antenna legacy modulation for a while 1992 * stay with best antenna legacy modulation for a while
2004 * before next round of mode comparisons. */ 1993 * before next round of mode comparisons. */
2005 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); 1994 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2006 if (is_legacy(tbl1->lq_type) && 1995 if (is_legacy(tbl1->lq_type) && !conf->ht.enabled &&
2007 (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)) && 1996 lq_sta->action_counter >= 1) {
2008 (lq_sta->action_counter >= 1)) {
2009 lq_sta->action_counter = 0; 1997 lq_sta->action_counter = 0;
2010 IWL_DEBUG_RATE("LQ: STAY in legacy table\n"); 1998 IWL_DEBUG_RATE("LQ: STAY in legacy table\n");
2011 rs_set_stay_in_table(priv, 1, lq_sta); 1999 rs_set_stay_in_table(priv, 1, lq_sta);
@@ -2081,15 +2069,13 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2081 if ((i < 0) || (i >= IWL_RATE_COUNT)) 2069 if ((i < 0) || (i >= IWL_RATE_COUNT))
2082 i = 0; 2070 i = 0;
2083 2071
2084 /* FIXME:RS: This is also wrong in 4965 */
2085 rate = iwl_rates[i].plcp; 2072 rate = iwl_rates[i].plcp;
2086 rate |= RATE_MCS_ANT_B_MSK; 2073 tbl->ant_type = first_antenna(valid_tx_ant);
2087 rate &= ~RATE_MCS_ANT_A_MSK; 2074 rate |= tbl->ant_type << RATE_MCS_ANT_POS;
2088 2075
2089 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) 2076 if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
2090 rate |= RATE_MCS_CCK_MSK; 2077 rate |= RATE_MCS_CCK_MSK;
2091 2078
2092 tbl->ant_type = ANT_B;
2093 rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx); 2079 rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
2094 if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) 2080 if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
2095 rs_toggle_antenna(valid_tx_ant, &rate, tbl); 2081 rs_toggle_antenna(valid_tx_ant, &rate, tbl);
@@ -2103,40 +2089,38 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2103 return; 2089 return;
2104} 2090}
2105 2091
2106static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband, 2092static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2107 struct ieee80211_sta *sta, void *priv_sta, 2093 struct ieee80211_tx_rate_control *txrc)
2108 struct sk_buff *skb, struct rate_selection *sel)
2109{ 2094{
2110 2095
2111 int i; 2096 struct sk_buff *skb = txrc->skb;
2097 struct ieee80211_supported_band *sband = txrc->sband;
2112 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 2098 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
2113 struct ieee80211_conf *conf = &priv->hw->conf; 2099 struct ieee80211_conf *conf = &priv->hw->conf;
2114 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2100 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2115 __le16 fc; 2101 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2116 struct iwl_lq_sta *lq_sta; 2102 struct iwl_lq_sta *lq_sta = priv_sta;
2103 int rate_idx;
2117 2104
2118 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); 2105 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n");
2119 2106
2120 /* Send management frames and broadcast/multicast data using lowest 2107 /* Send management frames and broadcast/multicast data using lowest
2121 * rate. */ 2108 * rate. */
2122 fc = hdr->frame_control; 2109 if (!ieee80211_is_data(hdr->frame_control) ||
2123 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) || 2110 is_multicast_ether_addr(hdr->addr1) || !sta || !lq_sta) {
2124 !sta || !priv_sta) { 2111 info->control.rates[0].idx = rate_lowest_index(sband, sta);
2125 sel->rate_idx = rate_lowest_index(sband, sta);
2126 return; 2112 return;
2127 } 2113 }
2128 2114
2129 lq_sta = (struct iwl_lq_sta *)priv_sta; 2115 rate_idx = lq_sta->last_txrate_idx;
2130 i = lq_sta->last_txrate_idx;
2131 2116
2132 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) && 2117 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
2133 !lq_sta->ibss_sta_added) { 2118 !lq_sta->ibss_sta_added) {
2134 u8 sta_id = iwl_find_station(priv, hdr->addr1); 2119 u8 sta_id = iwl_find_station(priv, hdr->addr1);
2135 DECLARE_MAC_BUF(mac);
2136 2120
2137 if (sta_id == IWL_INVALID_STATION) { 2121 if (sta_id == IWL_INVALID_STATION) {
2138 IWL_DEBUG_RATE("LQ: ADD station %s\n", 2122 IWL_DEBUG_RATE("LQ: ADD station %pM\n",
2139 print_mac(mac, hdr->addr1)); 2123 hdr->addr1);
2140 sta_id = iwl_add_station_flags(priv, hdr->addr1, 2124 sta_id = iwl_add_station_flags(priv, hdr->addr1,
2141 0, CMD_ASYNC, NULL); 2125 0, CMD_ASYNC, NULL);
2142 } 2126 }
@@ -2148,14 +2132,12 @@ static void rs_get_rate(void *priv_r, struct ieee80211_supported_band *sband,
2148 } 2132 }
2149 } 2133 }
2150 2134
2151 if ((i < 0) || (i > IWL_RATE_COUNT)) { 2135 if (rate_idx < 0 || rate_idx > IWL_RATE_COUNT)
2152 sel->rate_idx = rate_lowest_index(sband, sta); 2136 rate_idx = rate_lowest_index(sband, sta);
2153 return; 2137 else if (sband->band == IEEE80211_BAND_5GHZ)
2154 } 2138 rate_idx -= IWL_FIRST_OFDM_RATE;
2155 2139
2156 if (sband->band == IEEE80211_BAND_5GHZ) 2140 info->control.rates[0].idx = rate_idx;
2157 i -= IWL_FIRST_OFDM_RATE;
2158 sel->rate_idx = i;
2159} 2141}
2160 2142
2161static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, 2143static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
@@ -2189,6 +2171,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2189 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 2171 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
2190 struct ieee80211_conf *conf = &priv->hw->conf; 2172 struct ieee80211_conf *conf = &priv->hw->conf;
2191 struct iwl_lq_sta *lq_sta = priv_sta; 2173 struct iwl_lq_sta *lq_sta = priv_sta;
2174 u16 mask_bit = 0;
2192 2175
2193 lq_sta->flush_timer = 0; 2176 lq_sta->flush_timer = 0;
2194 lq_sta->supp_rates = sta->supp_rates[sband->band]; 2177 lq_sta->supp_rates = sta->supp_rates[sband->band];
@@ -2205,15 +2188,12 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2205 lq_sta->ibss_sta_added = 0; 2188 lq_sta->ibss_sta_added = 0;
2206 if (priv->iw_mode == NL80211_IFTYPE_AP) { 2189 if (priv->iw_mode == NL80211_IFTYPE_AP) {
2207 u8 sta_id = iwl_find_station(priv, sta->addr); 2190 u8 sta_id = iwl_find_station(priv, sta->addr);
2208 DECLARE_MAC_BUF(mac);
2209 2191
2210 /* for IBSS the call are from tasklet */ 2192 /* for IBSS the call are from tasklet */
2211 IWL_DEBUG_RATE("LQ: ADD station %s\n", 2193 IWL_DEBUG_RATE("LQ: ADD station %pM\n", sta->addr);
2212 print_mac(mac, sta->addr));
2213 2194
2214 if (sta_id == IWL_INVALID_STATION) { 2195 if (sta_id == IWL_INVALID_STATION) {
2215 IWL_DEBUG_RATE("LQ: ADD station %s\n", 2196 IWL_DEBUG_RATE("LQ: ADD station %pM\n", sta->addr);
2216 print_mac(mac, sta->addr));
2217 sta_id = iwl_add_station_flags(priv, sta->addr, 2197 sta_id = iwl_add_station_flags(priv, sta->addr,
2218 0, CMD_ASYNC, NULL); 2198 0, CMD_ASYNC, NULL);
2219 } 2199 }
@@ -2225,16 +2205,6 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2225 priv->assoc_station_added = 1; 2205 priv->assoc_station_added = 1;
2226 } 2206 }
2227 2207
2228 /* Find highest tx rate supported by hardware and destination station */
2229 lq_sta->last_txrate_idx = 3;
2230 for (i = 0; i < sband->n_bitrates; i++)
2231 if (sta->supp_rates[sband->band] & BIT(i))
2232 lq_sta->last_txrate_idx = i;
2233
2234 /* For MODE_IEEE80211A, skip over cck rates in global rate table */
2235 if (sband->band == IEEE80211_BAND_5GHZ)
2236 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2237
2238 lq_sta->is_dup = 0; 2208 lq_sta->is_dup = 0;
2239 lq_sta->is_green = rs_use_green(priv, conf); 2209 lq_sta->is_green = rs_use_green(priv, conf);
2240 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); 2210 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
@@ -2244,19 +2214,19 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2244 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), 2214 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2245 * supp_rates[] does not; shift to convert format, force 9 MBits off. 2215 * supp_rates[] does not; shift to convert format, force 9 MBits off.
2246 */ 2216 */
2247 lq_sta->active_siso_rate = conf->ht_conf.supp_mcs_set[0] << 1; 2217 lq_sta->active_siso_rate = sta->ht_cap.mcs.rx_mask[0] << 1;
2248 lq_sta->active_siso_rate |= conf->ht_conf.supp_mcs_set[0] & 0x1; 2218 lq_sta->active_siso_rate |= sta->ht_cap.mcs.rx_mask[0] & 0x1;
2249 lq_sta->active_siso_rate &= ~((u16)0x2); 2219 lq_sta->active_siso_rate &= ~((u16)0x2);
2250 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; 2220 lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
2251 2221
2252 /* Same here */ 2222 /* Same here */
2253 lq_sta->active_mimo2_rate = conf->ht_conf.supp_mcs_set[1] << 1; 2223 lq_sta->active_mimo2_rate = sta->ht_cap.mcs.rx_mask[1] << 1;
2254 lq_sta->active_mimo2_rate |= conf->ht_conf.supp_mcs_set[1] & 0x1; 2224 lq_sta->active_mimo2_rate |= sta->ht_cap.mcs.rx_mask[1] & 0x1;
2255 lq_sta->active_mimo2_rate &= ~((u16)0x2); 2225 lq_sta->active_mimo2_rate &= ~((u16)0x2);
2256 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; 2226 lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
2257 2227
2258 lq_sta->active_mimo3_rate = conf->ht_conf.supp_mcs_set[2] << 1; 2228 lq_sta->active_mimo3_rate = sta->ht_cap.mcs.rx_mask[2] << 1;
2259 lq_sta->active_mimo3_rate |= conf->ht_conf.supp_mcs_set[2] & 0x1; 2229 lq_sta->active_mimo3_rate |= sta->ht_cap.mcs.rx_mask[2] & 0x1;
2260 lq_sta->active_mimo3_rate &= ~((u16)0x2); 2230 lq_sta->active_mimo3_rate &= ~((u16)0x2);
2261 lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE; 2231 lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
2262 2232
@@ -2265,7 +2235,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2265 lq_sta->active_mimo2_rate, 2235 lq_sta->active_mimo2_rate,
2266 lq_sta->active_mimo3_rate); 2236 lq_sta->active_mimo3_rate);
2267 2237
2268 /* These values will be overriden later */ 2238 /* These values will be overridden later */
2269 lq_sta->lq.general_params.single_stream_ant_msk = ANT_A; 2239 lq_sta->lq.general_params.single_stream_ant_msk = ANT_A;
2270 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; 2240 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2271 2241
@@ -2273,6 +2243,17 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2273 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; 2243 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2274 lq_sta->drv = priv; 2244 lq_sta->drv = priv;
2275 2245
2246 /* Find highest tx rate supported by hardware and destination station */
2247 mask_bit = sta->supp_rates[sband->band] & lq_sta->active_legacy_rate;
2248 lq_sta->last_txrate_idx = 3;
2249 for (i = 0; i < sband->n_bitrates; i++)
2250 if (mask_bit & BIT(i))
2251 lq_sta->last_txrate_idx = i;
2252
2253 /* For MODE_IEEE80211A, skip over cck rates in global rate table */
2254 if (sband->band == IEEE80211_BAND_5GHZ)
2255 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2256
2276 rs_initialize_lq(priv, conf, sta, lq_sta); 2257 rs_initialize_lq(priv, conf, sta, lq_sta);
2277} 2258}
2278 2259
@@ -2405,19 +2386,6 @@ static void rs_free(void *priv_rate)
2405 return; 2386 return;
2406} 2387}
2407 2388
2408static void rs_clear(void *priv_rate)
2409{
2410#ifdef CONFIG_IWLWIFI_DEBUG
2411 struct iwl_priv *priv = (struct iwl_priv *) priv_rate;
2412
2413 IWL_DEBUG_RATE("enter\n");
2414
2415 /* TODO - add rate scale state reset */
2416
2417 IWL_DEBUG_RATE("leave\n");
2418#endif /* CONFIG_IWLWIFI_DEBUG */
2419}
2420
2421static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta, 2389static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
2422 void *priv_sta) 2390 void *priv_sta)
2423{ 2391{
@@ -2552,7 +2520,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
2552 for (i = 0; i < LQ_SIZE; i++) { 2520 for (i = 0; i < LQ_SIZE; i++) {
2553 desc += sprintf(buff+desc, "%s type=%d SGI=%d FAT=%d DUP=%d\n" 2521 desc += sprintf(buff+desc, "%s type=%d SGI=%d FAT=%d DUP=%d\n"
2554 "rate=0x%X\n", 2522 "rate=0x%X\n",
2555 lq_sta->active_tbl == i?"*":"x", 2523 lq_sta->active_tbl == i ? "*" : "x",
2556 lq_sta->lq_info[i].lq_type, 2524 lq_sta->lq_info[i].lq_type,
2557 lq_sta->lq_info[i].is_SGI, 2525 lq_sta->lq_info[i].is_SGI,
2558 lq_sta->lq_info[i].is_fat, 2526 lq_sta->lq_info[i].is_fat,
@@ -2605,7 +2573,6 @@ static struct rate_control_ops rs_ops = {
2605 .tx_status = rs_tx_status, 2573 .tx_status = rs_tx_status,
2606 .get_rate = rs_get_rate, 2574 .get_rate = rs_get_rate,
2607 .rate_init = rs_rate_init, 2575 .rate_init = rs_rate_init,
2608 .clear = rs_clear,
2609 .alloc = rs_alloc, 2576 .alloc = rs_alloc,
2610 .free = rs_free, 2577 .free = rs_free,
2611 .alloc_sta = rs_alloc_sta, 2578 .alloc_sta = rs_alloc_sta,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index d148d73635eb..78ee83adf742 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -19,7 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
@@ -229,7 +229,7 @@ enum {
229#define IWL_MIMO2_SWITCH_SISO_C 4 229#define IWL_MIMO2_SWITCH_SISO_C 4
230#define IWL_MIMO2_SWITCH_GI 5 230#define IWL_MIMO2_SWITCH_GI 5
231 231
232/*FIXME:RS:add posible acctions for MIMO3*/ 232/*FIXME:RS:add possible actions for MIMO3*/
233 233
234#define IWL_ACTION_LIMIT 3 /* # possible actions */ 234#define IWL_ACTION_LIMIT 3 /* # possible actions */
235 235
@@ -284,7 +284,17 @@ static inline u8 num_of_ant(u8 mask)
284 !!((mask) & ANT_C); 284 !!((mask) & ANT_C);
285} 285}
286 286
287static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index) 287static inline u8 first_antenna(u8 mask)
288{
289 if (mask & ANT_A)
290 return ANT_A;
291 if (mask & ANT_B)
292 return ANT_B;
293 return ANT_C;
294}
295
296
297static inline u8 iwl_get_prev_ieee_rate(u8 rate_index)
288{ 298{
289 u8 rate = iwl_rates[rate_index].prev_ieee; 299 u8 rate = iwl_rates[rate_index].prev_ieee;
290 300
@@ -294,11 +304,11 @@ static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index)
294} 304}
295 305
296/** 306/**
297 * iwl4965_rate_control_register - Register the rate control algorithm callbacks 307 * iwl_rate_control_register - Register the rate control algorithm callbacks
298 * 308 *
299 * Since the rate control algorithm is hardware specific, there is no need 309 * Since the rate control algorithm is hardware specific, there is no need
300 * or reason to place it as a stand alone module. The driver can call 310 * or reason to place it as a stand alone module. The driver can call
301 * iwl4965_rate_control_register in order to register the rate control callbacks 311 * iwl_rate_control_register in order to register the rate control callbacks
302 * with the mac80211 subsystem. This should be performed prior to calling 312 * with the mac80211 subsystem. This should be performed prior to calling
303 * ieee80211_register_hw 313 * ieee80211_register_hw
304 * 314 *
@@ -306,7 +316,7 @@ static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index)
306extern int iwlagn_rate_control_register(void); 316extern int iwlagn_rate_control_register(void);
307 317
308/** 318/**
309 * iwl4965_rate_control_unregister - Unregister the rate control callbacks 319 * iwl_rate_control_unregister - Unregister the rate control callbacks
310 * 320 *
311 * This should be called after calling ieee80211_unregister_hw, but before 321 * This should be called after calling ieee80211_unregister_hw, but before
312 * the driver is unloaded. 322 * the driver is unloaded.
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index c4c0371c763b..5da6b35cd26d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -22,7 +22,7 @@
22 * file called LICENSE. 22 * file called LICENSE.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
@@ -83,7 +83,7 @@
83 83
84MODULE_DESCRIPTION(DRV_DESCRIPTION); 84MODULE_DESCRIPTION(DRV_DESCRIPTION);
85MODULE_VERSION(DRV_VERSION); 85MODULE_VERSION(DRV_VERSION);
86MODULE_AUTHOR(DRV_COPYRIGHT); 86MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
87MODULE_LICENSE("GPL"); 87MODULE_LICENSE("GPL");
88MODULE_ALIAS("iwl4965"); 88MODULE_ALIAS("iwl4965");
89 89
@@ -96,7 +96,7 @@ MODULE_ALIAS("iwl4965");
96 96
97 97
98 98
99static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt) 99static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
100{ 100{
101 struct iwl_rxon_cmd *rxon = &priv->staging_rxon; 101 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
102 102
@@ -108,79 +108,6 @@ static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
108} 108}
109 109
110/** 110/**
111 * iwl4965_check_rxon_cmd - validate RXON structure is valid
112 *
113 * NOTE: This is really only useful during development and can eventually
114 * be #ifdef'd out once the driver is stable and folks aren't actively
115 * making changes
116 */
117static int iwl4965_check_rxon_cmd(struct iwl_rxon_cmd *rxon)
118{
119 int error = 0;
120 int counter = 1;
121
122 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
123 error |= le32_to_cpu(rxon->flags &
124 (RXON_FLG_TGJ_NARROW_BAND_MSK |
125 RXON_FLG_RADAR_DETECT_MSK));
126 if (error)
127 IWL_WARNING("check 24G fields %d | %d\n",
128 counter++, error);
129 } else {
130 error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
131 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
132 if (error)
133 IWL_WARNING("check 52 fields %d | %d\n",
134 counter++, error);
135 error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
136 if (error)
137 IWL_WARNING("check 52 CCK %d | %d\n",
138 counter++, error);
139 }
140 error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
141 if (error)
142 IWL_WARNING("check mac addr %d | %d\n", counter++, error);
143
144 /* make sure basic rates 6Mbps and 1Mbps are supported */
145 error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
146 ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
147 if (error)
148 IWL_WARNING("check basic rate %d | %d\n", counter++, error);
149
150 error |= (le16_to_cpu(rxon->assoc_id) > 2007);
151 if (error)
152 IWL_WARNING("check assoc id %d | %d\n", counter++, error);
153
154 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
155 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
156 if (error)
157 IWL_WARNING("check CCK and short slot %d | %d\n",
158 counter++, error);
159
160 error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
161 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
162 if (error)
163 IWL_WARNING("check CCK & auto detect %d | %d\n",
164 counter++, error);
165
166 error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
167 RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
168 if (error)
169 IWL_WARNING("check TGG and auto detect %d | %d\n",
170 counter++, error);
171
172 if (error)
173 IWL_WARNING("Tuning to channel %d\n",
174 le16_to_cpu(rxon->channel));
175
176 if (error) {
177 IWL_ERROR("Not a valid iwl4965_rxon_assoc_cmd field values\n");
178 return -1;
179 }
180 return 0;
181}
182
183/**
184 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed 111 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
185 * @priv: staging_rxon is compared to active_rxon 112 * @priv: staging_rxon is compared to active_rxon
186 * 113 *
@@ -228,18 +155,17 @@ static int iwl_full_rxon_required(struct iwl_priv *priv)
228} 155}
229 156
230/** 157/**
231 * iwl4965_commit_rxon - commit staging_rxon to hardware 158 * iwl_commit_rxon - commit staging_rxon to hardware
232 * 159 *
233 * The RXON command in staging_rxon is committed to the hardware and 160 * The RXON command in staging_rxon is committed to the hardware and
234 * the active_rxon structure is updated with the new data. This 161 * the active_rxon structure is updated with the new data. This
235 * function correctly transitions out of the RXON_ASSOC_MSK state if 162 * function correctly transitions out of the RXON_ASSOC_MSK state if
236 * a HW tune is required based on the RXON structure changes. 163 * a HW tune is required based on the RXON structure changes.
237 */ 164 */
238static int iwl4965_commit_rxon(struct iwl_priv *priv) 165static int iwl_commit_rxon(struct iwl_priv *priv)
239{ 166{
240 /* cast away the const for active_rxon in this function */ 167 /* cast away the const for active_rxon in this function */
241 struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon; 168 struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
242 DECLARE_MAC_BUF(mac);
243 int ret; 169 int ret;
244 bool new_assoc = 170 bool new_assoc =
245 !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK); 171 !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK);
@@ -253,14 +179,14 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
253 * 5000, but will not damage 4965 */ 179 * 5000, but will not damage 4965 */
254 priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN; 180 priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
255 181
256 ret = iwl4965_check_rxon_cmd(&priv->staging_rxon); 182 ret = iwl_agn_check_rxon_cmd(&priv->staging_rxon);
257 if (ret) { 183 if (ret) {
258 IWL_ERROR("Invalid RXON configuration. Not committing.\n"); 184 IWL_ERROR("Invalid RXON configuration. Not committing.\n");
259 return -EINVAL; 185 return -EINVAL;
260 } 186 }
261 187
262 /* If we don't need to send a full RXON, we can use 188 /* If we don't need to send a full RXON, we can use
263 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter 189 * iwl_rxon_assoc_cmd which is used to reconfigure filter
264 * and other flags for the current radio configuration. */ 190 * and other flags for the current radio configuration. */
265 if (!iwl_full_rxon_required(priv)) { 191 if (!iwl_full_rxon_required(priv)) {
266 ret = iwl_send_rxon_assoc(priv); 192 ret = iwl_send_rxon_assoc(priv);
@@ -300,12 +226,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
300 IWL_DEBUG_INFO("Sending RXON\n" 226 IWL_DEBUG_INFO("Sending RXON\n"
301 "* with%s RXON_FILTER_ASSOC_MSK\n" 227 "* with%s RXON_FILTER_ASSOC_MSK\n"
302 "* channel = %d\n" 228 "* channel = %d\n"
303 "* bssid = %s\n", 229 "* bssid = %pM\n",
304 (new_assoc ? "" : "out"), 230 (new_assoc ? "" : "out"),
305 le16_to_cpu(priv->staging_rxon.channel), 231 le16_to_cpu(priv->staging_rxon.channel),
306 print_mac(mac, priv->staging_rxon.bssid_addr)); 232 priv->staging_rxon.bssid_addr);
307 233
308 iwl4965_set_rxon_hwcrypto(priv, !priv->hw_params.sw_crypto); 234 iwl_set_rxon_hwcrypto(priv, !priv->hw_params.sw_crypto);
309 235
310 /* Apply the new configuration 236 /* Apply the new configuration
311 * RXON unassoc clears the station table in uCode, send it before 237 * RXON unassoc clears the station table in uCode, send it before
@@ -375,16 +301,16 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv)
375 return 0; 301 return 0;
376} 302}
377 303
378void iwl4965_update_chain_flags(struct iwl_priv *priv) 304void iwl_update_chain_flags(struct iwl_priv *priv)
379{ 305{
380 306
381 iwl_set_rxon_chain(priv); 307 iwl_set_rxon_chain(priv);
382 iwl4965_commit_rxon(priv); 308 iwl_commit_rxon(priv);
383} 309}
384 310
385static int iwl4965_send_bt_config(struct iwl_priv *priv) 311static int iwl_send_bt_config(struct iwl_priv *priv)
386{ 312{
387 struct iwl4965_bt_cmd bt_cmd = { 313 struct iwl_bt_cmd bt_cmd = {
388 .flags = 3, 314 .flags = 3,
389 .lead_time = 0xAA, 315 .lead_time = 0xAA,
390 .max_kill = 1, 316 .max_kill = 1,
@@ -393,7 +319,7 @@ static int iwl4965_send_bt_config(struct iwl_priv *priv)
393 }; 319 };
394 320
395 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, 321 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
396 sizeof(struct iwl4965_bt_cmd), &bt_cmd); 322 sizeof(struct iwl_bt_cmd), &bt_cmd);
397} 323}
398 324
399static void iwl_clear_free_frames(struct iwl_priv *priv) 325static void iwl_clear_free_frames(struct iwl_priv *priv)
@@ -445,7 +371,7 @@ static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
445 371
446static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv, 372static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
447 struct ieee80211_hdr *hdr, 373 struct ieee80211_hdr *hdr,
448 const u8 *dest, int left) 374 int left)
449{ 375{
450 if (!iwl_is_associated(priv) || !priv->ibss_beacon || 376 if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
451 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) && 377 ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
@@ -460,16 +386,16 @@ static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
460 return priv->ibss_beacon->len; 386 return priv->ibss_beacon->len;
461} 387}
462 388
463static u8 iwl4965_rate_get_lowest_plcp(struct iwl_priv *priv) 389static u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
464{ 390{
465 int i; 391 int i;
466 int rate_mask; 392 int rate_mask;
467 393
468 /* Set rate mask*/ 394 /* Set rate mask*/
469 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) 395 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
470 rate_mask = priv->active_rate_basic & 0xF; 396 rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
471 else 397 else
472 rate_mask = priv->active_rate_basic & 0xFF0; 398 rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
473 399
474 /* Find lowest valid rate */ 400 /* Find lowest valid rate */
475 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID; 401 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
@@ -485,7 +411,7 @@ static u8 iwl4965_rate_get_lowest_plcp(struct iwl_priv *priv)
485 return IWL_RATE_6M_PLCP; 411 return IWL_RATE_6M_PLCP;
486} 412}
487 413
488static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, 414static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
489 struct iwl_frame *frame, u8 rate) 415 struct iwl_frame *frame, u8 rate)
490{ 416{
491 struct iwl_tx_beacon_cmd *tx_beacon_cmd; 417 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
@@ -498,7 +424,6 @@ static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
498 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 424 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
499 425
500 frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame, 426 frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame,
501 iwl_bcast_addr,
502 sizeof(frame->u) - sizeof(*tx_beacon_cmd)); 427 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
503 428
504 BUG_ON(frame_size > MAX_MPDU_SIZE); 429 BUG_ON(frame_size > MAX_MPDU_SIZE);
@@ -517,7 +442,7 @@ static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
517 442
518 return sizeof(*tx_beacon_cmd) + frame_size; 443 return sizeof(*tx_beacon_cmd) + frame_size;
519} 444}
520static int iwl4965_send_beacon_cmd(struct iwl_priv *priv) 445static int iwl_send_beacon_cmd(struct iwl_priv *priv)
521{ 446{
522 struct iwl_frame *frame; 447 struct iwl_frame *frame;
523 unsigned int frame_size; 448 unsigned int frame_size;
@@ -532,9 +457,9 @@ static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
532 return -ENOMEM; 457 return -ENOMEM;
533 } 458 }
534 459
535 rate = iwl4965_rate_get_lowest_plcp(priv); 460 rate = iwl_rate_get_lowest_plcp(priv);
536 461
537 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate); 462 frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate);
538 463
539 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, 464 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
540 &frame->u.cmd[0]); 465 &frame->u.cmd[0]);
@@ -550,20 +475,33 @@ static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
550 * 475 *
551 ******************************************************************************/ 476 ******************************************************************************/
552 477
553static void iwl4965_ht_conf(struct iwl_priv *priv, 478static void iwl_ht_conf(struct iwl_priv *priv,
554 struct ieee80211_bss_conf *bss_conf) 479 struct ieee80211_bss_conf *bss_conf)
555{ 480{
556 struct ieee80211_ht_info *ht_conf = bss_conf->ht_conf; 481 struct ieee80211_sta_ht_cap *ht_conf;
557 struct ieee80211_ht_bss_info *ht_bss_conf = bss_conf->ht_bss_conf;
558 struct iwl_ht_info *iwl_conf = &priv->current_ht_config; 482 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
483 struct ieee80211_sta *sta;
559 484
560 IWL_DEBUG_MAC80211("enter: \n"); 485 IWL_DEBUG_MAC80211("enter: \n");
561 486
562 iwl_conf->is_ht = bss_conf->assoc_ht;
563
564 if (!iwl_conf->is_ht) 487 if (!iwl_conf->is_ht)
565 return; 488 return;
566 489
490
491 /*
492 * It is totally wrong to base global information on something
493 * that is valid only when associated, alas, this driver works
494 * that way and I don't know how to fix it.
495 */
496
497 rcu_read_lock();
498 sta = ieee80211_find_sta(priv->hw, priv->bssid);
499 if (!sta) {
500 rcu_read_unlock();
501 return;
502 }
503 ht_conf = &sta->ht_cap;
504
567 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20) 505 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
568 iwl_conf->sgf |= HT_SHORT_GI_20MHZ; 506 iwl_conf->sgf |= HT_SHORT_GI_20MHZ;
569 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40) 507 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
@@ -574,29 +512,36 @@ static void iwl4965_ht_conf(struct iwl_priv *priv,
574 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU); 512 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
575 513
576 iwl_conf->supported_chan_width = 514 iwl_conf->supported_chan_width =
577 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH); 515 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
578 iwl_conf->extension_chan_offset = 516
579 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET; 517 /*
518 * XXX: The HT configuration needs to be moved into iwl_mac_config()
519 * to be done there correctly.
520 */
521
522 iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
523 if (priv->hw->conf.ht.channel_type == NL80211_CHAN_HT40MINUS)
524 iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
525 else if(priv->hw->conf.ht.channel_type == NL80211_CHAN_HT40PLUS)
526 iwl_conf->extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
527
580 /* If no above or below channel supplied disable FAT channel */ 528 /* If no above or below channel supplied disable FAT channel */
581 if (iwl_conf->extension_chan_offset != IEEE80211_HT_IE_CHA_SEC_ABOVE && 529 if (iwl_conf->extension_chan_offset != IEEE80211_HT_PARAM_CHA_SEC_ABOVE &&
582 iwl_conf->extension_chan_offset != IEEE80211_HT_IE_CHA_SEC_BELOW) { 530 iwl_conf->extension_chan_offset != IEEE80211_HT_PARAM_CHA_SEC_BELOW)
583 iwl_conf->extension_chan_offset = IEEE80211_HT_IE_CHA_SEC_NONE;
584 iwl_conf->supported_chan_width = 0; 531 iwl_conf->supported_chan_width = 0;
585 }
586 532
587 iwl_conf->sm_ps = (u8)((ht_conf->cap & IEEE80211_HT_CAP_SM_PS) >> 2); 533 iwl_conf->sm_ps = (u8)((ht_conf->cap & IEEE80211_HT_CAP_SM_PS) >> 2);
588 534
589 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16); 535 memcpy(&iwl_conf->mcs, &ht_conf->mcs, 16);
590 536
591 iwl_conf->control_channel = ht_bss_conf->primary_channel; 537 iwl_conf->tx_chan_width = iwl_conf->supported_chan_width != 0;
592 iwl_conf->tx_chan_width =
593 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
594 iwl_conf->ht_protection = 538 iwl_conf->ht_protection =
595 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION; 539 bss_conf->ht.operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
596 iwl_conf->non_GF_STA_present = 540 iwl_conf->non_GF_STA_present =
597 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT); 541 !!(bss_conf->ht.operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
542
543 rcu_read_unlock();
598 544
599 IWL_DEBUG_MAC80211("control channel %d\n", iwl_conf->control_channel);
600 IWL_DEBUG_MAC80211("leave\n"); 545 IWL_DEBUG_MAC80211("leave\n");
601} 546}
602 547
@@ -608,9 +553,6 @@ static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
608 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 553 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
609 return; 554 return;
610 555
611 if (!priv->qos_data.qos_enable)
612 return;
613
614 priv->qos_data.def_qos_parm.qos_flags = 0; 556 priv->qos_data.def_qos_parm.qos_flags = 0;
615 557
616 if (priv->qos_data.qos_cap.q_AP.queue_request && 558 if (priv->qos_data.qos_cap.q_AP.queue_request &&
@@ -637,23 +579,22 @@ static void iwl_activate_qos(struct iwl_priv *priv, u8 force)
637 579
638#define MAX_UCODE_BEACON_INTERVAL 4096 580#define MAX_UCODE_BEACON_INTERVAL 4096
639 581
640static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val) 582static u16 iwl_adjust_beacon_interval(u16 beacon_val)
641{ 583{
642 u16 new_val = 0; 584 u16 new_val = 0;
643 u16 beacon_factor = 0; 585 u16 beacon_factor = 0;
644 586
645 beacon_factor = 587 beacon_factor = (beacon_val + MAX_UCODE_BEACON_INTERVAL)
646 (beacon_val + MAX_UCODE_BEACON_INTERVAL) 588 / MAX_UCODE_BEACON_INTERVAL;
647 / MAX_UCODE_BEACON_INTERVAL;
648 new_val = beacon_val / beacon_factor; 589 new_val = beacon_val / beacon_factor;
649 590
650 return cpu_to_le16(new_val); 591 return new_val;
651} 592}
652 593
653static void iwl4965_setup_rxon_timing(struct iwl_priv *priv) 594static void iwl_setup_rxon_timing(struct iwl_priv *priv)
654{ 595{
655 u64 interval_tm_unit; 596 u64 tsf;
656 u64 tsf, result; 597 s32 interval_tm, rem;
657 unsigned long flags; 598 unsigned long flags;
658 struct ieee80211_conf *conf = NULL; 599 struct ieee80211_conf *conf = NULL;
659 u16 beacon_int = 0; 600 u16 beacon_int = 0;
@@ -661,49 +602,32 @@ static void iwl4965_setup_rxon_timing(struct iwl_priv *priv)
661 conf = ieee80211_get_hw_conf(priv->hw); 602 conf = ieee80211_get_hw_conf(priv->hw);
662 603
663 spin_lock_irqsave(&priv->lock, flags); 604 spin_lock_irqsave(&priv->lock, flags);
664 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp >> 32); 605 priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
665 priv->rxon_timing.timestamp.dw[0] =
666 cpu_to_le32(priv->timestamp & 0xFFFFFFFF);
667
668 priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval); 606 priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
669 607
670 tsf = priv->timestamp;
671
672 beacon_int = priv->beacon_int;
673 spin_unlock_irqrestore(&priv->lock, flags);
674
675 if (priv->iw_mode == NL80211_IFTYPE_STATION) { 608 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
676 if (beacon_int == 0) { 609 beacon_int = iwl_adjust_beacon_interval(priv->beacon_int);
677 priv->rxon_timing.beacon_interval = cpu_to_le16(100);
678 priv->rxon_timing.beacon_init_val = cpu_to_le32(102400);
679 } else {
680 priv->rxon_timing.beacon_interval =
681 cpu_to_le16(beacon_int);
682 priv->rxon_timing.beacon_interval =
683 iwl4965_adjust_beacon_interval(
684 le16_to_cpu(priv->rxon_timing.beacon_interval));
685 }
686
687 priv->rxon_timing.atim_window = 0; 610 priv->rxon_timing.atim_window = 0;
688 } else { 611 } else {
689 priv->rxon_timing.beacon_interval = 612 beacon_int = iwl_adjust_beacon_interval(conf->beacon_int);
690 iwl4965_adjust_beacon_interval(conf->beacon_int); 613
691 /* TODO: we need to get atim_window from upper stack 614 /* TODO: we need to get atim_window from upper stack
692 * for now we set to 0 */ 615 * for now we set to 0 */
693 priv->rxon_timing.atim_window = 0; 616 priv->rxon_timing.atim_window = 0;
694 } 617 }
695 618
696 interval_tm_unit = 619 priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int);
697 (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024);
698 result = do_div(tsf, interval_tm_unit);
699 priv->rxon_timing.beacon_init_val =
700 cpu_to_le32((u32) ((u64) interval_tm_unit - result));
701 620
702 IWL_DEBUG_ASSOC 621 tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
703 ("beacon interval %d beacon timer %d beacon tim %d\n", 622 interval_tm = beacon_int * 1024;
704 le16_to_cpu(priv->rxon_timing.beacon_interval), 623 rem = do_div(tsf, interval_tm);
705 le32_to_cpu(priv->rxon_timing.beacon_init_val), 624 priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
706 le16_to_cpu(priv->rxon_timing.atim_window)); 625
626 spin_unlock_irqrestore(&priv->lock, flags);
627 IWL_DEBUG_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
628 le16_to_cpu(priv->rxon_timing.beacon_interval),
629 le32_to_cpu(priv->rxon_timing.beacon_init_val),
630 le16_to_cpu(priv->rxon_timing.atim_window));
707} 631}
708 632
709static void iwl_set_flags_for_band(struct iwl_priv *priv, 633static void iwl_set_flags_for_band(struct iwl_priv *priv,
@@ -715,7 +639,7 @@ static void iwl_set_flags_for_band(struct iwl_priv *priv,
715 | RXON_FLG_CCK_MSK); 639 | RXON_FLG_CCK_MSK);
716 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 640 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
717 } else { 641 } else {
718 /* Copied from iwl4965_post_associate() */ 642 /* Copied from iwl_post_associate() */
719 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) 643 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
720 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 644 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
721 else 645 else
@@ -733,13 +657,13 @@ static void iwl_set_flags_for_band(struct iwl_priv *priv,
733/* 657/*
734 * initialize rxon structure with default values from eeprom 658 * initialize rxon structure with default values from eeprom
735 */ 659 */
736static void iwl4965_connection_init_rx_config(struct iwl_priv *priv) 660static void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
737{ 661{
738 const struct iwl_channel_info *ch_info; 662 const struct iwl_channel_info *ch_info;
739 663
740 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); 664 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
741 665
742 switch (priv->iw_mode) { 666 switch (mode) {
743 case NL80211_IFTYPE_AP: 667 case NL80211_IFTYPE_AP:
744 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; 668 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
745 break; 669 break;
@@ -762,7 +686,7 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
762 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 686 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
763 break; 687 break;
764 default: 688 default:
765 IWL_ERROR("Unsupported interface type %d\n", priv->iw_mode); 689 IWL_ERROR("Unsupported interface type %d\n", mode);
766 break; 690 break;
767 } 691 }
768 692
@@ -808,11 +732,9 @@ static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
808 iwl_set_rxon_chain(priv); 732 iwl_set_rxon_chain(priv);
809} 733}
810 734
811static int iwl4965_set_mode(struct iwl_priv *priv, int mode) 735static int iwl_set_mode(struct iwl_priv *priv, int mode)
812{ 736{
813 priv->iw_mode = mode; 737 iwl_connection_init_rx_config(priv, mode);
814
815 iwl4965_connection_init_rx_config(priv);
816 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); 738 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
817 739
818 iwl_clear_stations_table(priv); 740 iwl_clear_stations_table(priv);
@@ -828,12 +750,12 @@ static int iwl4965_set_mode(struct iwl_priv *priv, int mode)
828 return -EAGAIN; 750 return -EAGAIN;
829 } 751 }
830 752
831 iwl4965_commit_rxon(priv); 753 iwl_commit_rxon(priv);
832 754
833 return 0; 755 return 0;
834} 756}
835 757
836static void iwl4965_set_rate(struct iwl_priv *priv) 758static void iwl_set_rate(struct iwl_priv *priv)
837{ 759{
838 const struct ieee80211_supported_band *hw = NULL; 760 const struct ieee80211_supported_band *hw = NULL;
839 struct ieee80211_rate *rate; 761 struct ieee80211_rate *rate;
@@ -880,138 +802,6 @@ static void iwl4965_set_rate(struct iwl_priv *priv)
880 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 802 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
881} 803}
882 804
883#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
884
885#include "iwl-spectrum.h"
886
887#define BEACON_TIME_MASK_LOW 0x00FFFFFF
888#define BEACON_TIME_MASK_HIGH 0xFF000000
889#define TIME_UNIT 1024
890
891/*
892 * extended beacon time format
893 * time in usec will be changed into a 32-bit value in 8:24 format
894 * the high 1 byte is the beacon counts
895 * the lower 3 bytes is the time in usec within one beacon interval
896 */
897
898static u32 iwl4965_usecs_to_beacons(u32 usec, u32 beacon_interval)
899{
900 u32 quot;
901 u32 rem;
902 u32 interval = beacon_interval * 1024;
903
904 if (!interval || !usec)
905 return 0;
906
907 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
908 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
909
910 return (quot << 24) + rem;
911}
912
913/* base is usually what we get from ucode with each received frame,
914 * the same as HW timer counter counting down
915 */
916
917static __le32 iwl4965_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
918{
919 u32 base_low = base & BEACON_TIME_MASK_LOW;
920 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
921 u32 interval = beacon_interval * TIME_UNIT;
922 u32 res = (base & BEACON_TIME_MASK_HIGH) +
923 (addon & BEACON_TIME_MASK_HIGH);
924
925 if (base_low > addon_low)
926 res += base_low - addon_low;
927 else if (base_low < addon_low) {
928 res += interval + base_low - addon_low;
929 res += (1 << 24);
930 } else
931 res += (1 << 24);
932
933 return cpu_to_le32(res);
934}
935
936static int iwl4965_get_measurement(struct iwl_priv *priv,
937 struct ieee80211_measurement_params *params,
938 u8 type)
939{
940 struct iwl4965_spectrum_cmd spectrum;
941 struct iwl_rx_packet *res;
942 struct iwl_host_cmd cmd = {
943 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
944 .data = (void *)&spectrum,
945 .meta.flags = CMD_WANT_SKB,
946 };
947 u32 add_time = le64_to_cpu(params->start_time);
948 int rc;
949 int spectrum_resp_status;
950 int duration = le16_to_cpu(params->duration);
951
952 if (iwl_is_associated(priv))
953 add_time =
954 iwl4965_usecs_to_beacons(
955 le64_to_cpu(params->start_time) - priv->last_tsf,
956 le16_to_cpu(priv->rxon_timing.beacon_interval));
957
958 memset(&spectrum, 0, sizeof(spectrum));
959
960 spectrum.channel_count = cpu_to_le16(1);
961 spectrum.flags =
962 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
963 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
964 cmd.len = sizeof(spectrum);
965 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
966
967 if (iwl_is_associated(priv))
968 spectrum.start_time =
969 iwl4965_add_beacon_time(priv->last_beacon_time,
970 add_time,
971 le16_to_cpu(priv->rxon_timing.beacon_interval));
972 else
973 spectrum.start_time = 0;
974
975 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
976 spectrum.channels[0].channel = params->channel;
977 spectrum.channels[0].type = type;
978 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
979 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
980 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
981
982 rc = iwl_send_cmd_sync(priv, &cmd);
983 if (rc)
984 return rc;
985
986 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
987 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
988 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
989 rc = -EIO;
990 }
991
992 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
993 switch (spectrum_resp_status) {
994 case 0: /* Command will be handled */
995 if (res->u.spectrum.id != 0xff) {
996 IWL_DEBUG_INFO
997 ("Replaced existing measurement: %d\n",
998 res->u.spectrum.id);
999 priv->measurement_status &= ~MEASUREMENT_READY;
1000 }
1001 priv->measurement_status |= MEASUREMENT_ACTIVE;
1002 rc = 0;
1003 break;
1004
1005 case 1: /* Command will not be handled */
1006 rc = -EAGAIN;
1007 break;
1008 }
1009
1010 dev_kfree_skb_any(cmd.meta.u.skb);
1011
1012 return rc;
1013}
1014#endif
1015 805
1016/****************************************************************************** 806/******************************************************************************
1017 * 807 *
@@ -1054,7 +844,7 @@ static void iwl_rx_reply_alive(struct iwl_priv *priv,
1054 IWL_WARNING("uCode did not respond OK.\n"); 844 IWL_WARNING("uCode did not respond OK.\n");
1055} 845}
1056 846
1057static void iwl4965_rx_reply_error(struct iwl_priv *priv, 847static void iwl_rx_reply_error(struct iwl_priv *priv,
1058 struct iwl_rx_mem_buffer *rxb) 848 struct iwl_rx_mem_buffer *rxb)
1059{ 849{
1060 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 850 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
@@ -1070,47 +860,29 @@ static void iwl4965_rx_reply_error(struct iwl_priv *priv,
1070 860
1071#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x 861#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
1072 862
1073static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 863static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1074{ 864{
1075 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 865 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1076 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon; 866 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
1077 struct iwl4965_csa_notification *csa = &(pkt->u.csa_notif); 867 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
1078 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n", 868 IWL_DEBUG_11H("CSA notif: channel %d, status %d\n",
1079 le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); 869 le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
1080 rxon->channel = csa->channel; 870 rxon->channel = csa->channel;
1081 priv->staging_rxon.channel = csa->channel; 871 priv->staging_rxon.channel = csa->channel;
1082} 872}
1083 873
1084static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv, 874static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
1085 struct iwl_rx_mem_buffer *rxb)
1086{
1087#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
1088 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1089 struct iwl4965_spectrum_notification *report = &(pkt->u.spectrum_notif);
1090
1091 if (!report->state) {
1092 IWL_DEBUG(IWL_DL_11H,
1093 "Spectrum Measure Notification: Start\n");
1094 return;
1095 }
1096
1097 memcpy(&priv->measure_report, report, sizeof(*report));
1098 priv->measurement_status |= MEASUREMENT_READY;
1099#endif
1100}
1101
1102static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv,
1103 struct iwl_rx_mem_buffer *rxb) 875 struct iwl_rx_mem_buffer *rxb)
1104{ 876{
1105#ifdef CONFIG_IWLWIFI_DEBUG 877#ifdef CONFIG_IWLWIFI_DEBUG
1106 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 878 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1107 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif); 879 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
1108 IWL_DEBUG_RX("sleep mode: %d, src: %d\n", 880 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
1109 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 881 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
1110#endif 882#endif
1111} 883}
1112 884
1113static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv, 885static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1114 struct iwl_rx_mem_buffer *rxb) 886 struct iwl_rx_mem_buffer *rxb)
1115{ 887{
1116 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 888 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
@@ -1120,7 +892,7 @@ static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
1120 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); 892 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
1121} 893}
1122 894
1123static void iwl4965_bg_beacon_update(struct work_struct *work) 895static void iwl_bg_beacon_update(struct work_struct *work)
1124{ 896{
1125 struct iwl_priv *priv = 897 struct iwl_priv *priv =
1126 container_of(work, struct iwl_priv, beacon_update); 898 container_of(work, struct iwl_priv, beacon_update);
@@ -1142,11 +914,11 @@ static void iwl4965_bg_beacon_update(struct work_struct *work)
1142 priv->ibss_beacon = beacon; 914 priv->ibss_beacon = beacon;
1143 mutex_unlock(&priv->mutex); 915 mutex_unlock(&priv->mutex);
1144 916
1145 iwl4965_send_beacon_cmd(priv); 917 iwl_send_beacon_cmd(priv);
1146} 918}
1147 919
1148/** 920/**
1149 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics 921 * iwl_bg_statistics_periodic - Timer callback to queue statistics
1150 * 922 *
1151 * This callback is provided in order to send a statistics request. 923 * This callback is provided in order to send a statistics request.
1152 * 924 *
@@ -1155,22 +927,27 @@ static void iwl4965_bg_beacon_update(struct work_struct *work)
1155 * was received. We need to ensure we receive the statistics in order 927 * was received. We need to ensure we receive the statistics in order
1156 * to update the temperature used for calibrating the TXPOWER. 928 * to update the temperature used for calibrating the TXPOWER.
1157 */ 929 */
1158static void iwl4965_bg_statistics_periodic(unsigned long data) 930static void iwl_bg_statistics_periodic(unsigned long data)
1159{ 931{
1160 struct iwl_priv *priv = (struct iwl_priv *)data; 932 struct iwl_priv *priv = (struct iwl_priv *)data;
1161 933
1162 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 934 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1163 return; 935 return;
1164 936
937 /* dont send host command if rf-kill is on */
938 if (!iwl_is_ready_rf(priv))
939 return;
940
1165 iwl_send_statistics_request(priv, CMD_ASYNC); 941 iwl_send_statistics_request(priv, CMD_ASYNC);
1166} 942}
1167 943
1168static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, 944static void iwl_rx_beacon_notif(struct iwl_priv *priv,
1169 struct iwl_rx_mem_buffer *rxb) 945 struct iwl_rx_mem_buffer *rxb)
1170{ 946{
1171#ifdef CONFIG_IWLWIFI_DEBUG 947#ifdef CONFIG_IWLWIFI_DEBUG
1172 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 948 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1173 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status); 949 struct iwl4965_beacon_notif *beacon =
950 (struct iwl4965_beacon_notif *)pkt->u.raw;
1174 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 951 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
1175 952
1176 IWL_DEBUG_RX("beacon status %x retries %d iss %d " 953 IWL_DEBUG_RX("beacon status %x retries %d iss %d "
@@ -1189,7 +966,7 @@ static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
1189 966
1190/* Handle notification from uCode that card's power state is changing 967/* Handle notification from uCode that card's power state is changing
1191 * due to software, hardware, or critical temperature RFKILL */ 968 * due to software, hardware, or critical temperature RFKILL */
1192static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, 969static void iwl_rx_card_state_notif(struct iwl_priv *priv,
1193 struct iwl_rx_mem_buffer *rxb) 970 struct iwl_rx_mem_buffer *rxb)
1194{ 971{
1195 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 972 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
@@ -1258,7 +1035,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
1258 wake_up_interruptible(&priv->wait_command_queue); 1035 wake_up_interruptible(&priv->wait_command_queue);
1259} 1036}
1260 1037
1261int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src) 1038int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
1262{ 1039{
1263 int ret; 1040 int ret;
1264 unsigned long flags; 1041 unsigned long flags;
@@ -1290,7 +1067,7 @@ err:
1290} 1067}
1291 1068
1292/** 1069/**
1293 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks 1070 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
1294 * 1071 *
1295 * Setup the RX handlers for each of the reply types sent from the uCode 1072 * Setup the RX handlers for each of the reply types sent from the uCode
1296 * to the host. 1073 * to the host.
@@ -1301,14 +1078,12 @@ err:
1301static void iwl_setup_rx_handlers(struct iwl_priv *priv) 1078static void iwl_setup_rx_handlers(struct iwl_priv *priv)
1302{ 1079{
1303 priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; 1080 priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
1304 priv->rx_handlers[REPLY_ERROR] = iwl4965_rx_reply_error; 1081 priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
1305 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl4965_rx_csa; 1082 priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
1306 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = 1083 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
1307 iwl4965_rx_spectrum_measure_notif;
1308 priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl4965_rx_pm_sleep_notif;
1309 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = 1084 priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
1310 iwl4965_rx_pm_debug_statistics_notif; 1085 iwl_rx_pm_debug_statistics_notif;
1311 priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif; 1086 priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;
1312 1087
1313 /* 1088 /*
1314 * The same handler is used for both the REPLY to a discrete 1089 * The same handler is used for both the REPLY to a discrete
@@ -1318,10 +1093,11 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
1318 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_rx_statistics; 1093 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_rx_statistics;
1319 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics; 1094 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
1320 1095
1096 iwl_setup_spectrum_handlers(priv);
1321 iwl_setup_rx_scan_handlers(priv); 1097 iwl_setup_rx_scan_handlers(priv);
1322 1098
1323 /* status change handler */ 1099 /* status change handler */
1324 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl4965_rx_card_state_notif; 1100 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;
1325 1101
1326 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = 1102 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
1327 iwl_rx_missed_beacon_notif; 1103 iwl_rx_missed_beacon_notif;
@@ -1334,16 +1110,6 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
1334 priv->cfg->ops->lib->rx_handler_setup(priv); 1110 priv->cfg->ops->lib->rx_handler_setup(priv);
1335} 1111}
1336 1112
1337/*
1338 * this should be called while priv->lock is locked
1339*/
1340static void __iwl_rx_replenish(struct iwl_priv *priv)
1341{
1342 iwl_rx_allocate(priv);
1343 iwl_rx_queue_restock(priv);
1344}
1345
1346
1347/** 1113/**
1348 * iwl_rx_handle - Main entry function for receiving responses from uCode 1114 * iwl_rx_handle - Main entry function for receiving responses from uCode
1349 * 1115 *
@@ -1364,7 +1130,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
1364 1130
1365 /* uCode's read index (stored in shared DRAM) indicates the last Rx 1131 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1366 * buffer that the driver may process (last buffer filled by ucode). */ 1132 * buffer that the driver may process (last buffer filled by ucode). */
1367 r = priv->cfg->ops->lib->shared_mem_rx_idx(priv); 1133 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
1368 i = rxq->read; 1134 i = rxq->read;
1369 1135
1370 /* Rx interrupt, but nothing sent from uCode */ 1136 /* Rx interrupt, but nothing sent from uCode */
@@ -1400,13 +1166,14 @@ void iwl_rx_handle(struct iwl_priv *priv)
1400 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && 1166 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
1401 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && 1167 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
1402 (pkt->hdr.cmd != REPLY_RX) && 1168 (pkt->hdr.cmd != REPLY_RX) &&
1169 (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
1403 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && 1170 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
1404 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && 1171 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
1405 (pkt->hdr.cmd != REPLY_TX); 1172 (pkt->hdr.cmd != REPLY_TX);
1406 1173
1407 /* Based on type of command response or notification, 1174 /* Based on type of command response or notification,
1408 * handle those that need handling via function in 1175 * handle those that need handling via function in
1409 * rx_handlers table. See iwl4965_setup_rx_handlers() */ 1176 * rx_handlers table. See iwl_setup_rx_handlers() */
1410 if (priv->rx_handlers[pkt->hdr.cmd]) { 1177 if (priv->rx_handlers[pkt->hdr.cmd]) {
1411 IWL_DEBUG(IWL_DL_RX, "r = %d, i = %d, %s, 0x%02x\n", r, 1178 IWL_DEBUG(IWL_DL_RX, "r = %d, i = %d, %s, 0x%02x\n", r,
1412 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1179 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
@@ -1451,7 +1218,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
1451 count++; 1218 count++;
1452 if (count >= 8) { 1219 if (count >= 8) {
1453 priv->rxq.read = i; 1220 priv->rxq.read = i;
1454 __iwl_rx_replenish(priv); 1221 iwl_rx_queue_restock(priv);
1455 count = 0; 1222 count = 0;
1456 } 1223 }
1457 } 1224 }
@@ -1463,10 +1230,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
1463} 1230}
1464 1231
1465#ifdef CONFIG_IWLWIFI_DEBUG 1232#ifdef CONFIG_IWLWIFI_DEBUG
1466static void iwl4965_print_rx_config_cmd(struct iwl_priv *priv) 1233static void iwl_print_rx_config_cmd(struct iwl_priv *priv)
1467{ 1234{
1468 struct iwl_rxon_cmd *rxon = &priv->staging_rxon; 1235 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
1469 DECLARE_MAC_BUF(mac);
1470 1236
1471 IWL_DEBUG_RADIO("RX CONFIG:\n"); 1237 IWL_DEBUG_RADIO("RX CONFIG:\n");
1472 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); 1238 iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
@@ -1478,50 +1244,26 @@ static void iwl4965_print_rx_config_cmd(struct iwl_priv *priv)
1478 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n", 1244 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
1479 rxon->ofdm_basic_rates); 1245 rxon->ofdm_basic_rates);
1480 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); 1246 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
1481 IWL_DEBUG_RADIO("u8[6] node_addr: %s\n", 1247 IWL_DEBUG_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
1482 print_mac(mac, rxon->node_addr)); 1248 IWL_DEBUG_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
1483 IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n",
1484 print_mac(mac, rxon->bssid_addr));
1485 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); 1249 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
1486} 1250}
1487#endif 1251#endif
1488 1252
1489static void iwl4965_enable_interrupts(struct iwl_priv *priv)
1490{
1491 IWL_DEBUG_ISR("Enabling interrupts\n");
1492 set_bit(STATUS_INT_ENABLED, &priv->status);
1493 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
1494}
1495
1496/* call this function to flush any scheduled tasklet */ 1253/* call this function to flush any scheduled tasklet */
1497static inline void iwl_synchronize_irq(struct iwl_priv *priv) 1254static inline void iwl_synchronize_irq(struct iwl_priv *priv)
1498{ 1255{
1499 /* wait to make sure we flush pedding tasklet*/ 1256 /* wait to make sure we flush pending tasklet*/
1500 synchronize_irq(priv->pci_dev->irq); 1257 synchronize_irq(priv->pci_dev->irq);
1501 tasklet_kill(&priv->irq_tasklet); 1258 tasklet_kill(&priv->irq_tasklet);
1502} 1259}
1503 1260
1504static inline void iwl4965_disable_interrupts(struct iwl_priv *priv)
1505{
1506 clear_bit(STATUS_INT_ENABLED, &priv->status);
1507
1508 /* disable interrupts from uCode/NIC to host */
1509 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1510
1511 /* acknowledge/clear/reset any interrupts still pending
1512 * from uCode or flow handler (Rx/Tx DMA) */
1513 iwl_write32(priv, CSR_INT, 0xffffffff);
1514 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
1515 IWL_DEBUG_ISR("Disabled interrupts\n");
1516}
1517
1518
1519/** 1261/**
1520 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card 1262 * iwl_irq_handle_error - called for HW or SW error interrupt from card
1521 */ 1263 */
1522static void iwl4965_irq_handle_error(struct iwl_priv *priv) 1264static void iwl_irq_handle_error(struct iwl_priv *priv)
1523{ 1265{
1524 /* Set the FW error flag -- cleared on iwl4965_down */ 1266 /* Set the FW error flag -- cleared on iwl_down */
1525 set_bit(STATUS_FW_ERROR, &priv->status); 1267 set_bit(STATUS_FW_ERROR, &priv->status);
1526 1268
1527 /* Cancel currently queued command. */ 1269 /* Cancel currently queued command. */
@@ -1531,7 +1273,7 @@ static void iwl4965_irq_handle_error(struct iwl_priv *priv)
1531 if (priv->debug_level & IWL_DL_FW_ERRORS) { 1273 if (priv->debug_level & IWL_DL_FW_ERRORS) {
1532 iwl_dump_nic_error_log(priv); 1274 iwl_dump_nic_error_log(priv);
1533 iwl_dump_nic_event_log(priv); 1275 iwl_dump_nic_event_log(priv);
1534 iwl4965_print_rx_config_cmd(priv); 1276 iwl_print_rx_config_cmd(priv);
1535 } 1277 }
1536#endif 1278#endif
1537 1279
@@ -1555,14 +1297,14 @@ static void iwl4965_irq_handle_error(struct iwl_priv *priv)
1555 } 1297 }
1556} 1298}
1557 1299
1558static void iwl4965_error_recovery(struct iwl_priv *priv) 1300static void iwl_error_recovery(struct iwl_priv *priv)
1559{ 1301{
1560 unsigned long flags; 1302 unsigned long flags;
1561 1303
1562 memcpy(&priv->staging_rxon, &priv->recovery_rxon, 1304 memcpy(&priv->staging_rxon, &priv->recovery_rxon,
1563 sizeof(priv->staging_rxon)); 1305 sizeof(priv->staging_rxon));
1564 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1306 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1565 iwl4965_commit_rxon(priv); 1307 iwl_commit_rxon(priv);
1566 1308
1567 iwl_rxon_add_station(priv, priv->bssid, 1); 1309 iwl_rxon_add_station(priv, priv->bssid, 1);
1568 1310
@@ -1572,7 +1314,7 @@ static void iwl4965_error_recovery(struct iwl_priv *priv)
1572 spin_unlock_irqrestore(&priv->lock, flags); 1314 spin_unlock_irqrestore(&priv->lock, flags);
1573} 1315}
1574 1316
1575static void iwl4965_irq_tasklet(struct iwl_priv *priv) 1317static void iwl_irq_tasklet(struct iwl_priv *priv)
1576{ 1318{
1577 u32 inta, handled = 0; 1319 u32 inta, handled = 0;
1578 u32 inta_fh; 1320 u32 inta_fh;
@@ -1618,9 +1360,9 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
1618 IWL_ERROR("Microcode HW error detected. Restarting.\n"); 1360 IWL_ERROR("Microcode HW error detected. Restarting.\n");
1619 1361
1620 /* Tell the device to stop sending interrupts */ 1362 /* Tell the device to stop sending interrupts */
1621 iwl4965_disable_interrupts(priv); 1363 iwl_disable_interrupts(priv);
1622 1364
1623 iwl4965_irq_handle_error(priv); 1365 iwl_irq_handle_error(priv);
1624 1366
1625 handled |= CSR_INT_BIT_HW_ERR; 1367 handled |= CSR_INT_BIT_HW_ERR;
1626 1368
@@ -1652,14 +1394,17 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
1652 hw_rf_kill = 1; 1394 hw_rf_kill = 1;
1653 1395
1654 IWL_DEBUG(IWL_DL_RF_KILL, "RF_KILL bit toggled to %s.\n", 1396 IWL_DEBUG(IWL_DL_RF_KILL, "RF_KILL bit toggled to %s.\n",
1655 hw_rf_kill ? "disable radio":"enable radio"); 1397 hw_rf_kill ? "disable radio" : "enable radio");
1656 1398
1657 /* driver only loads ucode once setting the interface up. 1399 /* driver only loads ucode once setting the interface up.
1658 * the driver as well won't allow loading if RFKILL is set 1400 * the driver as well won't allow loading if RFKILL is set
1659 * therefore no need to restart the driver from this handler 1401 * therefore no need to restart the driver from this handler
1660 */ 1402 */
1661 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) 1403 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) {
1662 clear_bit(STATUS_RF_KILL_HW, &priv->status); 1404 clear_bit(STATUS_RF_KILL_HW, &priv->status);
1405 if (priv->is_open && !iwl_is_rfkill(priv))
1406 queue_work(priv->workqueue, &priv->up);
1407 }
1663 1408
1664 handled |= CSR_INT_BIT_RF_KILL; 1409 handled |= CSR_INT_BIT_RF_KILL;
1665 } 1410 }
@@ -1674,7 +1419,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
1674 if (inta & CSR_INT_BIT_SW_ERR) { 1419 if (inta & CSR_INT_BIT_SW_ERR) {
1675 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n", 1420 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n",
1676 inta); 1421 inta);
1677 iwl4965_irq_handle_error(priv); 1422 iwl_irq_handle_error(priv);
1678 handled |= CSR_INT_BIT_SW_ERR; 1423 handled |= CSR_INT_BIT_SW_ERR;
1679 } 1424 }
1680 1425
@@ -1720,7 +1465,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
1720 /* Re-enable all interrupts */ 1465 /* Re-enable all interrupts */
1721 /* only Re-enable if diabled by irq */ 1466 /* only Re-enable if diabled by irq */
1722 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1467 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1723 iwl4965_enable_interrupts(priv); 1468 iwl_enable_interrupts(priv);
1724 1469
1725#ifdef CONFIG_IWLWIFI_DEBUG 1470#ifdef CONFIG_IWLWIFI_DEBUG
1726 if (priv->debug_level & (IWL_DL_ISR)) { 1471 if (priv->debug_level & (IWL_DL_ISR)) {
@@ -1734,7 +1479,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv)
1734 spin_unlock_irqrestore(&priv->lock, flags); 1479 spin_unlock_irqrestore(&priv->lock, flags);
1735} 1480}
1736 1481
1737static irqreturn_t iwl4965_isr(int irq, void *data) 1482static irqreturn_t iwl_isr(int irq, void *data)
1738{ 1483{
1739 struct iwl_priv *priv = data; 1484 struct iwl_priv *priv = data;
1740 u32 inta, inta_mask; 1485 u32 inta, inta_mask;
@@ -1766,7 +1511,7 @@ static irqreturn_t iwl4965_isr(int irq, void *data)
1766 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 1511 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1767 /* Hardware disappeared. It might have already raised 1512 /* Hardware disappeared. It might have already raised
1768 * an interrupt */ 1513 * an interrupt */
1769 IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta); 1514 IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta);
1770 goto unplugged; 1515 goto unplugged;
1771 } 1516 }
1772 1517
@@ -1775,7 +1520,7 @@ static irqreturn_t iwl4965_isr(int irq, void *data)
1775 1520
1776 inta &= ~CSR_INT_BIT_SCD; 1521 inta &= ~CSR_INT_BIT_SCD;
1777 1522
1778 /* iwl4965_irq_tasklet() will service interrupts and re-enable them */ 1523 /* iwl_irq_tasklet() will service interrupts and re-enable them */
1779 if (likely(inta || inta_fh)) 1524 if (likely(inta || inta_fh))
1780 tasklet_schedule(&priv->irq_tasklet); 1525 tasklet_schedule(&priv->irq_tasklet);
1781 1526
@@ -1787,7 +1532,7 @@ static irqreturn_t iwl4965_isr(int irq, void *data)
1787 /* re-enable interrupts here since we don't have anything to service. */ 1532 /* re-enable interrupts here since we don't have anything to service. */
1788 /* only Re-enable if diabled by irq */ 1533 /* only Re-enable if diabled by irq */
1789 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1534 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1790 iwl4965_enable_interrupts(priv); 1535 iwl_enable_interrupts(priv);
1791 spin_unlock(&priv->lock); 1536 spin_unlock(&priv->lock);
1792 return IRQ_NONE; 1537 return IRQ_NONE;
1793} 1538}
@@ -1798,7 +1543,7 @@ static irqreturn_t iwl4965_isr(int irq, void *data)
1798 * 1543 *
1799 ******************************************************************************/ 1544 ******************************************************************************/
1800 1545
1801static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv) 1546static void iwl_dealloc_ucode_pci(struct iwl_priv *priv)
1802{ 1547{
1803 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); 1548 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
1804 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); 1549 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
@@ -1808,7 +1553,7 @@ static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
1808 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); 1553 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
1809} 1554}
1810 1555
1811static void iwl4965_nic_start(struct iwl_priv *priv) 1556static void iwl_nic_start(struct iwl_priv *priv)
1812{ 1557{
1813 /* Remove all resets to allow NIC to operate */ 1558 /* Remove all resets to allow NIC to operate */
1814 iwl_write32(priv, CSR_RESET, 0); 1559 iwl_write32(priv, CSR_RESET, 0);
@@ -1816,31 +1561,47 @@ static void iwl4965_nic_start(struct iwl_priv *priv)
1816 1561
1817 1562
1818/** 1563/**
1819 * iwl4965_read_ucode - Read uCode images from disk file. 1564 * iwl_read_ucode - Read uCode images from disk file.
1820 * 1565 *
1821 * Copy into buffers for card to fetch via bus-mastering 1566 * Copy into buffers for card to fetch via bus-mastering
1822 */ 1567 */
1823static int iwl4965_read_ucode(struct iwl_priv *priv) 1568static int iwl_read_ucode(struct iwl_priv *priv)
1824{ 1569{
1825 struct iwl_ucode *ucode; 1570 struct iwl_ucode *ucode;
1826 int ret; 1571 int ret = -EINVAL, index;
1827 const struct firmware *ucode_raw; 1572 const struct firmware *ucode_raw;
1828 const char *name = priv->cfg->fw_name; 1573 const char *name_pre = priv->cfg->fw_name_pre;
1574 const unsigned int api_max = priv->cfg->ucode_api_max;
1575 const unsigned int api_min = priv->cfg->ucode_api_min;
1576 char buf[25];
1829 u8 *src; 1577 u8 *src;
1830 size_t len; 1578 size_t len;
1831 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size; 1579 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
1832 1580
1833 /* Ask kernel firmware_class module to get the boot firmware off disk. 1581 /* Ask kernel firmware_class module to get the boot firmware off disk.
1834 * request_firmware() is synchronous, file is in memory on return. */ 1582 * request_firmware() is synchronous, file is in memory on return. */
1835 ret = request_firmware(&ucode_raw, name, &priv->pci_dev->dev); 1583 for (index = api_max; index >= api_min; index--) {
1836 if (ret < 0) { 1584 sprintf(buf, "%s%d%s", name_pre, index, ".ucode");
1837 IWL_ERROR("%s firmware file req failed: Reason %d\n", 1585 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
1838 name, ret); 1586 if (ret < 0) {
1839 goto error; 1587 IWL_ERROR("%s firmware file req failed: Reason %d\n",
1588 buf, ret);
1589 if (ret == -ENOENT)
1590 continue;
1591 else
1592 goto error;
1593 } else {
1594 if (index < api_max)
1595 IWL_ERROR("Loaded firmware %s, which is deprecated. Please use API v%u instead.\n",
1596 buf, api_max);
1597 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
1598 buf, ucode_raw->size);
1599 break;
1600 }
1840 } 1601 }
1841 1602
1842 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n", 1603 if (ret < 0)
1843 name, ucode_raw->size); 1604 goto error;
1844 1605
1845 /* Make sure that we got at least our header! */ 1606 /* Make sure that we got at least our header! */
1846 if (ucode_raw->size < sizeof(*ucode)) { 1607 if (ucode_raw->size < sizeof(*ucode)) {
@@ -1852,14 +1613,40 @@ static int iwl4965_read_ucode(struct iwl_priv *priv)
1852 /* Data from ucode file: header followed by uCode images */ 1613 /* Data from ucode file: header followed by uCode images */
1853 ucode = (void *)ucode_raw->data; 1614 ucode = (void *)ucode_raw->data;
1854 1615
1855 ver = le32_to_cpu(ucode->ver); 1616 priv->ucode_ver = le32_to_cpu(ucode->ver);
1617 api_ver = IWL_UCODE_API(priv->ucode_ver);
1856 inst_size = le32_to_cpu(ucode->inst_size); 1618 inst_size = le32_to_cpu(ucode->inst_size);
1857 data_size = le32_to_cpu(ucode->data_size); 1619 data_size = le32_to_cpu(ucode->data_size);
1858 init_size = le32_to_cpu(ucode->init_size); 1620 init_size = le32_to_cpu(ucode->init_size);
1859 init_data_size = le32_to_cpu(ucode->init_data_size); 1621 init_data_size = le32_to_cpu(ucode->init_data_size);
1860 boot_size = le32_to_cpu(ucode->boot_size); 1622 boot_size = le32_to_cpu(ucode->boot_size);
1861 1623
1862 IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver); 1624 /* api_ver should match the api version forming part of the
1625 * firmware filename ... but we don't check for that and only rely
1626 * on the API version read from firware header from here on forward */
1627
1628 if (api_ver < api_min || api_ver > api_max) {
1629 IWL_ERROR("Driver unable to support your firmware API. "
1630 "Driver supports v%u, firmware is v%u.\n",
1631 api_max, api_ver);
1632 priv->ucode_ver = 0;
1633 ret = -EINVAL;
1634 goto err_release;
1635 }
1636 if (api_ver != api_max)
1637 IWL_ERROR("Firmware has old API version. Expected v%u, "
1638 "got v%u. New firmware can be obtained "
1639 "from http://www.intellinuxwireless.org.\n",
1640 api_max, api_ver);
1641
1642 printk(KERN_INFO DRV_NAME " loaded firmware version %u.%u.%u.%u\n",
1643 IWL_UCODE_MAJOR(priv->ucode_ver),
1644 IWL_UCODE_MINOR(priv->ucode_ver),
1645 IWL_UCODE_API(priv->ucode_ver),
1646 IWL_UCODE_SERIAL(priv->ucode_ver));
1647
1648 IWL_DEBUG_INFO("f/w package hdr ucode version raw = 0x%x\n",
1649 priv->ucode_ver);
1863 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", 1650 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n",
1864 inst_size); 1651 inst_size);
1865 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", 1652 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n",
@@ -1964,7 +1751,7 @@ static int iwl4965_read_ucode(struct iwl_priv *priv)
1964 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); 1751 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
1965 1752
1966 /* Runtime data (2nd block) 1753 /* Runtime data (2nd block)
1967 * NOTE: Copy into backup buffer will be done in iwl4965_up() */ 1754 * NOTE: Copy into backup buffer will be done in iwl_up() */
1968 src = &ucode->data[inst_size]; 1755 src = &ucode->data[inst_size];
1969 len = priv->ucode_data.len; 1756 len = priv->ucode_data.len;
1970 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len); 1757 IWL_DEBUG_INFO("Copying (but not loading) uCode data len %Zd\n", len);
@@ -2002,7 +1789,7 @@ static int iwl4965_read_ucode(struct iwl_priv *priv)
2002 err_pci_alloc: 1789 err_pci_alloc:
2003 IWL_ERROR("failed to allocate pci memory\n"); 1790 IWL_ERROR("failed to allocate pci memory\n");
2004 ret = -ENOMEM; 1791 ret = -ENOMEM;
2005 iwl4965_dealloc_ucode_pci(priv); 1792 iwl_dealloc_ucode_pci(priv);
2006 1793
2007 err_release: 1794 err_release:
2008 release_firmware(ucode_raw); 1795 release_firmware(ucode_raw);
@@ -2011,6 +1798,10 @@ static int iwl4965_read_ucode(struct iwl_priv *priv)
2011 return ret; 1798 return ret;
2012} 1799}
2013 1800
1801/* temporary */
1802static int iwl_mac_beacon_update(struct ieee80211_hw *hw,
1803 struct sk_buff *skb);
1804
2014/** 1805/**
2015 * iwl_alive_start - called after REPLY_ALIVE notification received 1806 * iwl_alive_start - called after REPLY_ALIVE notification received
2016 * from protocol/runtime uCode (initialization uCode's 1807 * from protocol/runtime uCode (initialization uCode's
@@ -2047,7 +1838,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
2047 goto restart; 1838 goto restart;
2048 } 1839 }
2049 1840
2050 /* After the ALIVE response, we can send host commands to 4965 uCode */ 1841 /* After the ALIVE response, we can send host commands to the uCode */
2051 set_bit(STATUS_ALIVE, &priv->status); 1842 set_bit(STATUS_ALIVE, &priv->status);
2052 1843
2053 if (iwl_is_rfkill(priv)) 1844 if (iwl_is_rfkill(priv))
@@ -2067,17 +1858,17 @@ static void iwl_alive_start(struct iwl_priv *priv)
2067 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 1858 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2068 } else { 1859 } else {
2069 /* Initialize our rx_config data */ 1860 /* Initialize our rx_config data */
2070 iwl4965_connection_init_rx_config(priv); 1861 iwl_connection_init_rx_config(priv, priv->iw_mode);
2071 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); 1862 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2072 } 1863 }
2073 1864
2074 /* Configure Bluetooth device coexistence support */ 1865 /* Configure Bluetooth device coexistence support */
2075 iwl4965_send_bt_config(priv); 1866 iwl_send_bt_config(priv);
2076 1867
2077 iwl_reset_run_time_calib(priv); 1868 iwl_reset_run_time_calib(priv);
2078 1869
2079 /* Configure the adapter for unassociated operation */ 1870 /* Configure the adapter for unassociated operation */
2080 iwl4965_commit_rxon(priv); 1871 iwl_commit_rxon(priv);
2081 1872
2082 /* At this point, the NIC is initialized and operational */ 1873 /* At this point, the NIC is initialized and operational */
2083 iwl_rf_kill_ct_config(priv); 1874 iwl_rf_kill_ct_config(priv);
@@ -2089,12 +1880,21 @@ static void iwl_alive_start(struct iwl_priv *priv)
2089 wake_up_interruptible(&priv->wait_command_queue); 1880 wake_up_interruptible(&priv->wait_command_queue);
2090 1881
2091 if (priv->error_recovering) 1882 if (priv->error_recovering)
2092 iwl4965_error_recovery(priv); 1883 iwl_error_recovery(priv);
2093 1884
2094 iwl_power_update_mode(priv, 1); 1885 iwl_power_update_mode(priv, 1);
2095 1886
1887 /* reassociate for ADHOC mode */
1888 if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
1889 struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
1890 priv->vif);
1891 if (beacon)
1892 iwl_mac_beacon_update(priv->hw, beacon);
1893 }
1894
1895
2096 if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status)) 1896 if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
2097 iwl4965_set_mode(priv, priv->iw_mode); 1897 iwl_set_mode(priv, priv->iw_mode);
2098 1898
2099 return; 1899 return;
2100 1900
@@ -2104,7 +1904,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
2104 1904
2105static void iwl_cancel_deferred_work(struct iwl_priv *priv); 1905static void iwl_cancel_deferred_work(struct iwl_priv *priv);
2106 1906
2107static void __iwl4965_down(struct iwl_priv *priv) 1907static void __iwl_down(struct iwl_priv *priv)
2108{ 1908{
2109 unsigned long flags; 1909 unsigned long flags;
2110 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); 1910 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -2131,14 +1931,14 @@ static void __iwl4965_down(struct iwl_priv *priv)
2131 1931
2132 /* tell the device to stop sending interrupts */ 1932 /* tell the device to stop sending interrupts */
2133 spin_lock_irqsave(&priv->lock, flags); 1933 spin_lock_irqsave(&priv->lock, flags);
2134 iwl4965_disable_interrupts(priv); 1934 iwl_disable_interrupts(priv);
2135 spin_unlock_irqrestore(&priv->lock, flags); 1935 spin_unlock_irqrestore(&priv->lock, flags);
2136 iwl_synchronize_irq(priv); 1936 iwl_synchronize_irq(priv);
2137 1937
2138 if (priv->mac80211_registered) 1938 if (priv->mac80211_registered)
2139 ieee80211_stop_queues(priv->hw); 1939 ieee80211_stop_queues(priv->hw);
2140 1940
2141 /* If we have not previously called iwl4965_init() then 1941 /* If we have not previously called iwl_init() then
2142 * clear all bits but the RF Kill and SUSPEND bits and return */ 1942 * clear all bits but the RF Kill and SUSPEND bits and return */
2143 if (!iwl_is_init(priv)) { 1943 if (!iwl_is_init(priv)) {
2144 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << 1944 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
@@ -2192,8 +1992,6 @@ static void __iwl4965_down(struct iwl_priv *priv)
2192 priv->cfg->ops->lib->apm_ops.stop(priv); 1992 priv->cfg->ops->lib->apm_ops.stop(priv);
2193 else 1993 else
2194 priv->cfg->ops->lib->apm_ops.reset(priv); 1994 priv->cfg->ops->lib->apm_ops.reset(priv);
2195 priv->cfg->ops->lib->free_shared_mem(priv);
2196
2197 exit: 1995 exit:
2198 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 1996 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
2199 1997
@@ -2205,10 +2003,10 @@ static void __iwl4965_down(struct iwl_priv *priv)
2205 iwl_clear_free_frames(priv); 2003 iwl_clear_free_frames(priv);
2206} 2004}
2207 2005
2208static void iwl4965_down(struct iwl_priv *priv) 2006static void iwl_down(struct iwl_priv *priv)
2209{ 2007{
2210 mutex_lock(&priv->mutex); 2008 mutex_lock(&priv->mutex);
2211 __iwl4965_down(priv); 2009 __iwl_down(priv);
2212 mutex_unlock(&priv->mutex); 2010 mutex_unlock(&priv->mutex);
2213 2011
2214 iwl_cancel_deferred_work(priv); 2012 iwl_cancel_deferred_work(priv);
@@ -2216,7 +2014,7 @@ static void iwl4965_down(struct iwl_priv *priv)
2216 2014
2217#define MAX_HW_RESTARTS 5 2015#define MAX_HW_RESTARTS 5
2218 2016
2219static int __iwl4965_up(struct iwl_priv *priv) 2017static int __iwl_up(struct iwl_priv *priv)
2220{ 2018{
2221 int i; 2019 int i;
2222 int ret; 2020 int ret;
@@ -2238,7 +2036,7 @@ static int __iwl4965_up(struct iwl_priv *priv)
2238 set_bit(STATUS_RF_KILL_HW, &priv->status); 2036 set_bit(STATUS_RF_KILL_HW, &priv->status);
2239 2037
2240 if (iwl_is_rfkill(priv)) { 2038 if (iwl_is_rfkill(priv)) {
2241 iwl4965_enable_interrupts(priv); 2039 iwl_enable_interrupts(priv);
2242 IWL_WARNING("Radio disabled by %s RF Kill switch\n", 2040 IWL_WARNING("Radio disabled by %s RF Kill switch\n",
2243 test_bit(STATUS_RF_KILL_HW, &priv->status) ? "HW" : "SW"); 2041 test_bit(STATUS_RF_KILL_HW, &priv->status) ? "HW" : "SW");
2244 return 0; 2042 return 0;
@@ -2246,12 +2044,6 @@ static int __iwl4965_up(struct iwl_priv *priv)
2246 2044
2247 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2045 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2248 2046
2249 ret = priv->cfg->ops->lib->alloc_shared_mem(priv);
2250 if (ret) {
2251 IWL_ERROR("Unable to allocate shared memory\n");
2252 return ret;
2253 }
2254
2255 ret = iwl_hw_nic_init(priv); 2047 ret = iwl_hw_nic_init(priv);
2256 if (ret) { 2048 if (ret) {
2257 IWL_ERROR("Unable to init nic\n"); 2049 IWL_ERROR("Unable to init nic\n");
@@ -2265,7 +2057,7 @@ static int __iwl4965_up(struct iwl_priv *priv)
2265 2057
2266 /* clear (again), then enable host interrupts */ 2058 /* clear (again), then enable host interrupts */
2267 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2059 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2268 iwl4965_enable_interrupts(priv); 2060 iwl_enable_interrupts(priv);
2269 2061
2270 /* really make sure rfkill handshake bits are cleared */ 2062 /* really make sure rfkill handshake bits are cleared */
2271 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2063 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
@@ -2295,7 +2087,7 @@ static int __iwl4965_up(struct iwl_priv *priv)
2295 clear_bit(STATUS_FW_ERROR, &priv->status); 2087 clear_bit(STATUS_FW_ERROR, &priv->status);
2296 2088
2297 /* start card; "initialize" will load runtime ucode */ 2089 /* start card; "initialize" will load runtime ucode */
2298 iwl4965_nic_start(priv); 2090 iwl_nic_start(priv);
2299 2091
2300 IWL_DEBUG_INFO(DRV_NAME " is coming up\n"); 2092 IWL_DEBUG_INFO(DRV_NAME " is coming up\n");
2301 2093
@@ -2303,7 +2095,7 @@ static int __iwl4965_up(struct iwl_priv *priv)
2303 } 2095 }
2304 2096
2305 set_bit(STATUS_EXIT_PENDING, &priv->status); 2097 set_bit(STATUS_EXIT_PENDING, &priv->status);
2306 __iwl4965_down(priv); 2098 __iwl_down(priv);
2307 clear_bit(STATUS_EXIT_PENDING, &priv->status); 2099 clear_bit(STATUS_EXIT_PENDING, &priv->status);
2308 2100
2309 /* tried to restart and config the device for as long as our 2101 /* tried to restart and config the device for as long as our
@@ -2345,7 +2137,7 @@ static void iwl_bg_alive_start(struct work_struct *data)
2345 mutex_unlock(&priv->mutex); 2137 mutex_unlock(&priv->mutex);
2346} 2138}
2347 2139
2348static void iwl4965_bg_rf_kill(struct work_struct *work) 2140static void iwl_bg_rf_kill(struct work_struct *work)
2349{ 2141{
2350 struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill); 2142 struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
2351 2143
@@ -2379,28 +2171,6 @@ static void iwl4965_bg_rf_kill(struct work_struct *work)
2379 iwl_rfkill_set_hw_state(priv); 2171 iwl_rfkill_set_hw_state(priv);
2380} 2172}
2381 2173
2382static void iwl4965_bg_set_monitor(struct work_struct *work)
2383{
2384 struct iwl_priv *priv = container_of(work,
2385 struct iwl_priv, set_monitor);
2386 int ret;
2387
2388 IWL_DEBUG(IWL_DL_STATE, "setting monitor mode\n");
2389
2390 mutex_lock(&priv->mutex);
2391
2392 ret = iwl4965_set_mode(priv, NL80211_IFTYPE_MONITOR);
2393
2394 if (ret) {
2395 if (ret == -EAGAIN)
2396 IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n");
2397 else
2398 IWL_ERROR("iwl4965_set_mode() failed ret = %d\n", ret);
2399 }
2400
2401 mutex_unlock(&priv->mutex);
2402}
2403
2404static void iwl_bg_run_time_calib_work(struct work_struct *work) 2174static void iwl_bg_run_time_calib_work(struct work_struct *work)
2405{ 2175{
2406 struct iwl_priv *priv = container_of(work, struct iwl_priv, 2176 struct iwl_priv *priv = container_of(work, struct iwl_priv,
@@ -2424,7 +2194,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
2424 return; 2194 return;
2425} 2195}
2426 2196
2427static void iwl4965_bg_up(struct work_struct *data) 2197static void iwl_bg_up(struct work_struct *data)
2428{ 2198{
2429 struct iwl_priv *priv = container_of(data, struct iwl_priv, up); 2199 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
2430 2200
@@ -2432,23 +2202,23 @@ static void iwl4965_bg_up(struct work_struct *data)
2432 return; 2202 return;
2433 2203
2434 mutex_lock(&priv->mutex); 2204 mutex_lock(&priv->mutex);
2435 __iwl4965_up(priv); 2205 __iwl_up(priv);
2436 mutex_unlock(&priv->mutex); 2206 mutex_unlock(&priv->mutex);
2437 iwl_rfkill_set_hw_state(priv); 2207 iwl_rfkill_set_hw_state(priv);
2438} 2208}
2439 2209
2440static void iwl4965_bg_restart(struct work_struct *data) 2210static void iwl_bg_restart(struct work_struct *data)
2441{ 2211{
2442 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); 2212 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
2443 2213
2444 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2214 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2445 return; 2215 return;
2446 2216
2447 iwl4965_down(priv); 2217 iwl_down(priv);
2448 queue_work(priv->workqueue, &priv->up); 2218 queue_work(priv->workqueue, &priv->up);
2449} 2219}
2450 2220
2451static void iwl4965_bg_rx_replenish(struct work_struct *data) 2221static void iwl_bg_rx_replenish(struct work_struct *data)
2452{ 2222{
2453 struct iwl_priv *priv = 2223 struct iwl_priv *priv =
2454 container_of(data, struct iwl_priv, rx_replenish); 2224 container_of(data, struct iwl_priv, rx_replenish);
@@ -2463,11 +2233,10 @@ static void iwl4965_bg_rx_replenish(struct work_struct *data)
2463 2233
2464#define IWL_DELAY_NEXT_SCAN (HZ*2) 2234#define IWL_DELAY_NEXT_SCAN (HZ*2)
2465 2235
2466static void iwl4965_post_associate(struct iwl_priv *priv) 2236static void iwl_post_associate(struct iwl_priv *priv)
2467{ 2237{
2468 struct ieee80211_conf *conf = NULL; 2238 struct ieee80211_conf *conf = NULL;
2469 int ret = 0; 2239 int ret = 0;
2470 DECLARE_MAC_BUF(mac);
2471 unsigned long flags; 2240 unsigned long flags;
2472 2241
2473 if (priv->iw_mode == NL80211_IFTYPE_AP) { 2242 if (priv->iw_mode == NL80211_IFTYPE_AP) {
@@ -2475,9 +2244,8 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2475 return; 2244 return;
2476 } 2245 }
2477 2246
2478 IWL_DEBUG_ASSOC("Associated as %d to: %s\n", 2247 IWL_DEBUG_ASSOC("Associated as %d to: %pM\n",
2479 priv->assoc_id, 2248 priv->assoc_id, priv->active_rxon.bssid_addr);
2480 print_mac(mac, priv->active_rxon.bssid_addr));
2481 2249
2482 2250
2483 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2251 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -2493,10 +2261,9 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2493 conf = ieee80211_get_hw_conf(priv->hw); 2261 conf = ieee80211_get_hw_conf(priv->hw);
2494 2262
2495 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2263 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2496 iwl4965_commit_rxon(priv); 2264 iwl_commit_rxon(priv);
2497 2265
2498 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd)); 2266 iwl_setup_rxon_timing(priv);
2499 iwl4965_setup_rxon_timing(priv);
2500 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, 2267 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
2501 sizeof(priv->rxon_timing), &priv->rxon_timing); 2268 sizeof(priv->rxon_timing), &priv->rxon_timing);
2502 if (ret) 2269 if (ret)
@@ -2529,7 +2296,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2529 2296
2530 } 2297 }
2531 2298
2532 iwl4965_commit_rxon(priv); 2299 iwl_commit_rxon(priv);
2533 2300
2534 switch (priv->iw_mode) { 2301 switch (priv->iw_mode) {
2535 case NL80211_IFTYPE_STATION: 2302 case NL80211_IFTYPE_STATION:
@@ -2541,7 +2308,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2541 priv->assoc_id = 1; 2308 priv->assoc_id = 1;
2542 2309
2543 iwl_rxon_add_station(priv, priv->bssid, 0); 2310 iwl_rxon_add_station(priv, priv->bssid, 0);
2544 iwl4965_send_beacon_cmd(priv); 2311 iwl_send_beacon_cmd(priv);
2545 2312
2546 break; 2313 break;
2547 2314
@@ -2578,7 +2345,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
2578 2345
2579#define UCODE_READY_TIMEOUT (4 * HZ) 2346#define UCODE_READY_TIMEOUT (4 * HZ)
2580 2347
2581static int iwl4965_mac_start(struct ieee80211_hw *hw) 2348static int iwl_mac_start(struct ieee80211_hw *hw)
2582{ 2349{
2583 struct iwl_priv *priv = hw->priv; 2350 struct iwl_priv *priv = hw->priv;
2584 int ret; 2351 int ret;
@@ -2600,7 +2367,7 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
2600 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); 2367 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
2601 } 2368 }
2602 2369
2603 ret = request_irq(priv->pci_dev->irq, iwl4965_isr, IRQF_SHARED, 2370 ret = request_irq(priv->pci_dev->irq, iwl_isr, IRQF_SHARED,
2604 DRV_NAME, priv); 2371 DRV_NAME, priv);
2605 if (ret) { 2372 if (ret) {
2606 IWL_ERROR("Error allocating IRQ %d\n", priv->pci_dev->irq); 2373 IWL_ERROR("Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -2615,7 +2382,7 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
2615 * ucode filename and max sizes are card-specific. */ 2382 * ucode filename and max sizes are card-specific. */
2616 2383
2617 if (!priv->ucode_code.len) { 2384 if (!priv->ucode_code.len) {
2618 ret = iwl4965_read_ucode(priv); 2385 ret = iwl_read_ucode(priv);
2619 if (ret) { 2386 if (ret) {
2620 IWL_ERROR("Could not read microcode: %d\n", ret); 2387 IWL_ERROR("Could not read microcode: %d\n", ret);
2621 mutex_unlock(&priv->mutex); 2388 mutex_unlock(&priv->mutex);
@@ -2623,7 +2390,7 @@ static int iwl4965_mac_start(struct ieee80211_hw *hw)
2623 } 2390 }
2624 } 2391 }
2625 2392
2626 ret = __iwl4965_up(priv); 2393 ret = __iwl_up(priv);
2627 2394
2628 mutex_unlock(&priv->mutex); 2395 mutex_unlock(&priv->mutex);
2629 2396
@@ -2669,7 +2436,7 @@ out_disable_msi:
2669 return ret; 2436 return ret;
2670} 2437}
2671 2438
2672static void iwl4965_mac_stop(struct ieee80211_hw *hw) 2439static void iwl_mac_stop(struct ieee80211_hw *hw)
2673{ 2440{
2674 struct iwl_priv *priv = hw->priv; 2441 struct iwl_priv *priv = hw->priv;
2675 2442
@@ -2691,7 +2458,7 @@ static void iwl4965_mac_stop(struct ieee80211_hw *hw)
2691 mutex_unlock(&priv->mutex); 2458 mutex_unlock(&priv->mutex);
2692 } 2459 }
2693 2460
2694 iwl4965_down(priv); 2461 iwl_down(priv);
2695 2462
2696 flush_workqueue(priv->workqueue); 2463 flush_workqueue(priv->workqueue);
2697 free_irq(priv->pci_dev->irq, priv); 2464 free_irq(priv->pci_dev->irq, priv);
@@ -2702,7 +2469,7 @@ static void iwl4965_mac_stop(struct ieee80211_hw *hw)
2702 IWL_DEBUG_MAC80211("leave\n"); 2469 IWL_DEBUG_MAC80211("leave\n");
2703} 2470}
2704 2471
2705static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 2472static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2706{ 2473{
2707 struct iwl_priv *priv = hw->priv; 2474 struct iwl_priv *priv = hw->priv;
2708 2475
@@ -2718,12 +2485,11 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2718 return 0; 2485 return 0;
2719} 2486}
2720 2487
2721static int iwl4965_mac_add_interface(struct ieee80211_hw *hw, 2488static int iwl_mac_add_interface(struct ieee80211_hw *hw,
2722 struct ieee80211_if_init_conf *conf) 2489 struct ieee80211_if_init_conf *conf)
2723{ 2490{
2724 struct iwl_priv *priv = hw->priv; 2491 struct iwl_priv *priv = hw->priv;
2725 unsigned long flags; 2492 unsigned long flags;
2726 DECLARE_MAC_BUF(mac);
2727 2493
2728 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type); 2494 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type);
2729 2495
@@ -2734,17 +2500,18 @@ static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
2734 2500
2735 spin_lock_irqsave(&priv->lock, flags); 2501 spin_lock_irqsave(&priv->lock, flags);
2736 priv->vif = conf->vif; 2502 priv->vif = conf->vif;
2503 priv->iw_mode = conf->type;
2737 2504
2738 spin_unlock_irqrestore(&priv->lock, flags); 2505 spin_unlock_irqrestore(&priv->lock, flags);
2739 2506
2740 mutex_lock(&priv->mutex); 2507 mutex_lock(&priv->mutex);
2741 2508
2742 if (conf->mac_addr) { 2509 if (conf->mac_addr) {
2743 IWL_DEBUG_MAC80211("Set %s\n", print_mac(mac, conf->mac_addr)); 2510 IWL_DEBUG_MAC80211("Set %pM\n", conf->mac_addr);
2744 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 2511 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
2745 } 2512 }
2746 2513
2747 if (iwl4965_set_mode(priv, conf->type) == -EAGAIN) 2514 if (iwl_set_mode(priv, conf->type) == -EAGAIN)
2748 /* we are not ready, will run again when ready */ 2515 /* we are not ready, will run again when ready */
2749 set_bit(STATUS_MODE_PENDING, &priv->status); 2516 set_bit(STATUS_MODE_PENDING, &priv->status);
2750 2517
@@ -2755,16 +2522,17 @@ static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
2755} 2522}
2756 2523
2757/** 2524/**
2758 * iwl4965_mac_config - mac80211 config callback 2525 * iwl_mac_config - mac80211 config callback
2759 * 2526 *
2760 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to 2527 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
2761 * be set inappropriately and the driver currently sets the hardware up to 2528 * be set inappropriately and the driver currently sets the hardware up to
2762 * use it whenever needed. 2529 * use it whenever needed.
2763 */ 2530 */
2764static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 2531static int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2765{ 2532{
2766 struct iwl_priv *priv = hw->priv; 2533 struct iwl_priv *priv = hw->priv;
2767 const struct iwl_channel_info *ch_info; 2534 const struct iwl_channel_info *ch_info;
2535 struct ieee80211_conf *conf = &hw->conf;
2768 unsigned long flags; 2536 unsigned long flags;
2769 int ret = 0; 2537 int ret = 0;
2770 u16 channel; 2538 u16 channel;
@@ -2772,6 +2540,8 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
2772 mutex_lock(&priv->mutex); 2540 mutex_lock(&priv->mutex);
2773 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value); 2541 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
2774 2542
2543 priv->current_ht_config.is_ht = conf->ht.enabled;
2544
2775 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) { 2545 if (conf->radio_enabled && iwl_radio_kill_sw_enable_radio(priv)) {
2776 IWL_DEBUG_MAC80211("leave - RF-KILL - waiting for uCode\n"); 2546 IWL_DEBUG_MAC80211("leave - RF-KILL - waiting for uCode\n");
2777 goto out; 2547 goto out;
@@ -2829,13 +2599,13 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
2829 /* The list of supported rates and rate mask can be different 2599 /* The list of supported rates and rate mask can be different
2830 * for each band; since the band may have changed, reset 2600 * for each band; since the band may have changed, reset
2831 * the rate mask to what mac80211 lists */ 2601 * the rate mask to what mac80211 lists */
2832 iwl4965_set_rate(priv); 2602 iwl_set_rate(priv);
2833 2603
2834 spin_unlock_irqrestore(&priv->lock, flags); 2604 spin_unlock_irqrestore(&priv->lock, flags);
2835 2605
2836#ifdef IEEE80211_CONF_CHANNEL_SWITCH 2606#ifdef IEEE80211_CONF_CHANNEL_SWITCH
2837 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) { 2607 if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) {
2838 iwl4965_hw_channel_switch(priv, conf->channel); 2608 iwl_hw_channel_switch(priv, conf->channel);
2839 goto out; 2609 goto out;
2840 } 2610 }
2841#endif 2611#endif
@@ -2863,11 +2633,11 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
2863 2633
2864 iwl_set_tx_power(priv, conf->power_level, false); 2634 iwl_set_tx_power(priv, conf->power_level, false);
2865 2635
2866 iwl4965_set_rate(priv); 2636 iwl_set_rate(priv);
2867 2637
2868 if (memcmp(&priv->active_rxon, 2638 if (memcmp(&priv->active_rxon,
2869 &priv->staging_rxon, sizeof(priv->staging_rxon))) 2639 &priv->staging_rxon, sizeof(priv->staging_rxon)))
2870 iwl4965_commit_rxon(priv); 2640 iwl_commit_rxon(priv);
2871 else 2641 else
2872 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n"); 2642 IWL_DEBUG_INFO("No re-sending same RXON configuration.\n");
2873 2643
@@ -2878,7 +2648,7 @@ out:
2878 return ret; 2648 return ret;
2879} 2649}
2880 2650
2881static void iwl4965_config_ap(struct iwl_priv *priv) 2651static void iwl_config_ap(struct iwl_priv *priv)
2882{ 2652{
2883 int ret = 0; 2653 int ret = 0;
2884 unsigned long flags; 2654 unsigned long flags;
@@ -2887,15 +2657,14 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2887 return; 2657 return;
2888 2658
2889 /* The following should be done only at AP bring up */ 2659 /* The following should be done only at AP bring up */
2890 if (!(iwl_is_associated(priv))) { 2660 if (!iwl_is_associated(priv)) {
2891 2661
2892 /* RXON - unassoc (to set timing command) */ 2662 /* RXON - unassoc (to set timing command) */
2893 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2663 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2894 iwl4965_commit_rxon(priv); 2664 iwl_commit_rxon(priv);
2895 2665
2896 /* RXON Timing */ 2666 /* RXON Timing */
2897 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd)); 2667 iwl_setup_rxon_timing(priv);
2898 iwl4965_setup_rxon_timing(priv);
2899 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, 2668 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
2900 sizeof(priv->rxon_timing), &priv->rxon_timing); 2669 sizeof(priv->rxon_timing), &priv->rxon_timing);
2901 if (ret) 2670 if (ret)
@@ -2928,29 +2697,25 @@ static void iwl4965_config_ap(struct iwl_priv *priv)
2928 } 2697 }
2929 /* restore RXON assoc */ 2698 /* restore RXON assoc */
2930 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 2699 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
2931 iwl4965_commit_rxon(priv); 2700 iwl_commit_rxon(priv);
2932 spin_lock_irqsave(&priv->lock, flags); 2701 spin_lock_irqsave(&priv->lock, flags);
2933 iwl_activate_qos(priv, 1); 2702 iwl_activate_qos(priv, 1);
2934 spin_unlock_irqrestore(&priv->lock, flags); 2703 spin_unlock_irqrestore(&priv->lock, flags);
2935 iwl_rxon_add_station(priv, iwl_bcast_addr, 0); 2704 iwl_rxon_add_station(priv, iwl_bcast_addr, 0);
2936 } 2705 }
2937 iwl4965_send_beacon_cmd(priv); 2706 iwl_send_beacon_cmd(priv);
2938 2707
2939 /* FIXME - we need to add code here to detect a totally new 2708 /* FIXME - we need to add code here to detect a totally new
2940 * configuration, reset the AP, unassoc, rxon timing, assoc, 2709 * configuration, reset the AP, unassoc, rxon timing, assoc,
2941 * clear sta table, add BCAST sta... */ 2710 * clear sta table, add BCAST sta... */
2942} 2711}
2943 2712
2944/* temporary */
2945static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
2946 2713
2947static int iwl4965_mac_config_interface(struct ieee80211_hw *hw, 2714static int iwl_mac_config_interface(struct ieee80211_hw *hw,
2948 struct ieee80211_vif *vif, 2715 struct ieee80211_vif *vif,
2949 struct ieee80211_if_conf *conf) 2716 struct ieee80211_if_conf *conf)
2950{ 2717{
2951 struct iwl_priv *priv = hw->priv; 2718 struct iwl_priv *priv = hw->priv;
2952 DECLARE_MAC_BUF(mac);
2953 unsigned long flags;
2954 int rc; 2719 int rc;
2955 2720
2956 if (conf == NULL) 2721 if (conf == NULL)
@@ -2966,26 +2731,20 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
2966 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 2731 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
2967 if (!beacon) 2732 if (!beacon)
2968 return -ENOMEM; 2733 return -ENOMEM;
2969 rc = iwl4965_mac_beacon_update(hw, beacon); 2734 mutex_lock(&priv->mutex);
2735 rc = iwl_mac_beacon_update(hw, beacon);
2736 mutex_unlock(&priv->mutex);
2970 if (rc) 2737 if (rc)
2971 return rc; 2738 return rc;
2972 } 2739 }
2973 2740
2974 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
2975 (!conf->ssid_len)) {
2976 IWL_DEBUG_MAC80211
2977 ("Leaving in AP mode because HostAPD is not ready.\n");
2978 return 0;
2979 }
2980
2981 if (!iwl_is_alive(priv)) 2741 if (!iwl_is_alive(priv))
2982 return -EAGAIN; 2742 return -EAGAIN;
2983 2743
2984 mutex_lock(&priv->mutex); 2744 mutex_lock(&priv->mutex);
2985 2745
2986 if (conf->bssid) 2746 if (conf->bssid)
2987 IWL_DEBUG_MAC80211("bssid: %s\n", 2747 IWL_DEBUG_MAC80211("bssid: %pM\n", conf->bssid);
2988 print_mac(mac, conf->bssid));
2989 2748
2990/* 2749/*
2991 * very dubious code was here; the probe filtering flag is never set: 2750 * very dubious code was here; the probe filtering flag is never set:
@@ -2998,8 +2757,8 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
2998 if (!conf->bssid) { 2757 if (!conf->bssid) {
2999 conf->bssid = priv->mac_addr; 2758 conf->bssid = priv->mac_addr;
3000 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); 2759 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
3001 IWL_DEBUG_MAC80211("bssid was set to: %s\n", 2760 IWL_DEBUG_MAC80211("bssid was set to: %pM\n",
3002 print_mac(mac, conf->bssid)); 2761 conf->bssid);
3003 } 2762 }
3004 if (priv->ibss_beacon) 2763 if (priv->ibss_beacon)
3005 dev_kfree_skb(priv->ibss_beacon); 2764 dev_kfree_skb(priv->ibss_beacon);
@@ -3030,9 +2789,9 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
3030 memcpy(priv->bssid, conf->bssid, ETH_ALEN); 2789 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
3031 2790
3032 if (priv->iw_mode == NL80211_IFTYPE_AP) 2791 if (priv->iw_mode == NL80211_IFTYPE_AP)
3033 iwl4965_config_ap(priv); 2792 iwl_config_ap(priv);
3034 else { 2793 else {
3035 rc = iwl4965_commit_rxon(priv); 2794 rc = iwl_commit_rxon(priv);
3036 if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc) 2795 if ((priv->iw_mode == NL80211_IFTYPE_STATION) && rc)
3037 iwl_rxon_add_station( 2796 iwl_rxon_add_station(
3038 priv, priv->active_rxon.bssid_addr, 1); 2797 priv, priv->active_rxon.bssid_addr, 1);
@@ -3041,45 +2800,63 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
3041 } else { 2800 } else {
3042 iwl_scan_cancel_timeout(priv, 100); 2801 iwl_scan_cancel_timeout(priv, 100);
3043 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2802 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3044 iwl4965_commit_rxon(priv); 2803 iwl_commit_rxon(priv);
3045 } 2804 }
3046 2805
3047 done: 2806 done:
3048 spin_lock_irqsave(&priv->lock, flags);
3049 if (!conf->ssid_len)
3050 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
3051 else
3052 memcpy(priv->essid, conf->ssid, conf->ssid_len);
3053
3054 priv->essid_len = conf->ssid_len;
3055 spin_unlock_irqrestore(&priv->lock, flags);
3056
3057 IWL_DEBUG_MAC80211("leave\n"); 2807 IWL_DEBUG_MAC80211("leave\n");
3058 mutex_unlock(&priv->mutex); 2808 mutex_unlock(&priv->mutex);
3059 2809
3060 return 0; 2810 return 0;
3061} 2811}
3062 2812
3063static void iwl4965_configure_filter(struct ieee80211_hw *hw, 2813static void iwl_configure_filter(struct ieee80211_hw *hw,
3064 unsigned int changed_flags, 2814 unsigned int changed_flags,
3065 unsigned int *total_flags, 2815 unsigned int *total_flags,
3066 int mc_count, struct dev_addr_list *mc_list) 2816 int mc_count, struct dev_addr_list *mc_list)
3067{ 2817{
3068 struct iwl_priv *priv = hw->priv; 2818 struct iwl_priv *priv = hw->priv;
2819 __le32 *filter_flags = &priv->staging_rxon.filter_flags;
3069 2820
3070 if (changed_flags & (*total_flags) & FIF_OTHER_BSS) { 2821 IWL_DEBUG_MAC80211("Enter: changed: 0x%x, total: 0x%x\n",
3071 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n", 2822 changed_flags, *total_flags);
3072 NL80211_IFTYPE_MONITOR, 2823
3073 changed_flags, *total_flags); 2824 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
3074 /* queue work 'cuz mac80211 is holding a lock which 2825 if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
3075 * prevents us from issuing (synchronous) f/w cmds */ 2826 *filter_flags |= RXON_FILTER_PROMISC_MSK;
3076 queue_work(priv->workqueue, &priv->set_monitor); 2827 else
2828 *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
2829 }
2830 if (changed_flags & FIF_ALLMULTI) {
2831 if (*total_flags & FIF_ALLMULTI)
2832 *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
2833 else
2834 *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
2835 }
2836 if (changed_flags & FIF_CONTROL) {
2837 if (*total_flags & FIF_CONTROL)
2838 *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
2839 else
2840 *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
3077 } 2841 }
3078 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | 2842 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
2843 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
2844 *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
2845 else
2846 *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
2847 }
2848
2849 /* We avoid iwl_commit_rxon here to commit the new filter flags
2850 * since mac80211 will call ieee80211_hw_config immediately.
2851 * (mc_list is not supported at this time). Otherwise, we need to
2852 * queue a background iwl_commit_rxon work.
2853 */
2854
2855 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
3079 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 2856 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
3080} 2857}
3081 2858
3082static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw, 2859static void iwl_mac_remove_interface(struct ieee80211_hw *hw,
3083 struct ieee80211_if_init_conf *conf) 2860 struct ieee80211_if_init_conf *conf)
3084{ 2861{
3085 struct iwl_priv *priv = hw->priv; 2862 struct iwl_priv *priv = hw->priv;
@@ -3091,13 +2868,11 @@ static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
3091 if (iwl_is_ready_rf(priv)) { 2868 if (iwl_is_ready_rf(priv)) {
3092 iwl_scan_cancel_timeout(priv, 100); 2869 iwl_scan_cancel_timeout(priv, 100);
3093 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2870 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3094 iwl4965_commit_rxon(priv); 2871 iwl_commit_rxon(priv);
3095 } 2872 }
3096 if (priv->vif == conf->vif) { 2873 if (priv->vif == conf->vif) {
3097 priv->vif = NULL; 2874 priv->vif = NULL;
3098 memset(priv->bssid, 0, ETH_ALEN); 2875 memset(priv->bssid, 0, ETH_ALEN);
3099 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
3100 priv->essid_len = 0;
3101 } 2876 }
3102 mutex_unlock(&priv->mutex); 2877 mutex_unlock(&priv->mutex);
3103 2878
@@ -3106,7 +2881,7 @@ static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
3106} 2881}
3107 2882
3108#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) 2883#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
3109static void iwl4965_bss_info_changed(struct ieee80211_hw *hw, 2884static void iwl_bss_info_changed(struct ieee80211_hw *hw,
3110 struct ieee80211_vif *vif, 2885 struct ieee80211_vif *vif,
3111 struct ieee80211_bss_conf *bss_conf, 2886 struct ieee80211_bss_conf *bss_conf,
3112 u32 changes) 2887 u32 changes)
@@ -3133,8 +2908,7 @@ static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
3133 } 2908 }
3134 2909
3135 if (changes & BSS_CHANGED_HT) { 2910 if (changes & BSS_CHANGED_HT) {
3136 IWL_DEBUG_MAC80211("HT %d\n", bss_conf->assoc_ht); 2911 iwl_ht_conf(priv, bss_conf);
3137 iwl4965_ht_conf(priv, bss_conf);
3138 iwl_set_rxon_chain(priv); 2912 iwl_set_rxon_chain(priv);
3139 } 2913 }
3140 2914
@@ -3157,7 +2931,7 @@ static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
3157 priv->next_scan_jiffies = jiffies + 2931 priv->next_scan_jiffies = jiffies +
3158 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC; 2932 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
3159 mutex_lock(&priv->mutex); 2933 mutex_lock(&priv->mutex);
3160 iwl4965_post_associate(priv); 2934 iwl_post_associate(priv);
3161 mutex_unlock(&priv->mutex); 2935 mutex_unlock(&priv->mutex);
3162 } else { 2936 } else {
3163 priv->assoc_id = 0; 2937 priv->assoc_id = 0;
@@ -3187,12 +2961,6 @@ static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t ssid_len)
3187 goto out_unlock; 2961 goto out_unlock;
3188 } 2962 }
3189 2963
3190 if (priv->iw_mode == NL80211_IFTYPE_AP) { /* APs don't scan */
3191 ret = -EIO;
3192 IWL_ERROR("ERROR: APs don't scan\n");
3193 goto out_unlock;
3194 }
3195
3196 /* We don't schedule scan within next_scan_jiffies period. 2964 /* We don't schedule scan within next_scan_jiffies period.
3197 * Avoid scanning during possible EAPOL exchange, return 2965 * Avoid scanning during possible EAPOL exchange, return
3198 * success immediately. 2966 * success immediately.
@@ -3233,64 +3001,24 @@ out_unlock:
3233 return ret; 3001 return ret;
3234} 3002}
3235 3003
3236static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw, 3004static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
3237 struct ieee80211_key_conf *keyconf, const u8 *addr, 3005 struct ieee80211_key_conf *keyconf, const u8 *addr,
3238 u32 iv32, u16 *phase1key) 3006 u32 iv32, u16 *phase1key)
3239{ 3007{
3240 struct iwl_priv *priv = hw->priv;
3241 u8 sta_id = IWL_INVALID_STATION;
3242 unsigned long flags;
3243 __le16 key_flags = 0;
3244 int i;
3245 DECLARE_MAC_BUF(mac);
3246 3008
3009 struct iwl_priv *priv = hw->priv;
3247 IWL_DEBUG_MAC80211("enter\n"); 3010 IWL_DEBUG_MAC80211("enter\n");
3248 3011
3249 sta_id = iwl_find_station(priv, addr); 3012 iwl_update_tkip_key(priv, keyconf, addr, iv32, phase1key);
3250 if (sta_id == IWL_INVALID_STATION) {
3251 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
3252 print_mac(mac, addr));
3253 return;
3254 }
3255
3256 if (iwl_scan_cancel(priv)) {
3257 /* cancel scan failed, just live w/ bad key and rely
3258 briefly on SW decryption */
3259 return;
3260 }
3261
3262 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
3263 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
3264 key_flags &= ~STA_KEY_FLG_INVALID;
3265
3266 if (sta_id == priv->hw_params.bcast_sta_id)
3267 key_flags |= STA_KEY_MULTICAST_MSK;
3268
3269 spin_lock_irqsave(&priv->sta_lock, flags);
3270
3271 priv->stations[sta_id].sta.key.key_flags = key_flags;
3272 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
3273
3274 for (i = 0; i < 5; i++)
3275 priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
3276 cpu_to_le16(phase1key[i]);
3277
3278 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
3279 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3280
3281 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
3282
3283 spin_unlock_irqrestore(&priv->sta_lock, flags);
3284 3013
3285 IWL_DEBUG_MAC80211("leave\n"); 3014 IWL_DEBUG_MAC80211("leave\n");
3286} 3015}
3287 3016
3288static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3017static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3289 const u8 *local_addr, const u8 *addr, 3018 const u8 *local_addr, const u8 *addr,
3290 struct ieee80211_key_conf *key) 3019 struct ieee80211_key_conf *key)
3291{ 3020{
3292 struct iwl_priv *priv = hw->priv; 3021 struct iwl_priv *priv = hw->priv;
3293 DECLARE_MAC_BUF(mac);
3294 int ret = 0; 3022 int ret = 0;
3295 u8 sta_id = IWL_INVALID_STATION; 3023 u8 sta_id = IWL_INVALID_STATION;
3296 u8 is_default_wep_key = 0; 3024 u8 is_default_wep_key = 0;
@@ -3308,8 +3036,8 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3308 3036
3309 sta_id = iwl_find_station(priv, addr); 3037 sta_id = iwl_find_station(priv, addr);
3310 if (sta_id == IWL_INVALID_STATION) { 3038 if (sta_id == IWL_INVALID_STATION) {
3311 IWL_DEBUG_MAC80211("leave - %s not in station map.\n", 3039 IWL_DEBUG_MAC80211("leave - %pM not in station map.\n",
3312 print_mac(mac, addr)); 3040 addr);
3313 return -EINVAL; 3041 return -EINVAL;
3314 3042
3315 } 3043 }
@@ -3357,7 +3085,7 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3357 return ret; 3085 return ret;
3358} 3086}
3359 3087
3360static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, 3088static int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
3361 const struct ieee80211_tx_queue_params *params) 3089 const struct ieee80211_tx_queue_params *params)
3362{ 3090{
3363 struct iwl_priv *priv = hw->priv; 3091 struct iwl_priv *priv = hw->priv;
@@ -3376,11 +3104,6 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
3376 return 0; 3104 return 0;
3377 } 3105 }
3378 3106
3379 if (!priv->qos_data.qos_enable) {
3380 priv->qos_data.qos_active = 0;
3381 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
3382 return 0;
3383 }
3384 q = AC_NUM - 1 - queue; 3107 q = AC_NUM - 1 - queue;
3385 3108
3386 spin_lock_irqsave(&priv->lock, flags); 3109 spin_lock_irqsave(&priv->lock, flags);
@@ -3405,15 +3128,14 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
3405 return 0; 3128 return 0;
3406} 3129}
3407 3130
3408static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, 3131static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3409 enum ieee80211_ampdu_mlme_action action, 3132 enum ieee80211_ampdu_mlme_action action,
3410 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 3133 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3411{ 3134{
3412 struct iwl_priv *priv = hw->priv; 3135 struct iwl_priv *priv = hw->priv;
3413 DECLARE_MAC_BUF(mac);
3414 3136
3415 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n", 3137 IWL_DEBUG_HT("A-MPDU action on addr %pM tid %d\n",
3416 print_mac(mac, sta->addr), tid); 3138 sta->addr, tid);
3417 3139
3418 if (!(priv->cfg->sku & IWL_SKU_N)) 3140 if (!(priv->cfg->sku & IWL_SKU_N))
3419 return -EACCES; 3141 return -EACCES;
@@ -3421,10 +3143,10 @@ static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
3421 switch (action) { 3143 switch (action) {
3422 case IEEE80211_AMPDU_RX_START: 3144 case IEEE80211_AMPDU_RX_START:
3423 IWL_DEBUG_HT("start Rx\n"); 3145 IWL_DEBUG_HT("start Rx\n");
3424 return iwl_rx_agg_start(priv, sta->addr, tid, *ssn); 3146 return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn);
3425 case IEEE80211_AMPDU_RX_STOP: 3147 case IEEE80211_AMPDU_RX_STOP:
3426 IWL_DEBUG_HT("stop Rx\n"); 3148 IWL_DEBUG_HT("stop Rx\n");
3427 return iwl_rx_agg_stop(priv, sta->addr, tid); 3149 return iwl_sta_rx_agg_stop(priv, sta->addr, tid);
3428 case IEEE80211_AMPDU_TX_START: 3150 case IEEE80211_AMPDU_TX_START:
3429 IWL_DEBUG_HT("start Tx\n"); 3151 IWL_DEBUG_HT("start Tx\n");
3430 return iwl_tx_agg_start(priv, sta->addr, tid, ssn); 3152 return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
@@ -3438,7 +3160,8 @@ static int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
3438 } 3160 }
3439 return 0; 3161 return 0;
3440} 3162}
3441static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw, 3163
3164static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
3442 struct ieee80211_tx_queue_stats *stats) 3165 struct ieee80211_tx_queue_stats *stats)
3443{ 3166{
3444 struct iwl_priv *priv = hw->priv; 3167 struct iwl_priv *priv = hw->priv;
@@ -3473,7 +3196,7 @@ static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
3473 return 0; 3196 return 0;
3474} 3197}
3475 3198
3476static int iwl4965_mac_get_stats(struct ieee80211_hw *hw, 3199static int iwl_mac_get_stats(struct ieee80211_hw *hw,
3477 struct ieee80211_low_level_stats *stats) 3200 struct ieee80211_low_level_stats *stats)
3478{ 3201{
3479 struct iwl_priv *priv = hw->priv; 3202 struct iwl_priv *priv = hw->priv;
@@ -3485,7 +3208,7 @@ static int iwl4965_mac_get_stats(struct ieee80211_hw *hw,
3485 return 0; 3208 return 0;
3486} 3209}
3487 3210
3488static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw) 3211static void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
3489{ 3212{
3490 struct iwl_priv *priv = hw->priv; 3213 struct iwl_priv *priv = hw->priv;
3491 unsigned long flags; 3214 unsigned long flags;
@@ -3529,7 +3252,7 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
3529 if (priv->iw_mode != NL80211_IFTYPE_AP) { 3252 if (priv->iw_mode != NL80211_IFTYPE_AP) {
3530 iwl_scan_cancel_timeout(priv, 100); 3253 iwl_scan_cancel_timeout(priv, 100);
3531 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 3254 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3532 iwl4965_commit_rxon(priv); 3255 iwl_commit_rxon(priv);
3533 } 3256 }
3534 3257
3535 iwl_power_update_mode(priv, 0); 3258 iwl_power_update_mode(priv, 0);
@@ -3552,31 +3275,28 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
3552 return; 3275 return;
3553 } 3276 }
3554 3277
3555 iwl4965_set_rate(priv); 3278 iwl_set_rate(priv);
3556 3279
3557 mutex_unlock(&priv->mutex); 3280 mutex_unlock(&priv->mutex);
3558 3281
3559 IWL_DEBUG_MAC80211("leave\n"); 3282 IWL_DEBUG_MAC80211("leave\n");
3560} 3283}
3561 3284
3562static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) 3285static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
3563{ 3286{
3564 struct iwl_priv *priv = hw->priv; 3287 struct iwl_priv *priv = hw->priv;
3565 unsigned long flags; 3288 unsigned long flags;
3566 __le64 timestamp; 3289 __le64 timestamp;
3567 3290
3568 mutex_lock(&priv->mutex);
3569 IWL_DEBUG_MAC80211("enter\n"); 3291 IWL_DEBUG_MAC80211("enter\n");
3570 3292
3571 if (!iwl_is_ready_rf(priv)) { 3293 if (!iwl_is_ready_rf(priv)) {
3572 IWL_DEBUG_MAC80211("leave - RF not ready\n"); 3294 IWL_DEBUG_MAC80211("leave - RF not ready\n");
3573 mutex_unlock(&priv->mutex);
3574 return -EIO; 3295 return -EIO;
3575 } 3296 }
3576 3297
3577 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) { 3298 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
3578 IWL_DEBUG_MAC80211("leave - not IBSS\n"); 3299 IWL_DEBUG_MAC80211("leave - not IBSS\n");
3579 mutex_unlock(&priv->mutex);
3580 return -EIO; 3300 return -EIO;
3581 } 3301 }
3582 3302
@@ -3596,9 +3316,8 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
3596 3316
3597 iwl_reset_qos(priv); 3317 iwl_reset_qos(priv);
3598 3318
3599 iwl4965_post_associate(priv); 3319 iwl_post_associate(priv);
3600 3320
3601 mutex_unlock(&priv->mutex);
3602 3321
3603 return 0; 3322 return 0;
3604} 3323}
@@ -3613,7 +3332,7 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
3613 3332
3614/* 3333/*
3615 * The following adds a new attribute to the sysfs representation 3334 * The following adds a new attribute to the sysfs representation
3616 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) 3335 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
3617 * used for controlling the debug level. 3336 * used for controlling the debug level.
3618 * 3337 *
3619 * See the level definitions in iwl for details. 3338 * See the level definitions in iwl for details.
@@ -3699,7 +3418,11 @@ static ssize_t show_tx_power(struct device *d,
3699 struct device_attribute *attr, char *buf) 3418 struct device_attribute *attr, char *buf)
3700{ 3419{
3701 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; 3420 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
3702 return sprintf(buf, "%d\n", priv->tx_power_user_lmt); 3421
3422 if (!iwl_is_ready_rf(priv))
3423 return sprintf(buf, "off\n");
3424 else
3425 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
3703} 3426}
3704 3427
3705static ssize_t store_tx_power(struct device *d, 3428static ssize_t store_tx_power(struct device *d,
@@ -3750,7 +3473,7 @@ static ssize_t store_flags(struct device *d,
3750 else { 3473 else {
3751 IWL_DEBUG_INFO("Commit rxon.flags = 0x%04X\n", flags); 3474 IWL_DEBUG_INFO("Commit rxon.flags = 0x%04X\n", flags);
3752 priv->staging_rxon.flags = cpu_to_le32(flags); 3475 priv->staging_rxon.flags = cpu_to_le32(flags);
3753 iwl4965_commit_rxon(priv); 3476 iwl_commit_rxon(priv);
3754 } 3477 }
3755 } 3478 }
3756 mutex_unlock(&priv->mutex); 3479 mutex_unlock(&priv->mutex);
@@ -3791,7 +3514,7 @@ static ssize_t store_filter_flags(struct device *d,
3791 "0x%04X\n", filter_flags); 3514 "0x%04X\n", filter_flags);
3792 priv->staging_rxon.filter_flags = 3515 priv->staging_rxon.filter_flags =
3793 cpu_to_le32(filter_flags); 3516 cpu_to_le32(filter_flags);
3794 iwl4965_commit_rxon(priv); 3517 iwl_commit_rxon(priv);
3795 } 3518 }
3796 } 3519 }
3797 mutex_unlock(&priv->mutex); 3520 mutex_unlock(&priv->mutex);
@@ -3802,79 +3525,6 @@ static ssize_t store_filter_flags(struct device *d,
3802static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, 3525static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
3803 store_filter_flags); 3526 store_filter_flags);
3804 3527
3805#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
3806
3807static ssize_t show_measurement(struct device *d,
3808 struct device_attribute *attr, char *buf)
3809{
3810 struct iwl_priv *priv = dev_get_drvdata(d);
3811 struct iwl4965_spectrum_notification measure_report;
3812 u32 size = sizeof(measure_report), len = 0, ofs = 0;
3813 u8 *data = (u8 *)&measure_report;
3814 unsigned long flags;
3815
3816 spin_lock_irqsave(&priv->lock, flags);
3817 if (!(priv->measurement_status & MEASUREMENT_READY)) {
3818 spin_unlock_irqrestore(&priv->lock, flags);
3819 return 0;
3820 }
3821 memcpy(&measure_report, &priv->measure_report, size);
3822 priv->measurement_status = 0;
3823 spin_unlock_irqrestore(&priv->lock, flags);
3824
3825 while (size && (PAGE_SIZE - len)) {
3826 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3827 PAGE_SIZE - len, 1);
3828 len = strlen(buf);
3829 if (PAGE_SIZE - len)
3830 buf[len++] = '\n';
3831
3832 ofs += 16;
3833 size -= min(size, 16U);
3834 }
3835
3836 return len;
3837}
3838
3839static ssize_t store_measurement(struct device *d,
3840 struct device_attribute *attr,
3841 const char *buf, size_t count)
3842{
3843 struct iwl_priv *priv = dev_get_drvdata(d);
3844 struct ieee80211_measurement_params params = {
3845 .channel = le16_to_cpu(priv->active_rxon.channel),
3846 .start_time = cpu_to_le64(priv->last_tsf),
3847 .duration = cpu_to_le16(1),
3848 };
3849 u8 type = IWL_MEASURE_BASIC;
3850 u8 buffer[32];
3851 u8 channel;
3852
3853 if (count) {
3854 char *p = buffer;
3855 strncpy(buffer, buf, min(sizeof(buffer), count));
3856 channel = simple_strtoul(p, NULL, 0);
3857 if (channel)
3858 params.channel = channel;
3859
3860 p = buffer;
3861 while (*p && *p != ' ')
3862 p++;
3863 if (*p)
3864 type = simple_strtoul(p + 1, NULL, 0);
3865 }
3866
3867 IWL_DEBUG_INFO("Invoking measurement of type %d on "
3868 "channel %d (for '%s')\n", type, params.channel, buf);
3869 iwl4965_get_measurement(priv, &params, type);
3870
3871 return count;
3872}
3873
3874static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
3875 show_measurement, store_measurement);
3876#endif /* CONFIG_IWLAGN_SPECTRUM_MEASUREMENT */
3877
3878static ssize_t store_retry_rate(struct device *d, 3528static ssize_t store_retry_rate(struct device *d,
3879 struct device_attribute *attr, 3529 struct device_attribute *attr,
3880 const char *buf, size_t count) 3530 const char *buf, size_t count)
@@ -3953,7 +3603,8 @@ static ssize_t show_power_level(struct device *d,
3953 break; 3603 break;
3954 } 3604 }
3955 3605
3956 p += sprintf(p, "\tMODE:%s", (mode < IWL_POWER_AUTO)?"fixed":"auto"); 3606 p += sprintf(p, "\tMODE:%s", (mode < IWL_POWER_AUTO) ?
3607 "fixed" : "auto");
3957 p += sprintf(p, "\tINDEX:%d", level); 3608 p += sprintf(p, "\tINDEX:%d", level);
3958 p += sprintf(p, "\n"); 3609 p += sprintf(p, "\n");
3959 return p - buf + 1; 3610 return p - buf + 1;
@@ -3962,68 +3613,6 @@ static ssize_t show_power_level(struct device *d,
3962static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level, 3613static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
3963 store_power_level); 3614 store_power_level);
3964 3615
3965static ssize_t show_channels(struct device *d,
3966 struct device_attribute *attr, char *buf)
3967{
3968
3969 struct iwl_priv *priv = dev_get_drvdata(d);
3970 struct ieee80211_channel *channels = NULL;
3971 const struct ieee80211_supported_band *supp_band = NULL;
3972 int len = 0, i;
3973 int count = 0;
3974
3975 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
3976 return -EAGAIN;
3977
3978 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
3979 channels = supp_band->channels;
3980 count = supp_band->n_channels;
3981
3982 len += sprintf(&buf[len],
3983 "Displaying %d channels in 2.4GHz band "
3984 "(802.11bg):\n", count);
3985
3986 for (i = 0; i < count; i++)
3987 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
3988 ieee80211_frequency_to_channel(
3989 channels[i].center_freq),
3990 channels[i].max_power,
3991 channels[i].flags & IEEE80211_CHAN_RADAR ?
3992 " (IEEE 802.11h required)" : "",
3993 (!(channels[i].flags & IEEE80211_CHAN_NO_IBSS)
3994 || (channels[i].flags &
3995 IEEE80211_CHAN_RADAR)) ? "" :
3996 ", IBSS",
3997 channels[i].flags &
3998 IEEE80211_CHAN_PASSIVE_SCAN ?
3999 "passive only" : "active/passive");
4000
4001 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
4002 channels = supp_band->channels;
4003 count = supp_band->n_channels;
4004
4005 len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
4006 "(802.11a):\n", count);
4007
4008 for (i = 0; i < count; i++)
4009 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
4010 ieee80211_frequency_to_channel(
4011 channels[i].center_freq),
4012 channels[i].max_power,
4013 channels[i].flags & IEEE80211_CHAN_RADAR ?
4014 " (IEEE 802.11h required)" : "",
4015 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
4016 || (channels[i].flags &
4017 IEEE80211_CHAN_RADAR)) ? "" :
4018 ", IBSS",
4019 channels[i].flags &
4020 IEEE80211_CHAN_PASSIVE_SCAN ?
4021 "passive only" : "active/passive");
4022
4023 return len;
4024}
4025
4026static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
4027 3616
4028static ssize_t show_statistics(struct device *d, 3617static ssize_t show_statistics(struct device *d,
4029 struct device_attribute *attr, char *buf) 3618 struct device_attribute *attr, char *buf)
@@ -4086,12 +3675,11 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
4086 3675
4087 init_waitqueue_head(&priv->wait_command_queue); 3676 init_waitqueue_head(&priv->wait_command_queue);
4088 3677
4089 INIT_WORK(&priv->up, iwl4965_bg_up); 3678 INIT_WORK(&priv->up, iwl_bg_up);
4090 INIT_WORK(&priv->restart, iwl4965_bg_restart); 3679 INIT_WORK(&priv->restart, iwl_bg_restart);
4091 INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish); 3680 INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
4092 INIT_WORK(&priv->rf_kill, iwl4965_bg_rf_kill); 3681 INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill);
4093 INIT_WORK(&priv->beacon_update, iwl4965_bg_beacon_update); 3682 INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
4094 INIT_WORK(&priv->set_monitor, iwl4965_bg_set_monitor);
4095 INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work); 3683 INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
4096 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); 3684 INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start);
4097 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); 3685 INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start);
@@ -4104,10 +3692,10 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
4104 3692
4105 init_timer(&priv->statistics_periodic); 3693 init_timer(&priv->statistics_periodic);
4106 priv->statistics_periodic.data = (unsigned long)priv; 3694 priv->statistics_periodic.data = (unsigned long)priv;
4107 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic; 3695 priv->statistics_periodic.function = iwl_bg_statistics_periodic;
4108 3696
4109 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3697 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
4110 iwl4965_irq_tasklet, (unsigned long)priv); 3698 iwl_irq_tasklet, (unsigned long)priv);
4111} 3699}
4112 3700
4113static void iwl_cancel_deferred_work(struct iwl_priv *priv) 3701static void iwl_cancel_deferred_work(struct iwl_priv *priv)
@@ -4123,13 +3711,9 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
4123 del_timer_sync(&priv->statistics_periodic); 3711 del_timer_sync(&priv->statistics_periodic);
4124} 3712}
4125 3713
4126static struct attribute *iwl4965_sysfs_entries[] = { 3714static struct attribute *iwl_sysfs_entries[] = {
4127 &dev_attr_channels.attr,
4128 &dev_attr_flags.attr, 3715 &dev_attr_flags.attr,
4129 &dev_attr_filter_flags.attr, 3716 &dev_attr_filter_flags.attr,
4130#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
4131 &dev_attr_measurement.attr,
4132#endif
4133 &dev_attr_power_level.attr, 3717 &dev_attr_power_level.attr,
4134 &dev_attr_retry_rate.attr, 3718 &dev_attr_retry_rate.attr,
4135 &dev_attr_statistics.attr, 3719 &dev_attr_statistics.attr,
@@ -4144,39 +3728,38 @@ static struct attribute *iwl4965_sysfs_entries[] = {
4144 NULL 3728 NULL
4145}; 3729};
4146 3730
4147static struct attribute_group iwl4965_attribute_group = { 3731static struct attribute_group iwl_attribute_group = {
4148 .name = NULL, /* put in device directory */ 3732 .name = NULL, /* put in device directory */
4149 .attrs = iwl4965_sysfs_entries, 3733 .attrs = iwl_sysfs_entries,
4150}; 3734};
4151 3735
4152static struct ieee80211_ops iwl4965_hw_ops = { 3736static struct ieee80211_ops iwl_hw_ops = {
4153 .tx = iwl4965_mac_tx, 3737 .tx = iwl_mac_tx,
4154 .start = iwl4965_mac_start, 3738 .start = iwl_mac_start,
4155 .stop = iwl4965_mac_stop, 3739 .stop = iwl_mac_stop,
4156 .add_interface = iwl4965_mac_add_interface, 3740 .add_interface = iwl_mac_add_interface,
4157 .remove_interface = iwl4965_mac_remove_interface, 3741 .remove_interface = iwl_mac_remove_interface,
4158 .config = iwl4965_mac_config, 3742 .config = iwl_mac_config,
4159 .config_interface = iwl4965_mac_config_interface, 3743 .config_interface = iwl_mac_config_interface,
4160 .configure_filter = iwl4965_configure_filter, 3744 .configure_filter = iwl_configure_filter,
4161 .set_key = iwl4965_mac_set_key, 3745 .set_key = iwl_mac_set_key,
4162 .update_tkip_key = iwl4965_mac_update_tkip_key, 3746 .update_tkip_key = iwl_mac_update_tkip_key,
4163 .get_stats = iwl4965_mac_get_stats, 3747 .get_stats = iwl_mac_get_stats,
4164 .get_tx_stats = iwl4965_mac_get_tx_stats, 3748 .get_tx_stats = iwl_mac_get_tx_stats,
4165 .conf_tx = iwl4965_mac_conf_tx, 3749 .conf_tx = iwl_mac_conf_tx,
4166 .reset_tsf = iwl4965_mac_reset_tsf, 3750 .reset_tsf = iwl_mac_reset_tsf,
4167 .bss_info_changed = iwl4965_bss_info_changed, 3751 .bss_info_changed = iwl_bss_info_changed,
4168 .ampdu_action = iwl4965_mac_ampdu_action, 3752 .ampdu_action = iwl_mac_ampdu_action,
4169 .hw_scan = iwl_mac_hw_scan 3753 .hw_scan = iwl_mac_hw_scan
4170}; 3754};
4171 3755
4172static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3756static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4173{ 3757{
4174 int err = 0; 3758 int err = 0;
4175 struct iwl_priv *priv; 3759 struct iwl_priv *priv;
4176 struct ieee80211_hw *hw; 3760 struct ieee80211_hw *hw;
4177 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 3761 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
4178 unsigned long flags; 3762 unsigned long flags;
4179 DECLARE_MAC_BUF(mac);
4180 3763
4181 /************************ 3764 /************************
4182 * 1. Allocating HW data 3765 * 1. Allocating HW data
@@ -4188,10 +3771,10 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4188 if (cfg->mod_params->debug & IWL_DL_INFO) 3771 if (cfg->mod_params->debug & IWL_DL_INFO)
4189 dev_printk(KERN_DEBUG, &(pdev->dev), 3772 dev_printk(KERN_DEBUG, &(pdev->dev),
4190 "Disabling hw_scan\n"); 3773 "Disabling hw_scan\n");
4191 iwl4965_hw_ops.hw_scan = NULL; 3774 iwl_hw_ops.hw_scan = NULL;
4192 } 3775 }
4193 3776
4194 hw = iwl_alloc_all(cfg, &iwl4965_hw_ops); 3777 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
4195 if (!hw) { 3778 if (!hw) {
4196 err = -ENOMEM; 3779 err = -ENOMEM;
4197 goto out; 3780 goto out;
@@ -4285,7 +3868,7 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4285 3868
4286 /* extract MAC Address */ 3869 /* extract MAC Address */
4287 iwl_eeprom_get_mac(priv, priv->mac_addr); 3870 iwl_eeprom_get_mac(priv, priv->mac_addr);
4288 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr)); 3871 IWL_DEBUG_INFO("MAC address: %pM\n", priv->mac_addr);
4289 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); 3872 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
4290 3873
4291 /************************ 3874 /************************
@@ -4319,10 +3902,10 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4319 * 8. Setup services 3902 * 8. Setup services
4320 ********************/ 3903 ********************/
4321 spin_lock_irqsave(&priv->lock, flags); 3904 spin_lock_irqsave(&priv->lock, flags);
4322 iwl4965_disable_interrupts(priv); 3905 iwl_disable_interrupts(priv);
4323 spin_unlock_irqrestore(&priv->lock, flags); 3906 spin_unlock_irqrestore(&priv->lock, flags);
4324 3907
4325 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group); 3908 err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group);
4326 if (err) { 3909 if (err) {
4327 IWL_ERROR("failed to create sysfs device attributes\n"); 3910 IWL_ERROR("failed to create sysfs device attributes\n");
4328 goto out_uninit_drv; 3911 goto out_uninit_drv;
@@ -4358,7 +3941,7 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4358 return 0; 3941 return 0;
4359 3942
4360 out_remove_sysfs: 3943 out_remove_sysfs:
4361 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group); 3944 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
4362 out_uninit_drv: 3945 out_uninit_drv:
4363 iwl_uninit_drv(priv); 3946 iwl_uninit_drv(priv);
4364 out_free_eeprom: 3947 out_free_eeprom:
@@ -4376,7 +3959,7 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4376 return err; 3959 return err;
4377} 3960}
4378 3961
4379static void __devexit iwl4965_pci_remove(struct pci_dev *pdev) 3962static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4380{ 3963{
4381 struct iwl_priv *priv = pci_get_drvdata(pdev); 3964 struct iwl_priv *priv = pci_get_drvdata(pdev);
4382 unsigned long flags; 3965 unsigned long flags;
@@ -4387,10 +3970,10 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
4387 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); 3970 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
4388 3971
4389 iwl_dbgfs_unregister(priv); 3972 iwl_dbgfs_unregister(priv);
4390 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group); 3973 sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
4391 3974
4392 /* ieee80211_unregister_hw call wil cause iwl4965_mac_stop to 3975 /* ieee80211_unregister_hw call wil cause iwl_mac_stop to
4393 * to be called and iwl4965_down since we are removing the device 3976 * to be called and iwl_down since we are removing the device
4394 * we need to set STATUS_EXIT_PENDING bit. 3977 * we need to set STATUS_EXIT_PENDING bit.
4395 */ 3978 */
4396 set_bit(STATUS_EXIT_PENDING, &priv->status); 3979 set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -4398,20 +3981,20 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
4398 ieee80211_unregister_hw(priv->hw); 3981 ieee80211_unregister_hw(priv->hw);
4399 priv->mac80211_registered = 0; 3982 priv->mac80211_registered = 0;
4400 } else { 3983 } else {
4401 iwl4965_down(priv); 3984 iwl_down(priv);
4402 } 3985 }
4403 3986
4404 /* make sure we flush any pending irq or 3987 /* make sure we flush any pending irq or
4405 * tasklet for the driver 3988 * tasklet for the driver
4406 */ 3989 */
4407 spin_lock_irqsave(&priv->lock, flags); 3990 spin_lock_irqsave(&priv->lock, flags);
4408 iwl4965_disable_interrupts(priv); 3991 iwl_disable_interrupts(priv);
4409 spin_unlock_irqrestore(&priv->lock, flags); 3992 spin_unlock_irqrestore(&priv->lock, flags);
4410 3993
4411 iwl_synchronize_irq(priv); 3994 iwl_synchronize_irq(priv);
4412 3995
4413 iwl_rfkill_unregister(priv); 3996 iwl_rfkill_unregister(priv);
4414 iwl4965_dealloc_ucode_pci(priv); 3997 iwl_dealloc_ucode_pci(priv);
4415 3998
4416 if (priv->rxq.bd) 3999 if (priv->rxq.bd)
4417 iwl_rx_queue_free(priv, &priv->rxq); 4000 iwl_rx_queue_free(priv, &priv->rxq);
@@ -4424,7 +4007,7 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
4424 /*netif_stop_queue(dev); */ 4007 /*netif_stop_queue(dev); */
4425 flush_workqueue(priv->workqueue); 4008 flush_workqueue(priv->workqueue);
4426 4009
4427 /* ieee80211_unregister_hw calls iwl4965_mac_stop, which flushes 4010 /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
4428 * priv->workqueue... so we can't take down the workqueue 4011 * priv->workqueue... so we can't take down the workqueue
4429 * until now... */ 4012 * until now... */
4430 destroy_workqueue(priv->workqueue); 4013 destroy_workqueue(priv->workqueue);
@@ -4445,13 +4028,13 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
4445 4028
4446#ifdef CONFIG_PM 4029#ifdef CONFIG_PM
4447 4030
4448static int iwl4965_pci_suspend(struct pci_dev *pdev, pm_message_t state) 4031static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4449{ 4032{
4450 struct iwl_priv *priv = pci_get_drvdata(pdev); 4033 struct iwl_priv *priv = pci_get_drvdata(pdev);
4451 4034
4452 if (priv->is_open) { 4035 if (priv->is_open) {
4453 set_bit(STATUS_IN_SUSPEND, &priv->status); 4036 set_bit(STATUS_IN_SUSPEND, &priv->status);
4454 iwl4965_mac_stop(priv->hw); 4037 iwl_mac_stop(priv->hw);
4455 priv->is_open = 1; 4038 priv->is_open = 1;
4456 } 4039 }
4457 4040
@@ -4460,14 +4043,14 @@ static int iwl4965_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4460 return 0; 4043 return 0;
4461} 4044}
4462 4045
4463static int iwl4965_pci_resume(struct pci_dev *pdev) 4046static int iwl_pci_resume(struct pci_dev *pdev)
4464{ 4047{
4465 struct iwl_priv *priv = pci_get_drvdata(pdev); 4048 struct iwl_priv *priv = pci_get_drvdata(pdev);
4466 4049
4467 pci_set_power_state(pdev, PCI_D0); 4050 pci_set_power_state(pdev, PCI_D0);
4468 4051
4469 if (priv->is_open) 4052 if (priv->is_open)
4470 iwl4965_mac_start(priv->hw); 4053 iwl_mac_start(priv->hw);
4471 4054
4472 clear_bit(STATUS_IN_SUSPEND, &priv->status); 4055 clear_bit(STATUS_IN_SUSPEND, &priv->status);
4473 return 0; 4056 return 0;
@@ -4502,7 +4085,11 @@ static struct pci_device_id iwl_hw_card_ids[] = {
4502 {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, 4085 {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)},
4503 {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, 4086 {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)},
4504 {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, 4087 {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)},
4088/* 5150 Wifi/WiMax */
4089 {IWL_PCI_DEVICE(0x423C, PCI_ANY_ID, iwl5150_agn_cfg)},
4090 {IWL_PCI_DEVICE(0x423D, PCI_ANY_ID, iwl5150_agn_cfg)},
4505#endif /* CONFIG_IWL5000 */ 4091#endif /* CONFIG_IWL5000 */
4092
4506 {0} 4093 {0}
4507}; 4094};
4508MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); 4095MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
@@ -4510,15 +4097,15 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
4510static struct pci_driver iwl_driver = { 4097static struct pci_driver iwl_driver = {
4511 .name = DRV_NAME, 4098 .name = DRV_NAME,
4512 .id_table = iwl_hw_card_ids, 4099 .id_table = iwl_hw_card_ids,
4513 .probe = iwl4965_pci_probe, 4100 .probe = iwl_pci_probe,
4514 .remove = __devexit_p(iwl4965_pci_remove), 4101 .remove = __devexit_p(iwl_pci_remove),
4515#ifdef CONFIG_PM 4102#ifdef CONFIG_PM
4516 .suspend = iwl4965_pci_suspend, 4103 .suspend = iwl_pci_suspend,
4517 .resume = iwl4965_pci_resume, 4104 .resume = iwl_pci_resume,
4518#endif 4105#endif
4519}; 4106};
4520 4107
4521static int __init iwl4965_init(void) 4108static int __init iwl_init(void)
4522{ 4109{
4523 4110
4524 int ret; 4111 int ret;
@@ -4544,11 +4131,11 @@ error_register:
4544 return ret; 4131 return ret;
4545} 4132}
4546 4133
4547static void __exit iwl4965_exit(void) 4134static void __exit iwl_exit(void)
4548{ 4135{
4549 pci_unregister_driver(&iwl_driver); 4136 pci_unregister_driver(&iwl_driver);
4550 iwlagn_rate_control_unregister(); 4137 iwlagn_rate_control_unregister();
4551} 4138}
4552 4139
4553module_exit(iwl4965_exit); 4140module_exit(iwl_exit);
4554module_init(iwl4965_init); 4141module_init(iwl_init);
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 72fbf47229db..f836ecc55758 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -70,7 +70,16 @@
70 * INIT calibrations framework 70 * INIT calibrations framework
71 *****************************************************************************/ 71 *****************************************************************************/
72 72
73 int iwl_send_calib_results(struct iwl_priv *priv) 73struct statistics_general_data {
74 u32 beacon_silence_rssi_a;
75 u32 beacon_silence_rssi_b;
76 u32 beacon_silence_rssi_c;
77 u32 beacon_energy_a;
78 u32 beacon_energy_b;
79 u32 beacon_energy_c;
80};
81
82int iwl_send_calib_results(struct iwl_priv *priv)
74{ 83{
75 int ret = 0; 84 int ret = 0;
76 int i = 0; 85 int i = 0;
@@ -80,14 +89,16 @@
80 .meta.flags = CMD_SIZE_HUGE, 89 .meta.flags = CMD_SIZE_HUGE,
81 }; 90 };
82 91
83 for (i = 0; i < IWL_CALIB_MAX; i++) 92 for (i = 0; i < IWL_CALIB_MAX; i++) {
84 if (priv->calib_results[i].buf) { 93 if ((BIT(i) & priv->hw_params.calib_init_cfg) &&
94 priv->calib_results[i].buf) {
85 hcmd.len = priv->calib_results[i].buf_len; 95 hcmd.len = priv->calib_results[i].buf_len;
86 hcmd.data = priv->calib_results[i].buf; 96 hcmd.data = priv->calib_results[i].buf;
87 ret = iwl_send_cmd_sync(priv, &hcmd); 97 ret = iwl_send_cmd_sync(priv, &hcmd);
88 if (ret) 98 if (ret)
89 goto err; 99 goto err;
90 } 100 }
101 }
91 102
92 return 0; 103 return 0;
93err: 104err:
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h
index 94c8e316382a..1abe84bb74ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.h
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 8d04e966ad48..52966ffbef6e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -66,8 +66,14 @@
66 * Please use iwl-dev.h for driver implementation definitions. 66 * Please use iwl-dev.h for driver implementation definitions.
67 */ 67 */
68 68
69#ifndef __iwl4965_commands_h__ 69#ifndef __iwl_commands_h__
70#define __iwl4965_commands_h__ 70#define __iwl_commands_h__
71
72/* uCode version contains 4 values: Major/Minor/API/Serial */
73#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
74#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
75#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
76#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
71 77
72enum { 78enum {
73 REPLY_ALIVE = 0x1, 79 REPLY_ALIVE = 0x1,
@@ -88,6 +94,7 @@ enum {
88 REPLY_WEPKEY = 0x20, 94 REPLY_WEPKEY = 0x20,
89 95
90 /* RX, TX, LEDs */ 96 /* RX, TX, LEDs */
97 REPLY_3945_RX = 0x1b, /* 3945 only */
91 REPLY_TX = 0x1c, 98 REPLY_TX = 0x1c,
92 REPLY_RATE_SCALE = 0x47, /* 3945 only */ 99 REPLY_RATE_SCALE = 0x47, /* 3945 only */
93 REPLY_LEDS_CMD = 0x48, 100 REPLY_LEDS_CMD = 0x48,
@@ -98,6 +105,11 @@ enum {
98 COEX_MEDIUM_NOTIFICATION = 0x5b, 105 COEX_MEDIUM_NOTIFICATION = 0x5b,
99 COEX_EVENT_CMD = 0x5c, 106 COEX_EVENT_CMD = 0x5c,
100 107
108 /* Calibration */
109 CALIBRATION_CFG_CMD = 0x65,
110 CALIBRATION_RES_NOTIFICATION = 0x66,
111 CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
112
101 /* 802.11h related */ 113 /* 802.11h related */
102 RADAR_NOTIFICATION = 0x70, /* not used */ 114 RADAR_NOTIFICATION = 0x70, /* not used */
103 REPLY_QUIET_CMD = 0x71, /* not used */ 115 REPLY_QUIET_CMD = 0x71, /* not used */
@@ -129,7 +141,7 @@ enum {
129 REPLY_TX_POWER_DBM_CMD = 0x98, 141 REPLY_TX_POWER_DBM_CMD = 0x98,
130 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */ 142 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */
131 143
132 /* Bluetooth device coexistance config command */ 144 /* Bluetooth device coexistence config command */
133 REPLY_BT_CONFIG = 0x9b, 145 REPLY_BT_CONFIG = 0x9b,
134 146
135 /* Statistics */ 147 /* Statistics */
@@ -167,8 +179,8 @@ enum {
167#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) 179#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
168#define SEQ_TO_INDEX(s) ((s) & 0xff) 180#define SEQ_TO_INDEX(s) ((s) & 0xff)
169#define INDEX_TO_SEQ(i) ((i) & 0xff) 181#define INDEX_TO_SEQ(i) ((i) & 0xff)
170#define SEQ_HUGE_FRAME __constant_cpu_to_le16(0x4000) 182#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
171#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000) 183#define SEQ_RX_FRAME cpu_to_le16(0x8000)
172 184
173/** 185/**
174 * struct iwl_cmd_header 186 * struct iwl_cmd_header
@@ -180,7 +192,7 @@ struct iwl_cmd_header {
180 u8 cmd; /* Command ID: REPLY_RXON, etc. */ 192 u8 cmd; /* Command ID: REPLY_RXON, etc. */
181 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ 193 u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
182 /* 194 /*
183 * The driver sets up the sequence number to values of its chosing. 195 * The driver sets up the sequence number to values of its choosing.
184 * uCode does not use this value, but passes it back to the driver 196 * uCode does not use this value, but passes it back to the driver
185 * when sending the response to each driver-originated command, so 197 * when sending the response to each driver-originated command, so
186 * the driver can match the response to the command. Since the values 198 * the driver can match the response to the command. Since the values
@@ -208,10 +220,11 @@ struct iwl_cmd_header {
208} __attribute__ ((packed)); 220} __attribute__ ((packed));
209 221
210/** 222/**
211 * 4965 rate_n_flags bit fields 223 * iwlagn rate_n_flags bit fields
212 * 224 *
213 * rate_n_flags format is used in following 4965 commands: 225 * rate_n_flags format is used in following iwlagn commands:
214 * REPLY_RX (response only) 226 * REPLY_RX (response only)
227 * REPLY_RX_MPDU (response only)
215 * REPLY_TX (both command and response) 228 * REPLY_TX (both command and response)
216 * REPLY_TX_LINK_QUALITY_CMD 229 * REPLY_TX_LINK_QUALITY_CMD
217 * 230 *
@@ -225,8 +238,9 @@ struct iwl_cmd_header {
225 * 6) 54 Mbps 238 * 6) 54 Mbps
226 * 7) 60 Mbps 239 * 7) 60 Mbps
227 * 240 *
228 * 3: 0) Single stream (SISO) 241 * 4-3: 0) Single stream (SISO)
229 * 1) Dual stream (MIMO) 242 * 1) Dual stream (MIMO)
243 * 2) Triple stream (MIMO)
230 * 244 *
231 * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps FAT duplicate data 245 * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps FAT duplicate data
232 * 246 *
@@ -247,8 +261,8 @@ struct iwl_cmd_header {
247 * 110) 11 Mbps 261 * 110) 11 Mbps
248 */ 262 */
249#define RATE_MCS_CODE_MSK 0x7 263#define RATE_MCS_CODE_MSK 0x7
250#define RATE_MCS_MIMO_POS 3 264#define RATE_MCS_SPATIAL_POS 3
251#define RATE_MCS_MIMO_MSK 0x8 265#define RATE_MCS_SPATIAL_MSK 0x18
252#define RATE_MCS_HT_DUP_POS 5 266#define RATE_MCS_HT_DUP_POS 5
253#define RATE_MCS_HT_DUP_MSK 0x20 267#define RATE_MCS_HT_DUP_MSK 0x20
254 268
@@ -278,18 +292,20 @@ struct iwl_cmd_header {
278#define RATE_MCS_SGI_MSK 0x2000 292#define RATE_MCS_SGI_MSK 0x2000
279 293
280/** 294/**
281 * rate_n_flags Tx antenna masks (4965 has 2 transmitters): 295 * rate_n_flags Tx antenna masks
282 * bit14:15 01 B inactive, A active 296 * 4965 has 2 transmitters
283 * 10 B active, A inactive 297 * 5100 has 1 transmitter B
284 * 11 Both active 298 * 5150 has 1 transmitter A
299 * 5300 has 3 transmitters
300 * 5350 has 3 transmitters
301 * bit14:16
285 */ 302 */
286#define RATE_MCS_ANT_POS 14 303#define RATE_MCS_ANT_POS 14
287#define RATE_MCS_ANT_A_MSK 0x04000 304#define RATE_MCS_ANT_A_MSK 0x04000
288#define RATE_MCS_ANT_B_MSK 0x08000 305#define RATE_MCS_ANT_B_MSK 0x08000
289#define RATE_MCS_ANT_C_MSK 0x10000 306#define RATE_MCS_ANT_C_MSK 0x10000
290#define RATE_MCS_ANT_ABC_MSK 0x1C000 307#define RATE_MCS_ANT_ABC_MSK 0x1C000
291 308#define RATE_ANT_NUM 3
292#define RATE_MCS_ANT_INIT_IND 1
293 309
294#define POWER_TABLE_NUM_ENTRIES 33 310#define POWER_TABLE_NUM_ENTRIES 33
295#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 311#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
@@ -340,7 +356,7 @@ struct iwl4965_tx_power_db {
340} __attribute__ ((packed)); 356} __attribute__ ((packed));
341 357
342/** 358/**
343 * Commad REPLY_TX_POWER_DBM_CMD = 0x98 359 * Command REPLY_TX_POWER_DBM_CMD = 0x98
344 * struct iwl5000_tx_power_dbm_cmd 360 * struct iwl5000_tx_power_dbm_cmd
345 */ 361 */
346#define IWL50_TX_POWER_AUTO 0x7f 362#define IWL50_TX_POWER_AUTO 0x7f
@@ -359,7 +375,7 @@ struct iwl5000_tx_power_dbm_cmd {
359 * 375 *
360 *****************************************************************************/ 376 *****************************************************************************/
361 377
362#define UCODE_VALID_OK __constant_cpu_to_le32(0x1) 378#define UCODE_VALID_OK cpu_to_le32(0x1)
363#define INITIALIZE_SUBTYPE (9) 379#define INITIALIZE_SUBTYPE (9)
364 380
365/* 381/*
@@ -376,7 +392,7 @@ struct iwl5000_tx_power_dbm_cmd {
376 * calculating txpower settings: 392 * calculating txpower settings:
377 * 393 *
378 * 1) Power supply voltage indication. The voltage sensor outputs higher 394 * 1) Power supply voltage indication. The voltage sensor outputs higher
379 * values for lower voltage, and vice versa. 395 * values for lower voltage, and vice verse.
380 * 396 *
381 * 2) Temperature measurement parameters, for each of two channel widths 397 * 2) Temperature measurement parameters, for each of two channel widths
382 * (20 MHz and 40 MHz) supported by the radios. Temperature sensing 398 * (20 MHz and 40 MHz) supported by the radios. Temperature sensing
@@ -477,11 +493,6 @@ struct iwl_alive_resp {
477} __attribute__ ((packed)); 493} __attribute__ ((packed));
478 494
479 495
480union tsf {
481 u8 byte[8];
482 __le16 word[4];
483 __le32 dw[2];
484};
485 496
486/* 497/*
487 * REPLY_ERROR = 0x2 (response only, not a command) 498 * REPLY_ERROR = 0x2 (response only, not a command)
@@ -492,7 +503,7 @@ struct iwl_error_resp {
492 u8 reserved1; 503 u8 reserved1;
493 __le16 bad_cmd_seq_num; 504 __le16 bad_cmd_seq_num;
494 __le32 error_info; 505 __le32 error_info;
495 union tsf timestamp; 506 __le64 timestamp;
496} __attribute__ ((packed)); 507} __attribute__ ((packed));
497 508
498/****************************************************************************** 509/******************************************************************************
@@ -513,75 +524,75 @@ enum {
513}; 524};
514 525
515 526
516#define RXON_RX_CHAIN_DRIVER_FORCE_MSK __constant_cpu_to_le16(0x1 << 0) 527#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
517#define RXON_RX_CHAIN_VALID_MSK __constant_cpu_to_le16(0x7 << 1) 528#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
518#define RXON_RX_CHAIN_VALID_POS (1) 529#define RXON_RX_CHAIN_VALID_POS (1)
519#define RXON_RX_CHAIN_FORCE_SEL_MSK __constant_cpu_to_le16(0x7 << 4) 530#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4)
520#define RXON_RX_CHAIN_FORCE_SEL_POS (4) 531#define RXON_RX_CHAIN_FORCE_SEL_POS (4)
521#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK __constant_cpu_to_le16(0x7 << 7) 532#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7)
522#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7) 533#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
523#define RXON_RX_CHAIN_CNT_MSK __constant_cpu_to_le16(0x3 << 10) 534#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10)
524#define RXON_RX_CHAIN_CNT_POS (10) 535#define RXON_RX_CHAIN_CNT_POS (10)
525#define RXON_RX_CHAIN_MIMO_CNT_MSK __constant_cpu_to_le16(0x3 << 12) 536#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12)
526#define RXON_RX_CHAIN_MIMO_CNT_POS (12) 537#define RXON_RX_CHAIN_MIMO_CNT_POS (12)
527#define RXON_RX_CHAIN_MIMO_FORCE_MSK __constant_cpu_to_le16(0x1 << 14) 538#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14)
528#define RXON_RX_CHAIN_MIMO_FORCE_POS (14) 539#define RXON_RX_CHAIN_MIMO_FORCE_POS (14)
529 540
530/* rx_config flags */ 541/* rx_config flags */
531/* band & modulation selection */ 542/* band & modulation selection */
532#define RXON_FLG_BAND_24G_MSK __constant_cpu_to_le32(1 << 0) 543#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0)
533#define RXON_FLG_CCK_MSK __constant_cpu_to_le32(1 << 1) 544#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1)
534/* auto detection enable */ 545/* auto detection enable */
535#define RXON_FLG_AUTO_DETECT_MSK __constant_cpu_to_le32(1 << 2) 546#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2)
536/* TGg protection when tx */ 547/* TGg protection when tx */
537#define RXON_FLG_TGG_PROTECT_MSK __constant_cpu_to_le32(1 << 3) 548#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3)
538/* cck short slot & preamble */ 549/* cck short slot & preamble */
539#define RXON_FLG_SHORT_SLOT_MSK __constant_cpu_to_le32(1 << 4) 550#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4)
540#define RXON_FLG_SHORT_PREAMBLE_MSK __constant_cpu_to_le32(1 << 5) 551#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5)
541/* antenna selection */ 552/* antenna selection */
542#define RXON_FLG_DIS_DIV_MSK __constant_cpu_to_le32(1 << 7) 553#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7)
543#define RXON_FLG_ANT_SEL_MSK __constant_cpu_to_le32(0x0f00) 554#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00)
544#define RXON_FLG_ANT_A_MSK __constant_cpu_to_le32(1 << 8) 555#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
545#define RXON_FLG_ANT_B_MSK __constant_cpu_to_le32(1 << 9) 556#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
546/* radar detection enable */ 557/* radar detection enable */
547#define RXON_FLG_RADAR_DETECT_MSK __constant_cpu_to_le32(1 << 12) 558#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12)
548#define RXON_FLG_TGJ_NARROW_BAND_MSK __constant_cpu_to_le32(1 << 13) 559#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13)
549/* rx response to host with 8-byte TSF 560/* rx response to host with 8-byte TSF
550* (according to ON_AIR deassertion) */ 561* (according to ON_AIR deassertion) */
551#define RXON_FLG_TSF2HOST_MSK __constant_cpu_to_le32(1 << 15) 562#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
552 563
553 564
554/* HT flags */ 565/* HT flags */
555#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) 566#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
556#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK __constant_cpu_to_le32(0x1 << 22) 567#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
557 568
558#define RXON_FLG_HT_OPERATING_MODE_POS (23) 569#define RXON_FLG_HT_OPERATING_MODE_POS (23)
559 570
560#define RXON_FLG_HT_PROT_MSK __constant_cpu_to_le32(0x1 << 23) 571#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23)
561#define RXON_FLG_FAT_PROT_MSK __constant_cpu_to_le32(0x2 << 23) 572#define RXON_FLG_FAT_PROT_MSK cpu_to_le32(0x2 << 23)
562 573
563#define RXON_FLG_CHANNEL_MODE_POS (25) 574#define RXON_FLG_CHANNEL_MODE_POS (25)
564#define RXON_FLG_CHANNEL_MODE_MSK __constant_cpu_to_le32(0x3 << 25) 575#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25)
565#define RXON_FLG_CHANNEL_MODE_PURE_40_MSK __constant_cpu_to_le32(0x1 << 25) 576#define RXON_FLG_CHANNEL_MODE_PURE_40_MSK cpu_to_le32(0x1 << 25)
566#define RXON_FLG_CHANNEL_MODE_MIXED_MSK __constant_cpu_to_le32(0x2 << 25) 577#define RXON_FLG_CHANNEL_MODE_MIXED_MSK cpu_to_le32(0x2 << 25)
567/* CTS to self (if spec allows) flag */ 578/* CTS to self (if spec allows) flag */
568#define RXON_FLG_SELF_CTS_EN __constant_cpu_to_le32(0x1<<30) 579#define RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30)
569 580
570/* rx_config filter flags */ 581/* rx_config filter flags */
571/* accept all data frames */ 582/* accept all data frames */
572#define RXON_FILTER_PROMISC_MSK __constant_cpu_to_le32(1 << 0) 583#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0)
573/* pass control & management to host */ 584/* pass control & management to host */
574#define RXON_FILTER_CTL2HOST_MSK __constant_cpu_to_le32(1 << 1) 585#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1)
575/* accept multi-cast */ 586/* accept multi-cast */
576#define RXON_FILTER_ACCEPT_GRP_MSK __constant_cpu_to_le32(1 << 2) 587#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2)
577/* don't decrypt uni-cast frames */ 588/* don't decrypt uni-cast frames */
578#define RXON_FILTER_DIS_DECRYPT_MSK __constant_cpu_to_le32(1 << 3) 589#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3)
579/* don't decrypt multi-cast frames */ 590/* don't decrypt multi-cast frames */
580#define RXON_FILTER_DIS_GRP_DECRYPT_MSK __constant_cpu_to_le32(1 << 4) 591#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
581/* STA is associated */ 592/* STA is associated */
582#define RXON_FILTER_ASSOC_MSK __constant_cpu_to_le32(1 << 5) 593#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5)
583/* transfer to host non bssid beacons in associated state */ 594/* transfer to host non bssid beacons in associated state */
584#define RXON_FILTER_BCON_AWARE_MSK __constant_cpu_to_le32(1 << 6) 595#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
585 596
586/** 597/**
587 * REPLY_RXON = 0x10 (command, has simple generic response) 598 * REPLY_RXON = 0x10 (command, has simple generic response)
@@ -620,7 +631,7 @@ struct iwl4965_rxon_cmd {
620 u8 ofdm_ht_dual_stream_basic_rates; 631 u8 ofdm_ht_dual_stream_basic_rates;
621} __attribute__ ((packed)); 632} __attribute__ ((packed));
622 633
623/* 5000 HW just extend this cmmand */ 634/* 5000 HW just extend this command */
624struct iwl_rxon_cmd { 635struct iwl_rxon_cmd {
625 u8 node_addr[6]; 636 u8 node_addr[6];
626 __le16 reserved1; 637 __le16 reserved1;
@@ -679,8 +690,8 @@ struct iwl4965_rxon_assoc_cmd {
679/* 690/*
680 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) 691 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
681 */ 692 */
682struct iwl4965_rxon_time_cmd { 693struct iwl_rxon_time_cmd {
683 union tsf timestamp; 694 __le64 timestamp;
684 __le16 beacon_interval; 695 __le16 beacon_interval;
685 __le16 atim_window; 696 __le16 atim_window;
686 __le32 beacon_init_val; 697 __le32 beacon_init_val;
@@ -691,7 +702,7 @@ struct iwl4965_rxon_time_cmd {
691/* 702/*
692 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) 703 * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
693 */ 704 */
694struct iwl4965_channel_switch_cmd { 705struct iwl_channel_switch_cmd {
695 u8 band; 706 u8 band;
696 u8 expect_beacon; 707 u8 expect_beacon;
697 __le16 channel; 708 __le16 channel;
@@ -704,7 +715,7 @@ struct iwl4965_channel_switch_cmd {
704/* 715/*
705 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) 716 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
706 */ 717 */
707struct iwl4965_csa_notification { 718struct iwl_csa_notification {
708 __le16 band; 719 __le16 band;
709 __le16 channel; 720 __le16 channel;
710 __le32 status; /* 0 - OK, 1 - fail */ 721 __le32 status; /* 0 - OK, 1 - fail */
@@ -741,9 +752,9 @@ struct iwl_ac_qos {
741} __attribute__ ((packed)); 752} __attribute__ ((packed));
742 753
743/* QoS flags defines */ 754/* QoS flags defines */
744#define QOS_PARAM_FLG_UPDATE_EDCA_MSK __constant_cpu_to_le32(0x01) 755#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01)
745#define QOS_PARAM_FLG_TGN_MSK __constant_cpu_to_le32(0x02) 756#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02)
746#define QOS_PARAM_FLG_TXOP_TYPE_MSK __constant_cpu_to_le32(0x10) 757#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10)
747 758
748/* Number of Access Categories (AC) (EDCA), queues 0..3 */ 759/* Number of Access Categories (AC) (EDCA), queues 0..3 */
749#define AC_NUM 4 760#define AC_NUM 4
@@ -780,34 +791,34 @@ struct iwl_qosparam_cmd {
780#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ 791#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
781#define IWL_INVALID_STATION 255 792#define IWL_INVALID_STATION 255
782 793
783#define STA_FLG_PWR_SAVE_MSK __constant_cpu_to_le32(1 << 8); 794#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8);
784#define STA_FLG_RTS_MIMO_PROT_MSK __constant_cpu_to_le32(1 << 17) 795#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
785#define STA_FLG_AGG_MPDU_8US_MSK __constant_cpu_to_le32(1 << 18) 796#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
786#define STA_FLG_MAX_AGG_SIZE_POS (19) 797#define STA_FLG_MAX_AGG_SIZE_POS (19)
787#define STA_FLG_MAX_AGG_SIZE_MSK __constant_cpu_to_le32(3 << 19) 798#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19)
788#define STA_FLG_FAT_EN_MSK __constant_cpu_to_le32(1 << 21) 799#define STA_FLG_FAT_EN_MSK cpu_to_le32(1 << 21)
789#define STA_FLG_MIMO_DIS_MSK __constant_cpu_to_le32(1 << 22) 800#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22)
790#define STA_FLG_AGG_MPDU_DENSITY_POS (23) 801#define STA_FLG_AGG_MPDU_DENSITY_POS (23)
791#define STA_FLG_AGG_MPDU_DENSITY_MSK __constant_cpu_to_le32(7 << 23) 802#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23)
792 803
793/* Use in mode field. 1: modify existing entry, 0: add new station entry */ 804/* Use in mode field. 1: modify existing entry, 0: add new station entry */
794#define STA_CONTROL_MODIFY_MSK 0x01 805#define STA_CONTROL_MODIFY_MSK 0x01
795 806
796/* key flags __le16*/ 807/* key flags __le16*/
797#define STA_KEY_FLG_ENCRYPT_MSK __constant_cpu_to_le16(0x0007) 808#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007)
798#define STA_KEY_FLG_NO_ENC __constant_cpu_to_le16(0x0000) 809#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000)
799#define STA_KEY_FLG_WEP __constant_cpu_to_le16(0x0001) 810#define STA_KEY_FLG_WEP cpu_to_le16(0x0001)
800#define STA_KEY_FLG_CCMP __constant_cpu_to_le16(0x0002) 811#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002)
801#define STA_KEY_FLG_TKIP __constant_cpu_to_le16(0x0003) 812#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003)
802 813
803#define STA_KEY_FLG_KEYID_POS 8 814#define STA_KEY_FLG_KEYID_POS 8
804#define STA_KEY_FLG_INVALID __constant_cpu_to_le16(0x0800) 815#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800)
805/* wep key is either from global key (0) or from station info array (1) */ 816/* wep key is either from global key (0) or from station info array (1) */
806#define STA_KEY_FLG_MAP_KEY_MSK __constant_cpu_to_le16(0x0008) 817#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008)
807 818
808/* wep key in STA: 5-bytes (0) or 13-bytes (1) */ 819/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
809#define STA_KEY_FLG_KEY_SIZE_MSK __constant_cpu_to_le16(0x1000) 820#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
810#define STA_KEY_MULTICAST_MSK __constant_cpu_to_le16(0x4000) 821#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
811#define STA_KEY_MAX_NUM 8 822#define STA_KEY_MAX_NUM 8
812 823
813/* Flags indicate whether to modify vs. don't change various station params */ 824/* Flags indicate whether to modify vs. don't change various station params */
@@ -1013,33 +1024,14 @@ struct iwl_wep_cmd {
1013 * 1024 *
1014 *****************************************************************************/ 1025 *****************************************************************************/
1015 1026
1016struct iwl4965_rx_frame_stats { 1027#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0)
1017 u8 phy_count; 1028#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1)
1018 u8 id;
1019 u8 rssi;
1020 u8 agc;
1021 __le16 sig_avg;
1022 __le16 noise_diff;
1023 u8 payload[0];
1024} __attribute__ ((packed));
1025
1026struct iwl4965_rx_frame_hdr {
1027 __le16 channel;
1028 __le16 phy_flags;
1029 u8 reserved1;
1030 u8 rate;
1031 __le16 len;
1032 u8 payload[0];
1033} __attribute__ ((packed));
1034
1035#define RX_RES_STATUS_NO_CRC32_ERROR __constant_cpu_to_le32(1 << 0)
1036#define RX_RES_STATUS_NO_RXE_OVERFLOW __constant_cpu_to_le32(1 << 1)
1037 1029
1038#define RX_RES_PHY_FLAGS_BAND_24_MSK __constant_cpu_to_le16(1 << 0) 1030#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0)
1039#define RX_RES_PHY_FLAGS_MOD_CCK_MSK __constant_cpu_to_le16(1 << 1) 1031#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1)
1040#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK __constant_cpu_to_le16(1 << 2) 1032#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2)
1041#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK __constant_cpu_to_le16(1 << 3) 1033#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3)
1042#define RX_RES_PHY_FLAGS_ANTENNA_MSK __constant_cpu_to_le16(0xf0) 1034#define RX_RES_PHY_FLAGS_ANTENNA_MSK cpu_to_le16(0xf0)
1043 1035
1044#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) 1036#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
1045#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) 1037#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
@@ -1062,26 +1054,6 @@ struct iwl4965_rx_frame_hdr {
1062#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7) 1054#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
1063#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) 1055#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
1064 1056
1065struct iwl4965_rx_frame_end {
1066 __le32 status;
1067 __le64 timestamp;
1068 __le32 beacon_timestamp;
1069} __attribute__ ((packed));
1070
1071/*
1072 * REPLY_3945_RX = 0x1b (response only, not a command)
1073 *
1074 * NOTE: DO NOT dereference from casts to this structure
1075 * It is provided only for calculating minimum data set size.
1076 * The actual offsets of the hdr and end are dynamic based on
1077 * stats.phy_count
1078 */
1079struct iwl4965_rx_frame {
1080 struct iwl4965_rx_frame_stats stats;
1081 struct iwl4965_rx_frame_hdr hdr;
1082 struct iwl4965_rx_frame_end end;
1083} __attribute__ ((packed));
1084
1085/* Fixed (non-configurable) rx data from phy */ 1057/* Fixed (non-configurable) rx data from phy */
1086 1058
1087#define IWL49_RX_RES_PHY_CNT 14 1059#define IWL49_RX_RES_PHY_CNT 14
@@ -1111,7 +1083,7 @@ struct iwl4965_rx_non_cfg_phy {
1111#define IWL50_OFDM_RSSI_C_BIT_POS 0 1083#define IWL50_OFDM_RSSI_C_BIT_POS 0
1112 1084
1113struct iwl5000_non_cfg_phy { 1085struct iwl5000_non_cfg_phy {
1114 __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* upto 8 phy entries */ 1086 __le32 non_cfg_phy[IWL50_RX_RES_PHY_CNT]; /* up to 8 phy entries */
1115} __attribute__ ((packed)); 1087} __attribute__ ((packed));
1116 1088
1117 1089
@@ -1167,24 +1139,24 @@ struct iwl4965_rx_mpdu_res_start {
1167 1139
1168/* REPLY_TX Tx flags field */ 1140/* REPLY_TX Tx flags field */
1169 1141
1170/* 1: Use RTS/CTS protocol or CTS-to-self if spec alows it 1142/* 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
1171 * before this frame. if CTS-to-self required check 1143 * before this frame. if CTS-to-self required check
1172 * RXON_FLG_SELF_CTS_EN status. */ 1144 * RXON_FLG_SELF_CTS_EN status. */
1173#define TX_CMD_FLG_RTS_CTS_MSK __constant_cpu_to_le32(1 << 0) 1145#define TX_CMD_FLG_RTS_CTS_MSK cpu_to_le32(1 << 0)
1174 1146
1175/* 1: Use Request-To-Send protocol before this frame. 1147/* 1: Use Request-To-Send protocol before this frame.
1176 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. */ 1148 * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. */
1177#define TX_CMD_FLG_RTS_MSK __constant_cpu_to_le32(1 << 1) 1149#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
1178 1150
1179/* 1: Transmit Clear-To-Send to self before this frame. 1151/* 1: Transmit Clear-To-Send to self before this frame.
1180 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames. 1152 * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
1181 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. */ 1153 * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. */
1182#define TX_CMD_FLG_CTS_MSK __constant_cpu_to_le32(1 << 2) 1154#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
1183 1155
1184/* 1: Expect ACK from receiving station 1156/* 1: Expect ACK from receiving station
1185 * 0: Don't expect ACK (MAC header's duration field s/b 0) 1157 * 0: Don't expect ACK (MAC header's duration field s/b 0)
1186 * Set this for unicast frames, but not broadcast/multicast. */ 1158 * Set this for unicast frames, but not broadcast/multicast. */
1187#define TX_CMD_FLG_ACK_MSK __constant_cpu_to_le32(1 << 3) 1159#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
1188 1160
1189/* For 4965: 1161/* For 4965:
1190 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). 1162 * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
@@ -1192,40 +1164,40 @@ struct iwl4965_rx_mpdu_res_start {
1192 * uCode walks through table for additional Tx attempts. 1164 * uCode walks through table for additional Tx attempts.
1193 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field. 1165 * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
1194 * This rate will be used for all Tx attempts; it will not be scaled. */ 1166 * This rate will be used for all Tx attempts; it will not be scaled. */
1195#define TX_CMD_FLG_STA_RATE_MSK __constant_cpu_to_le32(1 << 4) 1167#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
1196 1168
1197/* 1: Expect immediate block-ack. 1169/* 1: Expect immediate block-ack.
1198 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */ 1170 * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
1199#define TX_CMD_FLG_IMM_BA_RSP_MASK __constant_cpu_to_le32(1 << 6) 1171#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
1200 1172
1201/* 1: Frame requires full Tx-Op protection. 1173/* 1: Frame requires full Tx-Op protection.
1202 * Set this if either RTS or CTS Tx Flag gets set. */ 1174 * Set this if either RTS or CTS Tx Flag gets set. */
1203#define TX_CMD_FLG_FULL_TXOP_PROT_MSK __constant_cpu_to_le32(1 << 7) 1175#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
1204 1176
1205/* Tx antenna selection field; used only for 3945, reserved (0) for 4965. 1177/* Tx antenna selection field; used only for 3945, reserved (0) for 4965.
1206 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */ 1178 * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
1207#define TX_CMD_FLG_ANT_SEL_MSK __constant_cpu_to_le32(0xf00) 1179#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
1208#define TX_CMD_FLG_ANT_A_MSK __constant_cpu_to_le32(1 << 8) 1180#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
1209#define TX_CMD_FLG_ANT_B_MSK __constant_cpu_to_le32(1 << 9) 1181#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
1210 1182
1211/* 1: Ignore Bluetooth priority for this frame. 1183/* 1: Ignore Bluetooth priority for this frame.
1212 * 0: Delay Tx until Bluetooth device is done (normal usage). */ 1184 * 0: Delay Tx until Bluetooth device is done (normal usage). */
1213#define TX_CMD_FLG_BT_DIS_MSK __constant_cpu_to_le32(1 << 12) 1185#define TX_CMD_FLG_BT_DIS_MSK cpu_to_le32(1 << 12)
1214 1186
1215/* 1: uCode overrides sequence control field in MAC header. 1187/* 1: uCode overrides sequence control field in MAC header.
1216 * 0: Driver provides sequence control field in MAC header. 1188 * 0: Driver provides sequence control field in MAC header.
1217 * Set this for management frames, non-QOS data frames, non-unicast frames, 1189 * Set this for management frames, non-QOS data frames, non-unicast frames,
1218 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */ 1190 * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
1219#define TX_CMD_FLG_SEQ_CTL_MSK __constant_cpu_to_le32(1 << 13) 1191#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
1220 1192
1221/* 1: This frame is non-last MPDU; more fragments are coming. 1193/* 1: This frame is non-last MPDU; more fragments are coming.
1222 * 0: Last fragment, or not using fragmentation. */ 1194 * 0: Last fragment, or not using fragmentation. */
1223#define TX_CMD_FLG_MORE_FRAG_MSK __constant_cpu_to_le32(1 << 14) 1195#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
1224 1196
1225/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame. 1197/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
1226 * 0: No TSF required in outgoing frame. 1198 * 0: No TSF required in outgoing frame.
1227 * Set this for transmitting beacons and probe responses. */ 1199 * Set this for transmitting beacons and probe responses. */
1228#define TX_CMD_FLG_TSF_MSK __constant_cpu_to_le32(1 << 16) 1200#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
1229 1201
1230/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword 1202/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
1231 * alignment of frame's payload data field. 1203 * alignment of frame's payload data field.
@@ -1233,14 +1205,14 @@ struct iwl4965_rx_mpdu_res_start {
1233 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4 1205 * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
1234 * field (but not both). Driver must align frame data (i.e. data following 1206 * field (but not both). Driver must align frame data (i.e. data following
1235 * MAC header) to DWORD boundary. */ 1207 * MAC header) to DWORD boundary. */
1236#define TX_CMD_FLG_MH_PAD_MSK __constant_cpu_to_le32(1 << 20) 1208#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
1237 1209
1238/* accelerate aggregation support 1210/* accelerate aggregation support
1239 * 0 - no CCMP encryption; 1 - CCMP encryption */ 1211 * 0 - no CCMP encryption; 1 - CCMP encryption */
1240#define TX_CMD_FLG_AGG_CCMP_MSK __constant_cpu_to_le32(1 << 22) 1212#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)
1241 1213
1242/* HCCA-AP - disable duration overwriting. */ 1214/* HCCA-AP - disable duration overwriting. */
1243#define TX_CMD_FLG_DUR_MSK __constant_cpu_to_le32(1 << 25) 1215#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
1244 1216
1245 1217
1246/* 1218/*
@@ -1266,7 +1238,7 @@ struct iwl4965_rx_mpdu_res_start {
1266 * Used for managing Tx retries when expecting block-acks. 1238 * Used for managing Tx retries when expecting block-acks.
1267 * Driver should set these fields to 0. 1239 * Driver should set these fields to 0.
1268 */ 1240 */
1269struct iwl4965_dram_scratch { 1241struct iwl_dram_scratch {
1270 u8 try_cnt; /* Tx attempts */ 1242 u8 try_cnt; /* Tx attempts */
1271 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ 1243 u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
1272 __le16 reserved; 1244 __le16 reserved;
@@ -1297,9 +1269,9 @@ struct iwl_tx_cmd {
1297 1269
1298 __le32 tx_flags; /* TX_CMD_FLG_* */ 1270 __le32 tx_flags; /* TX_CMD_FLG_* */
1299 1271
1300 /* 4965's uCode may modify this field of the Tx command (in host DRAM!). 1272 /* uCode may modify this field of the Tx command (in host DRAM!).
1301 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ 1273 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
1302 struct iwl4965_dram_scratch scratch; 1274 struct iwl_dram_scratch scratch;
1303 1275
1304 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ 1276 /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
1305 __le32 rate_n_flags; /* RATE_MCS_* */ 1277 __le32 rate_n_flags; /* RATE_MCS_* */
@@ -1411,21 +1383,21 @@ enum {
1411}; 1383};
1412 1384
1413enum { 1385enum {
1414 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ 1386 TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
1415 TX_STATUS_DELAY_MSK = 0x00000040, 1387 TX_STATUS_DELAY_MSK = 0x00000040,
1416 TX_STATUS_ABORT_MSK = 0x00000080, 1388 TX_STATUS_ABORT_MSK = 0x00000080,
1417 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ 1389 TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
1418 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ 1390 TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
1419 TX_RESERVED = 0x00780000, /* bits 19:22 */ 1391 TX_RESERVED = 0x00780000, /* bits 19:22 */
1420 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ 1392 TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
1421 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ 1393 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1422}; 1394};
1423 1395
1424static inline int iwl_is_tx_success(u32 status) 1396static inline bool iwl_is_tx_success(u32 status)
1425{ 1397{
1426 status &= TX_STATUS_MSK; 1398 status &= TX_STATUS_MSK;
1427 return (status == TX_STATUS_SUCCESS) 1399 return (status == TX_STATUS_SUCCESS) ||
1428 || (status == TX_STATUS_DIRECT_DONE); 1400 (status == TX_STATUS_DIRECT_DONE);
1429} 1401}
1430 1402
1431 1403
@@ -1450,10 +1422,9 @@ enum {
1450 AGG_TX_STATE_DELAY_TX_MSK = 0x400 1422 AGG_TX_STATE_DELAY_TX_MSK = 0x400
1451}; 1423};
1452 1424
1453#define AGG_TX_STATE_LAST_SENT_MSK \ 1425#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
1454(AGG_TX_STATE_LAST_SENT_TTL_MSK | \ 1426 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \
1455 AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \ 1427 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK)
1456 AGG_TX_STATE_LAST_SENT_BT_KILL_MSK)
1457 1428
1458/* # tx attempts for first frame in aggregation */ 1429/* # tx attempts for first frame in aggregation */
1459#define AGG_TX_STATE_TRY_CNT_POS 12 1430#define AGG_TX_STATE_TRY_CNT_POS 12
@@ -1526,6 +1497,28 @@ struct iwl4965_tx_resp {
1526 } u; 1497 } u;
1527} __attribute__ ((packed)); 1498} __attribute__ ((packed));
1528 1499
1500/*
1501 * definitions for initial rate index field
1502 * bits [3:0] initial rate index
1503 * bits [6:4] rate table color, used for the initial rate
1504 * bit-7 invalid rate indication
1505 * i.e. rate was not chosen from rate table
1506 * or rate table color was changed during frame retries
1507 * refer tlc rate info
1508 */
1509
1510#define IWL50_TX_RES_INIT_RATE_INDEX_POS 0
1511#define IWL50_TX_RES_INIT_RATE_INDEX_MSK 0x0f
1512#define IWL50_TX_RES_RATE_TABLE_COLOR_POS 4
1513#define IWL50_TX_RES_RATE_TABLE_COLOR_MSK 0x70
1514#define IWL50_TX_RES_INV_RATE_INDEX_MSK 0x80
1515
1516/* refer to ra_tid */
1517#define IWL50_TX_RES_TID_POS 0
1518#define IWL50_TX_RES_TID_MSK 0x0f
1519#define IWL50_TX_RES_RA_POS 4
1520#define IWL50_TX_RES_RA_MSK 0xf0
1521
1529struct iwl5000_tx_resp { 1522struct iwl5000_tx_resp {
1530 u8 frame_count; /* 1 no aggregation, >1 aggregation */ 1523 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1531 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ 1524 u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
@@ -1540,14 +1533,17 @@ struct iwl5000_tx_resp {
1540 * For agg: RTS + CTS + aggregation tx time + block-ack time. */ 1533 * For agg: RTS + CTS + aggregation tx time + block-ack time. */
1541 __le16 wireless_media_time; /* uSecs */ 1534 __le16 wireless_media_time; /* uSecs */
1542 1535
1543 __le16 reserved; 1536 u8 pa_status; /* RF power amplifier measurement (not used) */
1544 __le32 pa_power1; /* RF power amplifier measurement (not used) */ 1537 u8 pa_integ_res_a[3];
1545 __le32 pa_power2; 1538 u8 pa_integ_res_b[3];
1539 u8 pa_integ_res_C[3];
1546 1540
1547 __le32 tfd_info; 1541 __le32 tfd_info;
1548 __le16 seq_ctl; 1542 __le16 seq_ctl;
1549 __le16 byte_cnt; 1543 __le16 byte_cnt;
1550 __le32 tlc_info; 1544 u8 tlc_info;
1545 u8 ra_tid; /* tid (0:3), sta_id (4:7) */
1546 __le16 frame_ctrl;
1551 /* 1547 /*
1552 * For non-agg: frame status TX_STATUS_* 1548 * For non-agg: frame status TX_STATUS_*
1553 * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status 1549 * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status
@@ -1742,7 +1738,7 @@ struct iwl_link_qual_agg_params {
1742 * match the modulation characteristics of the history set. 1738 * match the modulation characteristics of the history set.
1743 * 1739 *
1744 * When using block-ack (aggregation), all frames are transmitted at the same 1740 * When using block-ack (aggregation), all frames are transmitted at the same
1745 * rate, since there is no per-attempt acknowledgement from the destination 1741 * rate, since there is no per-attempt acknowledgment from the destination
1746 * station. The Tx response struct iwl_tx_resp indicates the Tx rate in 1742 * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
1747 * rate_n_flags field. After receiving a block-ack, the driver can update 1743 * rate_n_flags field. After receiving a block-ack, the driver can update
1748 * history for the entire block all at once. 1744 * history for the entire block all at once.
@@ -1881,9 +1877,9 @@ struct iwl_link_quality_cmd {
1881 * 1877 *
1882 * 3945 and 4965 support hardware handshake with Bluetooth device on 1878 * 3945 and 4965 support hardware handshake with Bluetooth device on
1883 * same platform. Bluetooth device alerts wireless device when it will Tx; 1879 * same platform. Bluetooth device alerts wireless device when it will Tx;
1884 * wireless device can delay or kill its own Tx to accomodate. 1880 * wireless device can delay or kill its own Tx to accommodate.
1885 */ 1881 */
1886struct iwl4965_bt_cmd { 1882struct iwl_bt_cmd {
1887 u8 flags; 1883 u8 flags;
1888 u8 lead_time; 1884 u8 lead_time;
1889 u8 max_kill; 1885 u8 max_kill;
@@ -1909,18 +1905,18 @@ struct iwl4965_bt_cmd {
1909 RXON_FILTER_ASSOC_MSK | \ 1905 RXON_FILTER_ASSOC_MSK | \
1910 RXON_FILTER_BCON_AWARE_MSK) 1906 RXON_FILTER_BCON_AWARE_MSK)
1911 1907
1912struct iwl4965_measure_channel { 1908struct iwl_measure_channel {
1913 __le32 duration; /* measurement duration in extended beacon 1909 __le32 duration; /* measurement duration in extended beacon
1914 * format */ 1910 * format */
1915 u8 channel; /* channel to measure */ 1911 u8 channel; /* channel to measure */
1916 u8 type; /* see enum iwl4965_measure_type */ 1912 u8 type; /* see enum iwl_measure_type */
1917 __le16 reserved; 1913 __le16 reserved;
1918} __attribute__ ((packed)); 1914} __attribute__ ((packed));
1919 1915
1920/* 1916/*
1921 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) 1917 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
1922 */ 1918 */
1923struct iwl4965_spectrum_cmd { 1919struct iwl_spectrum_cmd {
1924 __le16 len; /* number of bytes starting from token */ 1920 __le16 len; /* number of bytes starting from token */
1925 u8 token; /* token id */ 1921 u8 token; /* token id */
1926 u8 id; /* measurement id -- 0 or 1 */ 1922 u8 id; /* measurement id -- 0 or 1 */
@@ -1933,13 +1929,13 @@ struct iwl4965_spectrum_cmd {
1933 __le32 filter_flags; /* rxon filter flags */ 1929 __le32 filter_flags; /* rxon filter flags */
1934 __le16 channel_count; /* minimum 1, maximum 10 */ 1930 __le16 channel_count; /* minimum 1, maximum 10 */
1935 __le16 reserved3; 1931 __le16 reserved3;
1936 struct iwl4965_measure_channel channels[10]; 1932 struct iwl_measure_channel channels[10];
1937} __attribute__ ((packed)); 1933} __attribute__ ((packed));
1938 1934
1939/* 1935/*
1940 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) 1936 * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
1941 */ 1937 */
1942struct iwl4965_spectrum_resp { 1938struct iwl_spectrum_resp {
1943 u8 token; 1939 u8 token;
1944 u8 id; /* id of the prior command replaced, or 0xff */ 1940 u8 id; /* id of the prior command replaced, or 0xff */
1945 __le16 status; /* 0 - command will be handled 1941 __le16 status; /* 0 - command will be handled
@@ -1947,12 +1943,12 @@ struct iwl4965_spectrum_resp {
1947 * measurement) */ 1943 * measurement) */
1948} __attribute__ ((packed)); 1944} __attribute__ ((packed));
1949 1945
1950enum iwl4965_measurement_state { 1946enum iwl_measurement_state {
1951 IWL_MEASUREMENT_START = 0, 1947 IWL_MEASUREMENT_START = 0,
1952 IWL_MEASUREMENT_STOP = 1, 1948 IWL_MEASUREMENT_STOP = 1,
1953}; 1949};
1954 1950
1955enum iwl4965_measurement_status { 1951enum iwl_measurement_status {
1956 IWL_MEASUREMENT_OK = 0, 1952 IWL_MEASUREMENT_OK = 0,
1957 IWL_MEASUREMENT_CONCURRENT = 1, 1953 IWL_MEASUREMENT_CONCURRENT = 1,
1958 IWL_MEASUREMENT_CSA_CONFLICT = 2, 1954 IWL_MEASUREMENT_CSA_CONFLICT = 2,
@@ -1965,18 +1961,18 @@ enum iwl4965_measurement_status {
1965 1961
1966#define NUM_ELEMENTS_IN_HISTOGRAM 8 1962#define NUM_ELEMENTS_IN_HISTOGRAM 8
1967 1963
1968struct iwl4965_measurement_histogram { 1964struct iwl_measurement_histogram {
1969 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ 1965 __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
1970 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ 1966 __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
1971} __attribute__ ((packed)); 1967} __attribute__ ((packed));
1972 1968
1973/* clear channel availability counters */ 1969/* clear channel availability counters */
1974struct iwl4965_measurement_cca_counters { 1970struct iwl_measurement_cca_counters {
1975 __le32 ofdm; 1971 __le32 ofdm;
1976 __le32 cck; 1972 __le32 cck;
1977} __attribute__ ((packed)); 1973} __attribute__ ((packed));
1978 1974
1979enum iwl4965_measure_type { 1975enum iwl_measure_type {
1980 IWL_MEASURE_BASIC = (1 << 0), 1976 IWL_MEASURE_BASIC = (1 << 0),
1981 IWL_MEASURE_CHANNEL_LOAD = (1 << 1), 1977 IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
1982 IWL_MEASURE_HISTOGRAM_RPI = (1 << 2), 1978 IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
@@ -1989,7 +1985,7 @@ enum iwl4965_measure_type {
1989/* 1985/*
1990 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command) 1986 * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
1991 */ 1987 */
1992struct iwl4965_spectrum_notification { 1988struct iwl_spectrum_notification {
1993 u8 id; /* measurement id -- 0 or 1 */ 1989 u8 id; /* measurement id -- 0 or 1 */
1994 u8 token; 1990 u8 token;
1995 u8 channel_index; /* index in measurement channel list */ 1991 u8 channel_index; /* index in measurement channel list */
@@ -1997,7 +1993,7 @@ struct iwl4965_spectrum_notification {
1997 __le32 start_time; /* lower 32-bits of TSF */ 1993 __le32 start_time; /* lower 32-bits of TSF */
1998 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */ 1994 u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
1999 u8 channel; 1995 u8 channel;
2000 u8 type; /* see enum iwl4965_measurement_type */ 1996 u8 type; /* see enum iwl_measurement_type */
2001 u8 reserved1; 1997 u8 reserved1;
2002 /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only 1998 /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only
2003 * valid if applicable for measurement type requested. */ 1999 * valid if applicable for measurement type requested. */
@@ -2007,9 +2003,9 @@ struct iwl4965_spectrum_notification {
2007 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 - 2003 u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
2008 * unidentified */ 2004 * unidentified */
2009 u8 reserved2[3]; 2005 u8 reserved2[3];
2010 struct iwl4965_measurement_histogram histogram; 2006 struct iwl_measurement_histogram histogram;
2011 __le32 stop_time; /* lower 32-bits of TSF */ 2007 __le32 stop_time; /* lower 32-bits of TSF */
2012 __le32 status; /* see iwl4965_measurement_status */ 2008 __le32 status; /* see iwl_measurement_status */
2013} __attribute__ ((packed)); 2009} __attribute__ ((packed));
2014 2010
2015/****************************************************************************** 2011/******************************************************************************
@@ -2043,15 +2039,15 @@ struct iwl4965_spectrum_notification {
2043 * '11' Illegal set 2039 * '11' Illegal set
2044 * 2040 *
2045 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then 2041 * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
2046 * ucode assume sleep over DTIM is allowed and we don't need to wakeup 2042 * ucode assume sleep over DTIM is allowed and we don't need to wake up
2047 * for every DTIM. 2043 * for every DTIM.
2048 */ 2044 */
2049#define IWL_POWER_VEC_SIZE 5 2045#define IWL_POWER_VEC_SIZE 5
2050 2046
2051#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK __constant_cpu_to_le16(1 << 0) 2047#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(1 << 0)
2052#define IWL_POWER_SLEEP_OVER_DTIM_MSK __constant_cpu_to_le16(1 << 2) 2048#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(1 << 2)
2053#define IWL_POWER_PCI_PM_MSK __constant_cpu_to_le16(1 << 3) 2049#define IWL_POWER_PCI_PM_MSK cpu_to_le16(1 << 3)
2054#define IWL_POWER_FAST_PD __constant_cpu_to_le16(1 << 4) 2050#define IWL_POWER_FAST_PD cpu_to_le16(1 << 4)
2055 2051
2056struct iwl_powertable_cmd { 2052struct iwl_powertable_cmd {
2057 __le16 flags; 2053 __le16 flags;
@@ -2067,7 +2063,7 @@ struct iwl_powertable_cmd {
2067 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) 2063 * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
2068 * 3945 and 4965 identical. 2064 * 3945 and 4965 identical.
2069 */ 2065 */
2070struct iwl4965_sleep_notification { 2066struct iwl_sleep_notification {
2071 u8 pm_sleep_mode; 2067 u8 pm_sleep_mode;
2072 u8 pm_wakeup_src; 2068 u8 pm_wakeup_src;
2073 __le16 reserved; 2069 __le16 reserved;
@@ -2097,14 +2093,14 @@ enum {
2097#define CARD_STATE_CMD_DISABLE 0x00 /* Put card to sleep */ 2093#define CARD_STATE_CMD_DISABLE 0x00 /* Put card to sleep */
2098#define CARD_STATE_CMD_ENABLE 0x01 /* Wake up card */ 2094#define CARD_STATE_CMD_ENABLE 0x01 /* Wake up card */
2099#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */ 2095#define CARD_STATE_CMD_HALT 0x02 /* Power down permanently */
2100struct iwl4965_card_state_cmd { 2096struct iwl_card_state_cmd {
2101 __le32 status; /* CARD_STATE_CMD_* request new power state */ 2097 __le32 status; /* CARD_STATE_CMD_* request new power state */
2102} __attribute__ ((packed)); 2098} __attribute__ ((packed));
2103 2099
2104/* 2100/*
2105 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) 2101 * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
2106 */ 2102 */
2107struct iwl4965_card_state_notif { 2103struct iwl_card_state_notif {
2108 __le32 flags; 2104 __le32 flags;
2109} __attribute__ ((packed)); 2105} __attribute__ ((packed));
2110 2106
@@ -2125,8 +2121,8 @@ struct iwl_ct_kill_config {
2125 * 2121 *
2126 *****************************************************************************/ 2122 *****************************************************************************/
2127 2123
2128#define SCAN_CHANNEL_TYPE_PASSIVE __constant_cpu_to_le32(0) 2124#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
2129#define SCAN_CHANNEL_TYPE_ACTIVE __constant_cpu_to_le32(1) 2125#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
2130 2126
2131/** 2127/**
2132 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table 2128 * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
@@ -2167,7 +2163,7 @@ struct iwl_scan_channel {
2167 * struct iwl_ssid_ie - directed scan network information element 2163 * struct iwl_ssid_ie - directed scan network information element
2168 * 2164 *
2169 * Up to 4 of these may appear in REPLY_SCAN_CMD, selected by "type" field 2165 * Up to 4 of these may appear in REPLY_SCAN_CMD, selected by "type" field
2170 * in struct iwl4965_scan_channel; each channel may select different ssids from 2166 * in struct iwl_scan_channel; each channel may select different ssids from
2171 * among the 4 entries. SSID IEs get transmitted in reverse order of entry. 2167 * among the 4 entries. SSID IEs get transmitted in reverse order of entry.
2172 */ 2168 */
2173struct iwl_ssid_ie { 2169struct iwl_ssid_ie {
@@ -2177,8 +2173,8 @@ struct iwl_ssid_ie {
2177} __attribute__ ((packed)); 2173} __attribute__ ((packed));
2178 2174
2179#define PROBE_OPTION_MAX 0x14 2175#define PROBE_OPTION_MAX 0x14
2180#define TX_CMD_LIFE_TIME_INFINITE __constant_cpu_to_le32(0xFFFFFFFF) 2176#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2181#define IWL_GOOD_CRC_TH __constant_cpu_to_le16(1) 2177#define IWL_GOOD_CRC_TH cpu_to_le16(1)
2182#define IWL_MAX_SCAN_SIZE 1024 2178#define IWL_MAX_SCAN_SIZE 1024
2183 2179
2184/* 2180/*
@@ -2267,7 +2263,7 @@ struct iwl_scan_cmd {
2267 * Number of channels in list is specified by channel_count. 2263 * Number of channels in list is specified by channel_count.
2268 * Each channel in list is of type: 2264 * Each channel in list is of type:
2269 * 2265 *
2270 * struct iwl4965_scan_channel channels[0]; 2266 * struct iwl_scan_channel channels[0];
2271 * 2267 *
2272 * NOTE: Only one band of channels can be scanned per pass. You 2268 * NOTE: Only one band of channels can be scanned per pass. You
2273 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait 2269 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
@@ -2278,7 +2274,7 @@ struct iwl_scan_cmd {
2278} __attribute__ ((packed)); 2274} __attribute__ ((packed));
2279 2275
2280/* Can abort will notify by complete notification with abort status. */ 2276/* Can abort will notify by complete notification with abort status. */
2281#define CAN_ABORT_STATUS __constant_cpu_to_le32(0x1) 2277#define CAN_ABORT_STATUS cpu_to_le32(0x1)
2282/* complete notification statuses */ 2278/* complete notification statuses */
2283#define ABORT_STATUS 0x2 2279#define ABORT_STATUS 0x2
2284 2280
@@ -2422,6 +2418,8 @@ struct statistics_rx_ht_phy {
2422 __le32 reserved2; 2418 __le32 reserved2;
2423} __attribute__ ((packed)); 2419} __attribute__ ((packed));
2424 2420
2421#define INTERFERENCE_DATA_AVAILABLE __constant_cpu_to_le32(1)
2422
2425struct statistics_rx_non_phy { 2423struct statistics_rx_non_phy {
2426 __le32 bogus_cts; /* CTS received when not expecting CTS */ 2424 __le32 bogus_cts; /* CTS received when not expecting CTS */
2427 __le32 bogus_ack; /* ACK received when not expecting ACK */ 2425 __le32 bogus_ack; /* ACK received when not expecting ACK */
@@ -2540,8 +2538,8 @@ struct statistics_general {
2540 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag 2538 * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
2541 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. 2539 * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
2542 */ 2540 */
2543#define IWL_STATS_CONF_CLEAR_STATS __constant_cpu_to_le32(0x1) /* see above */ 2541#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
2544#define IWL_STATS_CONF_DISABLE_NOTIF __constant_cpu_to_le32(0x2)/* see above */ 2542#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
2545struct iwl_statistics_cmd { 2543struct iwl_statistics_cmd {
2546 __le32 configuration_flags; /* IWL_STATS_CONF_* */ 2544 __le32 configuration_flags; /* IWL_STATS_CONF_* */
2547} __attribute__ ((packed)); 2545} __attribute__ ((packed));
@@ -2561,8 +2559,8 @@ struct iwl_statistics_cmd {
2561 * appropriately so that each notification contains statistics for only the 2559 * appropriately so that each notification contains statistics for only the
2562 * one channel that has just been scanned. 2560 * one channel that has just been scanned.
2563 */ 2561 */
2564#define STATISTICS_REPLY_FLG_BAND_24G_MSK __constant_cpu_to_le32(0x2) 2562#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
2565#define STATISTICS_REPLY_FLG_FAT_MODE_MSK __constant_cpu_to_le32(0x8) 2563#define STATISTICS_REPLY_FLG_FAT_MODE_MSK cpu_to_le32(0x8)
2566struct iwl_notif_statistics { 2564struct iwl_notif_statistics {
2567 __le32 flag; 2565 __le32 flag;
2568 struct statistics_rx rx; 2566 struct statistics_rx rx;
@@ -2578,7 +2576,7 @@ struct iwl_notif_statistics {
2578 * then this notification will be sent. */ 2576 * then this notification will be sent. */
2579#define CONSECUTIVE_MISSED_BCONS_TH 20 2577#define CONSECUTIVE_MISSED_BCONS_TH 20
2580 2578
2581struct iwl4965_missed_beacon_notif { 2579struct iwl_missed_beacon_notif {
2582 __le32 consequtive_missed_beacons; 2580 __le32 consequtive_missed_beacons;
2583 __le32 total_missed_becons; 2581 __le32 total_missed_becons;
2584 __le32 num_expected_beacons; 2582 __le32 num_expected_beacons;
@@ -2778,8 +2776,8 @@ struct iwl4965_missed_beacon_notif {
2778#define HD_OFDM_ENERGY_TH_IN_INDEX (10) 2776#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
2779 2777
2780/* Control field in struct iwl_sensitivity_cmd */ 2778/* Control field in struct iwl_sensitivity_cmd */
2781#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE __constant_cpu_to_le16(0) 2779#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
2782#define SENSITIVITY_CMD_CONTROL_WORK_TABLE __constant_cpu_to_le16(1) 2780#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
2783 2781
2784/** 2782/**
2785 * struct iwl_sensitivity_cmd 2783 * struct iwl_sensitivity_cmd
@@ -2849,56 +2847,26 @@ struct iwl_sensitivity_cmd {
2849 * 1-0: amount of gain, units of 1.5 dB 2847 * 1-0: amount of gain, units of 1.5 dB
2850 */ 2848 */
2851 2849
2852/* "Differential Gain" opcode used in REPLY_PHY_CALIBRATION_CMD. */ 2850/* Phy calibration command for series */
2853#define PHY_CALIBRATE_DIFF_GAIN_CMD (7)
2854
2855struct iwl4965_calibration_cmd {
2856 u8 opCode; /* PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
2857 u8 flags; /* not used */
2858 __le16 reserved;
2859 s8 diff_gain_a; /* see above */
2860 s8 diff_gain_b;
2861 s8 diff_gain_c;
2862 u8 reserved1;
2863} __attribute__ ((packed));
2864
2865/* Phy calibration command for 5000 series */
2866
2867enum {
2868 IWL5000_PHY_CALIBRATE_DC_CMD = 8,
2869 IWL5000_PHY_CALIBRATE_LO_CMD = 9,
2870 IWL5000_PHY_CALIBRATE_RX_BB_CMD = 10,
2871 IWL5000_PHY_CALIBRATE_TX_IQ_CMD = 11,
2872 IWL5000_PHY_CALIBRATE_RX_IQ_CMD = 12,
2873 IWL5000_PHY_CALIBRATION_NOISE_CMD = 13,
2874 IWL5000_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
2875 IWL5000_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
2876 IWL5000_PHY_CALIBRATE_BASE_BAND_CMD = 16,
2877 IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
2878 IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD = 18,
2879 IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD = 19,
2880};
2881 2851
2882enum { 2852enum {
2883 CALIBRATION_CFG_CMD = 0x65, 2853 IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
2884 CALIBRATION_RES_NOTIFICATION = 0x66, 2854 IWL_PHY_CALIBRATE_DC_CMD = 8,
2885 CALIBRATION_COMPLETE_NOTIFICATION = 0x67 2855 IWL_PHY_CALIBRATE_LO_CMD = 9,
2856 IWL_PHY_CALIBRATE_RX_BB_CMD = 10,
2857 IWL_PHY_CALIBRATE_TX_IQ_CMD = 11,
2858 IWL_PHY_CALIBRATE_RX_IQ_CMD = 12,
2859 IWL_PHY_CALIBRATION_NOISE_CMD = 13,
2860 IWL_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
2861 IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
2862 IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16,
2863 IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
2864 IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD = 18,
2865 IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD = 19,
2886}; 2866};
2887 2867
2888struct iwl_cal_crystal_freq_cmd {
2889 u8 cap_pin1;
2890 u8 cap_pin2;
2891} __attribute__ ((packed));
2892
2893struct iwl5000_calibration {
2894 u8 op_code;
2895 u8 first_group;
2896 u8 num_groups;
2897 u8 all_data_valid;
2898 struct iwl_cal_crystal_freq_cmd data;
2899} __attribute__ ((packed));
2900 2868
2901#define IWL_CALIB_INIT_CFG_ALL __constant_cpu_to_le32(0xffffffff) 2869#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(0xffffffff)
2902 2870
2903struct iwl_calib_cfg_elmnt_s { 2871struct iwl_calib_cfg_elmnt_s {
2904 __le32 is_enable; 2872 __le32 is_enable;
@@ -2914,32 +2882,52 @@ struct iwl_calib_cfg_status_s {
2914 __le32 flags; 2882 __le32 flags;
2915} __attribute__ ((packed)); 2883} __attribute__ ((packed));
2916 2884
2917struct iwl5000_calib_cfg_cmd { 2885struct iwl_calib_cfg_cmd {
2918 struct iwl_calib_cfg_status_s ucd_calib_cfg; 2886 struct iwl_calib_cfg_status_s ucd_calib_cfg;
2919 struct iwl_calib_cfg_status_s drv_calib_cfg; 2887 struct iwl_calib_cfg_status_s drv_calib_cfg;
2920 __le32 reserved1; 2888 __le32 reserved1;
2921} __attribute__ ((packed)); 2889} __attribute__ ((packed));
2922 2890
2923struct iwl5000_calib_hdr { 2891struct iwl_calib_hdr {
2924 u8 op_code; 2892 u8 op_code;
2925 u8 first_group; 2893 u8 first_group;
2926 u8 groups_num; 2894 u8 groups_num;
2927 u8 data_valid; 2895 u8 data_valid;
2928} __attribute__ ((packed)); 2896} __attribute__ ((packed));
2929 2897
2930struct iwl5000_calibration_chain_noise_reset_cmd { 2898struct iwl_calib_cmd {
2931 u8 op_code; /* IWL5000_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */ 2899 struct iwl_calib_hdr hdr;
2932 u8 flags; /* not used */ 2900 u8 data[0];
2933 __le16 reserved;
2934} __attribute__ ((packed)); 2901} __attribute__ ((packed));
2935 2902
2936struct iwl5000_calibration_chain_noise_gain_cmd { 2903/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
2937 u8 op_code; /* IWL5000_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */ 2904struct iwl_calib_diff_gain_cmd {
2938 u8 flags; /* not used */ 2905 struct iwl_calib_hdr hdr;
2939 __le16 reserved; 2906 s8 diff_gain_a; /* see above */
2907 s8 diff_gain_b;
2908 s8 diff_gain_c;
2909 u8 reserved1;
2910} __attribute__ ((packed));
2911
2912struct iwl_calib_xtal_freq_cmd {
2913 struct iwl_calib_hdr hdr;
2914 u8 cap_pin1;
2915 u8 cap_pin2;
2916 u8 pad[2];
2917} __attribute__ ((packed));
2918
2919/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
2920struct iwl_calib_chain_noise_reset_cmd {
2921 struct iwl_calib_hdr hdr;
2922 u8 data[0];
2923};
2924
2925/* IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */
2926struct iwl_calib_chain_noise_gain_cmd {
2927 struct iwl_calib_hdr hdr;
2940 u8 delta_gain_1; 2928 u8 delta_gain_1;
2941 u8 delta_gain_2; 2929 u8 delta_gain_2;
2942 __le16 reserved1; 2930 u8 pad[2];
2943} __attribute__ ((packed)); 2931} __attribute__ ((packed));
2944 2932
2945/****************************************************************************** 2933/******************************************************************************
@@ -2999,11 +2987,11 @@ struct iwl_wimax_coex_event_entry {
2999 2987
3000/* COEX flag masks */ 2988/* COEX flag masks */
3001 2989
3002/* Staion table is valid */ 2990/* Station table is valid */
3003#define COEX_FLAGS_STA_TABLE_VALID_MSK (0x1) 2991#define COEX_FLAGS_STA_TABLE_VALID_MSK (0x1)
3004/* UnMask wakeup src at unassociated sleep */ 2992/* UnMask wake up src at unassociated sleep */
3005#define COEX_FLAGS_UNASSOC_WA_UNMASK_MSK (0x4) 2993#define COEX_FLAGS_UNASSOC_WA_UNMASK_MSK (0x4)
3006/* UnMask wakeup src at associated sleep */ 2994/* UnMask wake up src at associated sleep */
3007#define COEX_FLAGS_ASSOC_WA_UNMASK_MSK (0x8) 2995#define COEX_FLAGS_ASSOC_WA_UNMASK_MSK (0x8)
3008/* Enable CoEx feature. */ 2996/* Enable CoEx feature. */
3009#define COEX_FLAGS_COEX_ENABLE_MSK (0x80) 2997#define COEX_FLAGS_COEX_ENABLE_MSK (0x80)
@@ -3025,26 +3013,22 @@ struct iwl_rx_packet {
3025 struct iwl_cmd_header hdr; 3013 struct iwl_cmd_header hdr;
3026 union { 3014 union {
3027 struct iwl_alive_resp alive_frame; 3015 struct iwl_alive_resp alive_frame;
3028 struct iwl4965_rx_frame rx_frame; 3016 struct iwl_spectrum_notification spectrum_notif;
3029 struct iwl4965_tx_resp tx_resp; 3017 struct iwl_csa_notification csa_notif;
3030 struct iwl4965_spectrum_notification spectrum_notif;
3031 struct iwl4965_csa_notification csa_notif;
3032 struct iwl_error_resp err_resp; 3018 struct iwl_error_resp err_resp;
3033 struct iwl4965_card_state_notif card_state_notif; 3019 struct iwl_card_state_notif card_state_notif;
3034 struct iwl4965_beacon_notif beacon_status;
3035 struct iwl_add_sta_resp add_sta; 3020 struct iwl_add_sta_resp add_sta;
3036 struct iwl_rem_sta_resp rem_sta; 3021 struct iwl_rem_sta_resp rem_sta;
3037 struct iwl4965_sleep_notification sleep_notif; 3022 struct iwl_sleep_notification sleep_notif;
3038 struct iwl4965_spectrum_resp spectrum; 3023 struct iwl_spectrum_resp spectrum;
3039 struct iwl_notif_statistics stats; 3024 struct iwl_notif_statistics stats;
3040 struct iwl_compressed_ba_resp compressed_ba; 3025 struct iwl_compressed_ba_resp compressed_ba;
3041 struct iwl4965_missed_beacon_notif missed_beacon; 3026 struct iwl_missed_beacon_notif missed_beacon;
3042 struct iwl5000_calibration calib;
3043 __le32 status; 3027 __le32 status;
3044 u8 raw[0]; 3028 u8 raw[0];
3045 } u; 3029 } u;
3046} __attribute__ ((packed)); 3030} __attribute__ ((packed));
3047 3031
3048#define IWL_RX_FRAME_SIZE (4 + sizeof(struct iwl4965_rx_frame)) 3032int iwl_agn_check_rxon_cmd(struct iwl_rxon_cmd *rxon);
3049 3033
3050#endif /* __iwl4965_commands_h__ */ 3034#endif /* __iwl_commands_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 01a845851338..73d7973707eb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -22,7 +22,7 @@
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Tomas Winkler <tomas.winkler@intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/ 27 *****************************************************************************/
28 28
@@ -30,19 +30,19 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <net/mac80211.h> 31#include <net/mac80211.h>
32 32
33struct iwl_priv; /* FIXME: remove */
34#include "iwl-debug.h"
35#include "iwl-eeprom.h" 33#include "iwl-eeprom.h"
36#include "iwl-dev.h" /* FIXME: remove */ 34#include "iwl-dev.h" /* FIXME: remove */
35#include "iwl-debug.h"
37#include "iwl-core.h" 36#include "iwl-core.h"
38#include "iwl-io.h" 37#include "iwl-io.h"
39#include "iwl-rfkill.h" 38#include "iwl-rfkill.h"
40#include "iwl-power.h" 39#include "iwl-power.h"
40#include "iwl-sta.h"
41 41
42 42
43MODULE_DESCRIPTION("iwl core"); 43MODULE_DESCRIPTION("iwl core");
44MODULE_VERSION(IWLWIFI_VERSION); 44MODULE_VERSION(IWLWIFI_VERSION);
45MODULE_AUTHOR(DRV_COPYRIGHT); 45MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
46MODULE_LICENSE("GPL"); 46MODULE_LICENSE("GPL");
47 47
48#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ 48#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
@@ -88,26 +88,27 @@ EXPORT_SYMBOL(iwl_rates);
88 * translate ucode response to mac80211 tx status control values 88 * translate ucode response to mac80211 tx status control values
89 */ 89 */
90void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, 90void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
91 struct ieee80211_tx_info *control) 91 struct ieee80211_tx_info *info)
92{ 92{
93 int rate_index; 93 int rate_index;
94 struct ieee80211_tx_rate *r = &info->control.rates[0];
94 95
95 control->antenna_sel_tx = 96 info->antenna_sel_tx =
96 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); 97 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
97 if (rate_n_flags & RATE_MCS_HT_MSK) 98 if (rate_n_flags & RATE_MCS_HT_MSK)
98 control->flags |= IEEE80211_TX_CTL_OFDM_HT; 99 r->flags |= IEEE80211_TX_RC_MCS;
99 if (rate_n_flags & RATE_MCS_GF_MSK) 100 if (rate_n_flags & RATE_MCS_GF_MSK)
100 control->flags |= IEEE80211_TX_CTL_GREEN_FIELD; 101 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
101 if (rate_n_flags & RATE_MCS_FAT_MSK) 102 if (rate_n_flags & RATE_MCS_FAT_MSK)
102 control->flags |= IEEE80211_TX_CTL_40_MHZ_WIDTH; 103 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
103 if (rate_n_flags & RATE_MCS_DUP_MSK) 104 if (rate_n_flags & RATE_MCS_DUP_MSK)
104 control->flags |= IEEE80211_TX_CTL_DUP_DATA; 105 r->flags |= IEEE80211_TX_RC_DUP_DATA;
105 if (rate_n_flags & RATE_MCS_SGI_MSK) 106 if (rate_n_flags & RATE_MCS_SGI_MSK)
106 control->flags |= IEEE80211_TX_CTL_SHORT_GI; 107 r->flags |= IEEE80211_TX_RC_SHORT_GI;
107 rate_index = iwl_hwrate_to_plcp_idx(rate_n_flags); 108 rate_index = iwl_hwrate_to_plcp_idx(rate_n_flags);
108 if (control->band == IEEE80211_BAND_5GHZ) 109 if (info->band == IEEE80211_BAND_5GHZ)
109 rate_index -= IWL_FIRST_OFDM_RATE; 110 rate_index -= IWL_FIRST_OFDM_RATE;
110 control->tx_rate_idx = rate_index; 111 r->idx = rate_index;
111} 112}
112EXPORT_SYMBOL(iwl_hwrate_to_tx_control); 113EXPORT_SYMBOL(iwl_hwrate_to_tx_control);
113 114
@@ -119,7 +120,9 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
119 if (rate_n_flags & RATE_MCS_HT_MSK) { 120 if (rate_n_flags & RATE_MCS_HT_MSK) {
120 idx = (rate_n_flags & 0xff); 121 idx = (rate_n_flags & 0xff);
121 122
122 if (idx >= IWL_RATE_MIMO2_6M_PLCP) 123 if (idx >= IWL_RATE_MIMO3_6M_PLCP)
124 idx = idx - IWL_RATE_MIMO3_6M_PLCP;
125 else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
123 idx = idx - IWL_RATE_MIMO2_6M_PLCP; 126 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
124 127
125 idx += IWL_FIRST_OFDM_RATE; 128 idx += IWL_FIRST_OFDM_RATE;
@@ -140,7 +143,17 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
140} 143}
141EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx); 144EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
142 145
143 146u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
147{
148 int i;
149 u8 ind = ant;
150 for (i = 0; i < RATE_ANT_NUM - 1; i++) {
151 ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
152 if (priv->hw_params.valid_tx_ant & BIT(ind))
153 return ind;
154 }
155 return ant;
156}
144 157
145const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 158const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
146EXPORT_SYMBOL(iwl_bcast_addr); 159EXPORT_SYMBOL(iwl_bcast_addr);
@@ -177,52 +190,6 @@ void iwl_hw_detect(struct iwl_priv *priv)
177} 190}
178EXPORT_SYMBOL(iwl_hw_detect); 191EXPORT_SYMBOL(iwl_hw_detect);
179 192
180/* Tell nic where to find the "keep warm" buffer */
181int iwl_kw_init(struct iwl_priv *priv)
182{
183 unsigned long flags;
184 int ret;
185
186 spin_lock_irqsave(&priv->lock, flags);
187 ret = iwl_grab_nic_access(priv);
188 if (ret)
189 goto out;
190
191 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG,
192 priv->kw.dma_addr >> 4);
193 iwl_release_nic_access(priv);
194out:
195 spin_unlock_irqrestore(&priv->lock, flags);
196 return ret;
197}
198
199int iwl_kw_alloc(struct iwl_priv *priv)
200{
201 struct pci_dev *dev = priv->pci_dev;
202 struct iwl_kw *kw = &priv->kw;
203
204 kw->size = IWL_KW_SIZE;
205 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
206 if (!kw->v_addr)
207 return -ENOMEM;
208
209 return 0;
210}
211
212/**
213 * iwl_kw_free - Free the "keep warm" buffer
214 */
215void iwl_kw_free(struct iwl_priv *priv)
216{
217 struct pci_dev *dev = priv->pci_dev;
218 struct iwl_kw *kw = &priv->kw;
219
220 if (kw->v_addr) {
221 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
222 memset(kw, 0, sizeof(*kw));
223 }
224}
225
226int iwl_hw_nic_init(struct iwl_priv *priv) 193int iwl_hw_nic_init(struct iwl_priv *priv)
227{ 194{
228 unsigned long flags; 195 unsigned long flags;
@@ -271,55 +238,30 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
271} 238}
272EXPORT_SYMBOL(iwl_hw_nic_init); 239EXPORT_SYMBOL(iwl_hw_nic_init);
273 240
274/**
275 * iwl_clear_stations_table - Clear the driver's station table
276 *
277 * NOTE: This does not clear or otherwise alter the device's station table.
278 */
279void iwl_clear_stations_table(struct iwl_priv *priv)
280{
281 unsigned long flags;
282
283 spin_lock_irqsave(&priv->sta_lock, flags);
284
285 if (iwl_is_alive(priv) &&
286 !test_bit(STATUS_EXIT_PENDING, &priv->status) &&
287 iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL))
288 IWL_ERROR("Couldn't clear the station table\n");
289
290 priv->num_stations = 0;
291 memset(priv->stations, 0, sizeof(priv->stations));
292
293 /* clean ucode key table bit map */
294 priv->ucode_key_table = 0;
295
296 spin_unlock_irqrestore(&priv->sta_lock, flags);
297}
298EXPORT_SYMBOL(iwl_clear_stations_table);
299
300void iwl_reset_qos(struct iwl_priv *priv) 241void iwl_reset_qos(struct iwl_priv *priv)
301{ 242{
302 u16 cw_min = 15; 243 u16 cw_min = 15;
303 u16 cw_max = 1023; 244 u16 cw_max = 1023;
304 u8 aifs = 2; 245 u8 aifs = 2;
305 u8 is_legacy = 0; 246 bool is_legacy = false;
306 unsigned long flags; 247 unsigned long flags;
307 int i; 248 int i;
308 249
309 spin_lock_irqsave(&priv->lock, flags); 250 spin_lock_irqsave(&priv->lock, flags);
310 priv->qos_data.qos_active = 0; 251 /* QoS always active in AP and ADHOC mode
252 * In STA mode wait for association
253 */
254 if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
255 priv->iw_mode == NL80211_IFTYPE_AP)
256 priv->qos_data.qos_active = 1;
257 else
258 priv->qos_data.qos_active = 0;
311 259
312 if (priv->iw_mode == NL80211_IFTYPE_ADHOC) { 260 /* check for legacy mode */
313 if (priv->qos_data.qos_enable) 261 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
314 priv->qos_data.qos_active = 1; 262 (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
315 if (!(priv->active_rate & 0xfff0)) { 263 (priv->iw_mode == NL80211_IFTYPE_STATION &&
316 cw_min = 31; 264 (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
317 is_legacy = 1;
318 }
319 } else if (priv->iw_mode == NL80211_IFTYPE_AP) {
320 if (priv->qos_data.qos_enable)
321 priv->qos_data.qos_active = 1;
322 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
323 cw_min = 31; 265 cw_min = 31;
324 is_legacy = 1; 266 is_legacy = 1;
325 } 267 }
@@ -385,10 +327,10 @@ void iwl_reset_qos(struct iwl_priv *priv)
385} 327}
386EXPORT_SYMBOL(iwl_reset_qos); 328EXPORT_SYMBOL(iwl_reset_qos);
387 329
388#define MAX_BIT_RATE_40_MHZ 0x96 /* 150 Mbps */ 330#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
389#define MAX_BIT_RATE_20_MHZ 0x48 /* 72 Mbps */ 331#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
390static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv, 332static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
391 struct ieee80211_ht_info *ht_info, 333 struct ieee80211_sta_ht_cap *ht_info,
392 enum ieee80211_band band) 334 enum ieee80211_band band)
393{ 335{
394 u16 max_bit_rate = 0; 336 u16 max_bit_rate = 0;
@@ -396,45 +338,46 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
396 u8 tx_chains_num = priv->hw_params.tx_chains_num; 338 u8 tx_chains_num = priv->hw_params.tx_chains_num;
397 339
398 ht_info->cap = 0; 340 ht_info->cap = 0;
399 memset(ht_info->supp_mcs_set, 0, 16); 341 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
400 342
401 ht_info->ht_supported = 1; 343 ht_info->ht_supported = true;
402 344
403 ht_info->cap |= (u16)IEEE80211_HT_CAP_GRN_FLD; 345 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
404 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20; 346 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
405 ht_info->cap |= (u16)(IEEE80211_HT_CAP_SM_PS & 347 ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
406 (WLAN_HT_CAP_SM_PS_DISABLED << 2)); 348 (WLAN_HT_CAP_SM_PS_DISABLED << 2));
407 349
408 max_bit_rate = MAX_BIT_RATE_20_MHZ; 350 max_bit_rate = MAX_BIT_RATE_20_MHZ;
409 if (priv->hw_params.fat_channel & BIT(band)) { 351 if (priv->hw_params.fat_channel & BIT(band)) {
410 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH; 352 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
411 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40; 353 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
412 ht_info->supp_mcs_set[4] = 0x01; 354 ht_info->mcs.rx_mask[4] = 0x01;
413 max_bit_rate = MAX_BIT_RATE_40_MHZ; 355 max_bit_rate = MAX_BIT_RATE_40_MHZ;
414 } 356 }
415 357
416 if (priv->cfg->mod_params->amsdu_size_8K) 358 if (priv->cfg->mod_params->amsdu_size_8K)
417 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU; 359 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
418 360
419 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; 361 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
420 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; 362 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
421 363
422 ht_info->supp_mcs_set[0] = 0xFF; 364 ht_info->mcs.rx_mask[0] = 0xFF;
423 if (rx_chains_num >= 2) 365 if (rx_chains_num >= 2)
424 ht_info->supp_mcs_set[1] = 0xFF; 366 ht_info->mcs.rx_mask[1] = 0xFF;
425 if (rx_chains_num >= 3) 367 if (rx_chains_num >= 3)
426 ht_info->supp_mcs_set[2] = 0xFF; 368 ht_info->mcs.rx_mask[2] = 0xFF;
427 369
428 /* Highest supported Rx data rate */ 370 /* Highest supported Rx data rate */
429 max_bit_rate *= rx_chains_num; 371 max_bit_rate *= rx_chains_num;
430 ht_info->supp_mcs_set[10] = (u8)(max_bit_rate & 0x00FF); 372 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
431 ht_info->supp_mcs_set[11] = (u8)((max_bit_rate & 0xFF00) >> 8); 373 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
432 374
433 /* Tx MCS capabilities */ 375 /* Tx MCS capabilities */
434 ht_info->supp_mcs_set[12] = IEEE80211_HT_CAP_MCS_TX_DEFINED; 376 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
435 if (tx_chains_num != rx_chains_num) { 377 if (tx_chains_num != rx_chains_num) {
436 ht_info->supp_mcs_set[12] |= IEEE80211_HT_CAP_MCS_TX_RX_DIFF; 378 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
437 ht_info->supp_mcs_set[12] |= ((tx_chains_num - 1) << 2); 379 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
380 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
438 } 381 }
439} 382}
440 383
@@ -498,7 +441,7 @@ static int iwlcore_init_geos(struct iwl_priv *priv)
498 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE; 441 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
499 442
500 if (priv->cfg->sku & IWL_SKU_N) 443 if (priv->cfg->sku & IWL_SKU_N)
501 iwlcore_init_ht_hw_capab(priv, &sband->ht_info, 444 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
502 IEEE80211_BAND_5GHZ); 445 IEEE80211_BAND_5GHZ);
503 446
504 sband = &priv->bands[IEEE80211_BAND_2GHZ]; 447 sband = &priv->bands[IEEE80211_BAND_2GHZ];
@@ -508,7 +451,7 @@ static int iwlcore_init_geos(struct iwl_priv *priv)
508 sband->n_bitrates = IWL_RATE_COUNT; 451 sband->n_bitrates = IWL_RATE_COUNT;
509 452
510 if (priv->cfg->sku & IWL_SKU_N) 453 if (priv->cfg->sku & IWL_SKU_N)
511 iwlcore_init_ht_hw_capab(priv, &sband->ht_info, 454 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
512 IEEE80211_BAND_2GHZ); 455 IEEE80211_BAND_2GHZ);
513 456
514 priv->ieee_channels = channels; 457 priv->ieee_channels = channels;
@@ -598,8 +541,8 @@ static void iwlcore_free_geos(struct iwl_priv *priv)
598static bool is_single_rx_stream(struct iwl_priv *priv) 541static bool is_single_rx_stream(struct iwl_priv *priv)
599{ 542{
600 return !priv->current_ht_config.is_ht || 543 return !priv->current_ht_config.is_ht ||
601 ((priv->current_ht_config.supp_mcs_set[1] == 0) && 544 ((priv->current_ht_config.mcs.rx_mask[1] == 0) &&
602 (priv->current_ht_config.supp_mcs_set[2] == 0)); 545 (priv->current_ht_config.mcs.rx_mask[2] == 0));
603} 546}
604 547
605static u8 iwl_is_channel_extension(struct iwl_priv *priv, 548static u8 iwl_is_channel_extension(struct iwl_priv *priv,
@@ -612,10 +555,10 @@ static u8 iwl_is_channel_extension(struct iwl_priv *priv,
612 if (!is_channel_valid(ch_info)) 555 if (!is_channel_valid(ch_info))
613 return 0; 556 return 0;
614 557
615 if (extension_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) 558 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
616 return !(ch_info->fat_extension_channel & 559 return !(ch_info->fat_extension_channel &
617 IEEE80211_CHAN_NO_FAT_ABOVE); 560 IEEE80211_CHAN_NO_FAT_ABOVE);
618 else if (extension_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) 561 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
619 return !(ch_info->fat_extension_channel & 562 return !(ch_info->fat_extension_channel &
620 IEEE80211_CHAN_NO_FAT_BELOW); 563 IEEE80211_CHAN_NO_FAT_BELOW);
621 564
@@ -623,24 +566,24 @@ static u8 iwl_is_channel_extension(struct iwl_priv *priv,
623} 566}
624 567
625u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv, 568u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
626 struct ieee80211_ht_info *sta_ht_inf) 569 struct ieee80211_sta_ht_cap *sta_ht_inf)
627{ 570{
628 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config; 571 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
629 572
630 if ((!iwl_ht_conf->is_ht) || 573 if ((!iwl_ht_conf->is_ht) ||
631 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) || 574 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
632 (iwl_ht_conf->extension_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE)) 575 (iwl_ht_conf->extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_NONE))
633 return 0; 576 return 0;
634 577
635 if (sta_ht_inf) { 578 if (sta_ht_inf) {
636 if ((!sta_ht_inf->ht_supported) || 579 if ((!sta_ht_inf->ht_supported) ||
637 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH))) 580 (!(sta_ht_inf->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)))
638 return 0; 581 return 0;
639 } 582 }
640 583
641 return iwl_is_channel_extension(priv, priv->band, 584 return iwl_is_channel_extension(priv, priv->band,
642 iwl_ht_conf->control_channel, 585 le16_to_cpu(priv->staging_rxon.channel),
643 iwl_ht_conf->extension_chan_offset); 586 iwl_ht_conf->extension_chan_offset);
644} 587}
645EXPORT_SYMBOL(iwl_is_fat_tx_allowed); 588EXPORT_SYMBOL(iwl_is_fat_tx_allowed);
646 589
@@ -665,22 +608,15 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
665 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK | 608 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
666 RXON_FLG_CHANNEL_MODE_PURE_40_MSK); 609 RXON_FLG_CHANNEL_MODE_PURE_40_MSK);
667 610
668 if (le16_to_cpu(rxon->channel) != ht_info->control_channel) {
669 IWL_DEBUG_ASSOC("control diff than current %d %d\n",
670 le16_to_cpu(rxon->channel),
671 ht_info->control_channel);
672 return;
673 }
674
675 /* Note: control channel is opposite of extension channel */ 611 /* Note: control channel is opposite of extension channel */
676 switch (ht_info->extension_chan_offset) { 612 switch (ht_info->extension_chan_offset) {
677 case IEEE80211_HT_IE_CHA_SEC_ABOVE: 613 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
678 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 614 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
679 break; 615 break;
680 case IEEE80211_HT_IE_CHA_SEC_BELOW: 616 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
681 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 617 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
682 break; 618 break;
683 case IEEE80211_HT_IE_CHA_SEC_NONE: 619 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
684 default: 620 default:
685 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK; 621 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
686 break; 622 break;
@@ -694,14 +630,12 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
694 630
695 IWL_DEBUG_ASSOC("supported HT rate 0x%X 0x%X 0x%X " 631 IWL_DEBUG_ASSOC("supported HT rate 0x%X 0x%X 0x%X "
696 "rxon flags 0x%X operation mode :0x%X " 632 "rxon flags 0x%X operation mode :0x%X "
697 "extension channel offset 0x%x " 633 "extension channel offset 0x%x\n",
698 "control chan %d\n", 634 ht_info->mcs.rx_mask[0],
699 ht_info->supp_mcs_set[0], 635 ht_info->mcs.rx_mask[1],
700 ht_info->supp_mcs_set[1], 636 ht_info->mcs.rx_mask[2],
701 ht_info->supp_mcs_set[2],
702 le32_to_cpu(rxon->flags), ht_info->ht_protection, 637 le32_to_cpu(rxon->flags), ht_info->ht_protection,
703 ht_info->extension_chan_offset, 638 ht_info->extension_chan_offset);
704 ht_info->control_channel);
705 return; 639 return;
706} 640}
707EXPORT_SYMBOL(iwl_set_rxon_ht); 641EXPORT_SYMBOL(iwl_set_rxon_ht);
@@ -745,7 +679,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
745 break; 679 break;
746 case WLAN_HT_CAP_SM_PS_INVALID: 680 case WLAN_HT_CAP_SM_PS_INVALID:
747 default: 681 default:
748 IWL_ERROR("invalide mimo ps mode %d\n", 682 IWL_ERROR("invalid mimo ps mode %d\n",
749 priv->current_ht_config.sm_ps); 683 priv->current_ht_config.sm_ps);
750 WARN_ON(1); 684 WARN_ON(1);
751 idle_cnt = -1; 685 idle_cnt = -1;
@@ -871,11 +805,14 @@ int iwl_setup_mac(struct iwl_priv *priv)
871 805
872 /* Tell mac80211 our characteristics */ 806 /* Tell mac80211 our characteristics */
873 hw->flags = IEEE80211_HW_SIGNAL_DBM | 807 hw->flags = IEEE80211_HW_SIGNAL_DBM |
874 IEEE80211_HW_NOISE_DBM; 808 IEEE80211_HW_NOISE_DBM |
809 IEEE80211_HW_AMPDU_AGGREGATION;
875 hw->wiphy->interface_modes = 810 hw->wiphy->interface_modes =
876 BIT(NL80211_IFTYPE_AP) |
877 BIT(NL80211_IFTYPE_STATION) | 811 BIT(NL80211_IFTYPE_STATION) |
878 BIT(NL80211_IFTYPE_ADHOC); 812 BIT(NL80211_IFTYPE_ADHOC);
813
814 hw->wiphy->fw_handles_regulatory = true;
815
879 /* Default value; 4 EDCA QOS priorities */ 816 /* Default value; 4 EDCA QOS priorities */
880 hw->queues = 4; 817 hw->queues = 4;
881 /* queues to support 11n aggregation */ 818 /* queues to support 11n aggregation */
@@ -948,16 +885,12 @@ int iwl_init_drv(struct iwl_priv *priv)
948 885
949 priv->iw_mode = NL80211_IFTYPE_STATION; 886 priv->iw_mode = NL80211_IFTYPE_STATION;
950 887
951 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
952 priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED; 888 priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;
953 889
954 /* Choose which receivers/antennas to use */ 890 /* Choose which receivers/antennas to use */
955 iwl_set_rxon_chain(priv); 891 iwl_set_rxon_chain(priv);
956 iwl_init_scan_params(priv); 892 iwl_init_scan_params(priv);
957 893
958 if (priv->cfg->mod_params->enable_qos)
959 priv->qos_data.qos_enable = 1;
960
961 iwl_reset_qos(priv); 894 iwl_reset_qos(priv);
962 895
963 priv->qos_data.qos_active = 0; 896 priv->qos_data.qos_active = 0;
@@ -1025,6 +958,30 @@ void iwl_uninit_drv(struct iwl_priv *priv)
1025} 958}
1026EXPORT_SYMBOL(iwl_uninit_drv); 959EXPORT_SYMBOL(iwl_uninit_drv);
1027 960
961
962void iwl_disable_interrupts(struct iwl_priv *priv)
963{
964 clear_bit(STATUS_INT_ENABLED, &priv->status);
965
966 /* disable interrupts from uCode/NIC to host */
967 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
968
969 /* acknowledge/clear/reset any interrupts still pending
970 * from uCode or flow handler (Rx/Tx DMA) */
971 iwl_write32(priv, CSR_INT, 0xffffffff);
972 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
973 IWL_DEBUG_ISR("Disabled interrupts\n");
974}
975EXPORT_SYMBOL(iwl_disable_interrupts);
976
977void iwl_enable_interrupts(struct iwl_priv *priv)
978{
979 IWL_DEBUG_ISR("Enabling interrupts\n");
980 set_bit(STATUS_INT_ENABLED, &priv->status);
981 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
982}
983EXPORT_SYMBOL(iwl_enable_interrupts);
984
1028int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags) 985int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
1029{ 986{
1030 u32 stat_flags = 0; 987 u32 stat_flags = 0;
@@ -1172,24 +1129,47 @@ int iwl_verify_ucode(struct iwl_priv *priv)
1172} 1129}
1173EXPORT_SYMBOL(iwl_verify_ucode); 1130EXPORT_SYMBOL(iwl_verify_ucode);
1174 1131
1132
1133static const char *desc_lookup_text[] = {
1134 "OK",
1135 "FAIL",
1136 "BAD_PARAM",
1137 "BAD_CHECKSUM",
1138 "NMI_INTERRUPT_WDG",
1139 "SYSASSERT",
1140 "FATAL_ERROR",
1141 "BAD_COMMAND",
1142 "HW_ERROR_TUNE_LOCK",
1143 "HW_ERROR_TEMPERATURE",
1144 "ILLEGAL_CHAN_FREQ",
1145 "VCC_NOT_STABLE",
1146 "FH_ERROR",
1147 "NMI_INTERRUPT_HOST",
1148 "NMI_INTERRUPT_ACTION_PT",
1149 "NMI_INTERRUPT_UNKNOWN",
1150 "UCODE_VERSION_MISMATCH",
1151 "HW_ERROR_ABS_LOCK",
1152 "HW_ERROR_CAL_LOCK_FAIL",
1153 "NMI_INTERRUPT_INST_ACTION_PT",
1154 "NMI_INTERRUPT_DATA_ACTION_PT",
1155 "NMI_TRM_HW_ER",
1156 "NMI_INTERRUPT_TRM",
1157 "NMI_INTERRUPT_BREAK_POINT"
1158 "DEBUG_0",
1159 "DEBUG_1",
1160 "DEBUG_2",
1161 "DEBUG_3",
1162 "UNKNOWN"
1163};
1164
1175static const char *desc_lookup(int i) 1165static const char *desc_lookup(int i)
1176{ 1166{
1177 switch (i) { 1167 int max = ARRAY_SIZE(desc_lookup_text) - 1;
1178 case 1:
1179 return "FAIL";
1180 case 2:
1181 return "BAD_PARAM";
1182 case 3:
1183 return "BAD_CHECKSUM";
1184 case 4:
1185 return "NMI_INTERRUPT";
1186 case 5:
1187 return "SYSASSERT";
1188 case 6:
1189 return "FATAL_ERROR";
1190 }
1191 1168
1192 return "UNKNOWN"; 1169 if (i < 0 || i > max)
1170 i = max;
1171
1172 return desc_lookup_text[i];
1193} 1173}
1194 1174
1195#define ERROR_START_OFFSET (1 * sizeof(u32)) 1175#define ERROR_START_OFFSET (1 * sizeof(u32))
@@ -1235,9 +1215,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1235 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); 1215 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1236 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); 1216 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1237 1217
1238 IWL_ERROR("Desc Time " 1218 IWL_ERROR("Desc Time "
1239 "data1 data2 line\n"); 1219 "data1 data2 line\n");
1240 IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n", 1220 IWL_ERROR("%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
1241 desc_lookup(desc), desc, time, data1, data2, line); 1221 desc_lookup(desc), desc, time, data1, data2, line);
1242 IWL_ERROR("blink1 blink2 ilink1 ilink2\n"); 1222 IWL_ERROR("blink1 blink2 ilink1 ilink2\n");
1243 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2, 1223 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
@@ -1377,6 +1357,7 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1377} 1357}
1378EXPORT_SYMBOL(iwl_rf_kill_ct_config); 1358EXPORT_SYMBOL(iwl_rf_kill_ct_config);
1379 1359
1360
1380/* 1361/*
1381 * CARD_STATE_CMD 1362 * CARD_STATE_CMD
1382 * 1363 *
@@ -1465,6 +1446,16 @@ int iwl_radio_kill_sw_enable_radio(struct iwl_priv *priv)
1465 return 0; 1446 return 0;
1466 } 1447 }
1467 1448
1449 /* when driver is up while rfkill is on, it wont receive
1450 * any CARD_STATE_NOTIFICATION notifications so we have to
1451 * restart it in here
1452 */
1453 if (priv->is_open && !test_bit(STATUS_ALIVE, &priv->status)) {
1454 clear_bit(STATUS_RF_KILL_SW, &priv->status);
1455 if (!iwl_is_rfkill(priv))
1456 queue_work(priv->workqueue, &priv->up);
1457 }
1458
1468 /* If the driver is already loaded, it will receive 1459 /* If the driver is already loaded, it will receive
1469 * CARD_STATE_NOTIFICATION notifications and the handler will 1460 * CARD_STATE_NOTIFICATION notifications and the handler will
1470 * call restart to reload the driver. 1461 * call restart to reload the driver.
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 288b6a800e03..7c3a20a986bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -72,6 +72,7 @@ struct iwl_cmd;
72 72
73#define IWLWIFI_VERSION "1.3.27k" 73#define IWLWIFI_VERSION "1.3.27k"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2008 Intel Corporation" 74#define DRV_COPYRIGHT "Copyright(c) 2003-2008 Intel Corporation"
75#define DRV_AUTHOR "<ilw@linux.intel.com>"
75 76
76#define IWL_PCI_DEVICE(dev, subdev, cfg) \ 77#define IWL_PCI_DEVICE(dev, subdev, cfg) \
77 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ 78 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
@@ -100,12 +101,8 @@ struct iwl_hcmd_utils_ops {
100}; 101};
101 102
102struct iwl_lib_ops { 103struct iwl_lib_ops {
103 /* set hw dependant perameters */ 104 /* set hw dependent parameters */
104 int (*set_hw_params)(struct iwl_priv *priv); 105 int (*set_hw_params)(struct iwl_priv *priv);
105 /* ucode shared memory */
106 int (*alloc_shared_mem)(struct iwl_priv *priv);
107 void (*free_shared_mem)(struct iwl_priv *priv);
108 int (*shared_mem_rx_idx)(struct iwl_priv *priv);
109 /* Handling TX */ 106 /* Handling TX */
110 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv, 107 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
111 struct iwl_tx_queue *txq, 108 struct iwl_tx_queue *txq,
@@ -157,22 +154,53 @@ struct iwl_ops {
157struct iwl_mod_params { 154struct iwl_mod_params {
158 int disable; /* def: 0 = enable radio */ 155 int disable; /* def: 0 = enable radio */
159 int sw_crypto; /* def: 0 = using hardware encryption */ 156 int sw_crypto; /* def: 0 = using hardware encryption */
160 int debug; /* def: 0 = minimal debug log messages */ 157 u32 debug; /* def: 0 = minimal debug log messages */
161 int disable_hw_scan; /* def: 0 = use h/w scan */ 158 int disable_hw_scan; /* def: 0 = use h/w scan */
162 int num_of_queues; /* def: HW dependent */ 159 int num_of_queues; /* def: HW dependent */
163 int num_of_ampdu_queues;/* def: HW dependent */ 160 int num_of_ampdu_queues;/* def: HW dependent */
164 int enable_qos; /* def: 1 = use quality of service */
165 int disable_11n; /* def: 0 = disable 11n capabilities */ 161 int disable_11n; /* def: 0 = disable 11n capabilities */
166 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ 162 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
167 int antenna; /* def: 0 = both antennas (use diversity) */ 163 int antenna; /* def: 0 = both antennas (use diversity) */
168 int restart_fw; /* def: 1 = restart firmware */ 164 int restart_fw; /* def: 1 = restart firmware */
169}; 165};
170 166
167/**
168 * struct iwl_cfg
169 * @fw_name_pre: Firmware filename prefix. The api version and extension
170 * (.ucode) will be added to filename before loading from disk. The
171 * filename is constructed as fw_name_pre<api>.ucode.
172 * @ucode_api_max: Highest version of uCode API supported by driver.
173 * @ucode_api_min: Lowest version of uCode API supported by driver.
174 *
175 * We enable the driver to be backward compatible wrt API version. The
176 * driver specifies which APIs it supports (with @ucode_api_max being the
177 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
178 * it has a supported API version. The firmware's API version will be
179 * stored in @iwl_priv, enabling the driver to make runtime changes based
180 * on firmware version used.
181 *
182 * For example,
183 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
184 * Driver interacts with Firmware API version >= 2.
185 * } else {
186 * Driver interacts with Firmware API version 1.
187 * }
188 *
189 * The ideal usage of this infrastructure is to treat a new ucode API
190 * release as a new hardware revision. That is, through utilizing the
191 * iwl_hcmd_utils_ops etc. we accommodate different command structures
192 * and flows between hardware versions (4965/5000) as well as their API
193 * versions.
194 */
171struct iwl_cfg { 195struct iwl_cfg {
172 const char *name; 196 const char *name;
173 const char *fw_name; 197 const char *fw_name_pre;
198 const unsigned int ucode_api_max;
199 const unsigned int ucode_api_min;
174 unsigned int sku; 200 unsigned int sku;
175 int eeprom_size; 201 int eeprom_size;
202 u16 eeprom_ver;
203 u16 eeprom_calib_ver;
176 const struct iwl_ops *ops; 204 const struct iwl_ops *ops;
177 const struct iwl_mod_params *mod_params; 205 const struct iwl_mod_params *mod_params;
178}; 206};
@@ -184,22 +212,17 @@ struct iwl_cfg {
184struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 212struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
185 struct ieee80211_ops *hw_ops); 213 struct ieee80211_ops *hw_ops);
186void iwl_hw_detect(struct iwl_priv *priv); 214void iwl_hw_detect(struct iwl_priv *priv);
187void iwl_clear_stations_table(struct iwl_priv *priv);
188void iwl_reset_qos(struct iwl_priv *priv); 215void iwl_reset_qos(struct iwl_priv *priv);
189void iwl_set_rxon_chain(struct iwl_priv *priv); 216void iwl_set_rxon_chain(struct iwl_priv *priv);
190int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch); 217int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
191void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info); 218void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info);
192u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv, 219u8 iwl_is_fat_tx_allowed(struct iwl_priv *priv,
193 struct ieee80211_ht_info *sta_ht_inf); 220 struct ieee80211_sta_ht_cap *sta_ht_inf);
194int iwl_hw_nic_init(struct iwl_priv *priv); 221int iwl_hw_nic_init(struct iwl_priv *priv);
195int iwl_setup_mac(struct iwl_priv *priv); 222int iwl_setup_mac(struct iwl_priv *priv);
196int iwl_set_hw_params(struct iwl_priv *priv); 223int iwl_set_hw_params(struct iwl_priv *priv);
197int iwl_init_drv(struct iwl_priv *priv); 224int iwl_init_drv(struct iwl_priv *priv);
198void iwl_uninit_drv(struct iwl_priv *priv); 225void iwl_uninit_drv(struct iwl_priv *priv);
199/* "keep warm" functions */
200int iwl_kw_init(struct iwl_priv *priv);
201int iwl_kw_alloc(struct iwl_priv *priv);
202void iwl_kw_free(struct iwl_priv *priv);
203 226
204/***************************************************** 227/*****************************************************
205* RX 228* RX
@@ -212,8 +235,6 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
212void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 235void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
213void iwl_rx_replenish(struct iwl_priv *priv); 236void iwl_rx_replenish(struct iwl_priv *priv);
214int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 237int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
215int iwl_rx_agg_start(struct iwl_priv *priv, const u8 *addr, int tid, u16 ssn);
216int iwl_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid);
217int iwl_rx_queue_restock(struct iwl_priv *priv); 238int iwl_rx_queue_restock(struct iwl_priv *priv);
218int iwl_rx_queue_space(const struct iwl_rx_queue *q); 239int iwl_rx_queue_space(const struct iwl_rx_queue *q);
219void iwl_rx_allocate(struct iwl_priv *priv); 240void iwl_rx_allocate(struct iwl_priv *priv);
@@ -237,7 +258,6 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
237int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn); 258int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
238int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid); 259int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
239int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id); 260int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
240
241/***************************************************** 261/*****************************************************
242 * TX power 262 * TX power
243 ****************************************************/ 263 ****************************************************/
@@ -259,6 +279,13 @@ void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
259 struct ieee80211_tx_info *info); 279 struct ieee80211_tx_info *info);
260int iwl_hwrate_to_plcp_idx(u32 rate_n_flags); 280int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
261 281
282u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx);
283
284static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
285{
286 return BIT(ant_idx) << RATE_MCS_ANT_POS;
287}
288
262static inline u8 iwl_hw_get_rate(__le32 rate_n_flags) 289static inline u8 iwl_hw_get_rate(__le32 rate_n_flags)
263{ 290{
264 return le32_to_cpu(rate_n_flags) & 0xFF; 291 return le32_to_cpu(rate_n_flags) & 0xFF;
@@ -289,6 +316,14 @@ int iwl_send_calib_results(struct iwl_priv *priv);
289int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len); 316int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
290void iwl_calib_free_results(struct iwl_priv *priv); 317void iwl_calib_free_results(struct iwl_priv *priv);
291 318
319/*******************************************************************************
320 * Spectrum Measureemtns in iwl-spectrum.c
321 ******************************************************************************/
322#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
323void iwl_setup_spectrum_handlers(struct iwl_priv *priv);
324#else
325static inline void iwl_setup_spectrum_handlers(struct iwl_priv *priv) {}
326#endif
292/***************************************************** 327/*****************************************************
293 * S e n d i n g H o s t C o m m a n d s * 328 * S e n d i n g H o s t C o m m a n d s *
294 *****************************************************/ 329 *****************************************************/
@@ -308,11 +343,18 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
308int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); 343int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
309 344
310/***************************************************** 345/*****************************************************
346 * PCI *
347 *****************************************************/
348void iwl_disable_interrupts(struct iwl_priv *priv);
349void iwl_enable_interrupts(struct iwl_priv *priv);
350
351/*****************************************************
311* Error Handling Debugging 352* Error Handling Debugging
312******************************************************/ 353******************************************************/
313void iwl_dump_nic_error_log(struct iwl_priv *priv); 354void iwl_dump_nic_error_log(struct iwl_priv *priv);
314void iwl_dump_nic_event_log(struct iwl_priv *priv); 355void iwl_dump_nic_event_log(struct iwl_priv *priv);
315 356
357
316/*************** DRIVER STATUS FUNCTIONS *****/ 358/*************** DRIVER STATUS FUNCTIONS *****/
317 359
318#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ 360#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 662edf4f8d22..f34ede44ed10 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -60,6 +60,8 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#ifndef __iwl_csr_h__
64#define __iwl_csr_h__
63/*=== CSR (control and status registers) ===*/ 65/*=== CSR (control and status registers) ===*/
64#define CSR_BASE (0x000) 66#define CSR_BASE (0x000)
65 67
@@ -214,6 +216,8 @@
214/* EEPROM REG */ 216/* EEPROM REG */
215#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) 217#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
216#define CSR_EEPROM_REG_BIT_CMD (0x00000002) 218#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
219#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC)
220#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
217 221
218/* EEPROM GP */ 222/* EEPROM GP */
219#define CSR_EEPROM_GP_VALID_MSK (0x00000006) 223#define CSR_EEPROM_GP_VALID_MSK (0x00000006)
@@ -286,4 +290,4 @@
286#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004) 290#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
287 291
288 292
289 293#endif /* !__iwl_csr_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index e548d67f87fd..56c13b458de7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -21,7 +21,7 @@
21 * file called LICENSE. 21 * file called LICENSE.
22 * 22 *
23 * Contact Information: 23 * Contact Information:
24 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 * 26 *
27 *****************************************************************************/ 27 *****************************************************************************/
@@ -40,6 +40,13 @@ do { if ((priv->debug_level & (level)) && net_ratelimit()) \
40 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \ 40 dev_printk(KERN_ERR, &(priv->hw->wiphy->dev), "%c %s " fmt, \
41 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0) 41 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
42 42
43#define iwl_print_hex_dump(priv, level, p, len) \
44do { \
45 if (priv->debug_level & level) \
46 print_hex_dump(KERN_DEBUG, "iwl data: ", \
47 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
48} while (0)
49
43#ifdef CONFIG_IWLWIFI_DEBUGFS 50#ifdef CONFIG_IWLWIFI_DEBUGFS
44struct iwl_debugfs { 51struct iwl_debugfs {
45 const char *name; 52 const char *name;
@@ -53,6 +60,7 @@ struct iwl_debugfs {
53 struct dentry *file_rx_statistics; 60 struct dentry *file_rx_statistics;
54 struct dentry *file_tx_statistics; 61 struct dentry *file_tx_statistics;
55 struct dentry *file_log_event; 62 struct dentry *file_log_event;
63 struct dentry *file_channels;
56 } dbgfs_data_files; 64 } dbgfs_data_files;
57 struct dir_rf_files { 65 struct dir_rf_files {
58 struct dentry *file_disable_sensitivity; 66 struct dentry *file_disable_sensitivity;
@@ -70,6 +78,9 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
70#else 78#else
71#define IWL_DEBUG(level, fmt, args...) 79#define IWL_DEBUG(level, fmt, args...)
72#define IWL_DEBUG_LIMIT(level, fmt, args...) 80#define IWL_DEBUG_LIMIT(level, fmt, args...)
81static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
82 void *p, u32 len)
83{}
73#endif /* CONFIG_IWLWIFI_DEBUG */ 84#endif /* CONFIG_IWLWIFI_DEBUG */
74 85
75 86
@@ -85,29 +96,25 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
85#endif /* CONFIG_IWLWIFI_DEBUGFS */ 96#endif /* CONFIG_IWLWIFI_DEBUGFS */
86 97
87/* 98/*
88 * To use the debug system; 99 * To use the debug system:
89 * 100 *
90 * If you are defining a new debug classification, simply add it to the #define 101 * If you are defining a new debug classification, simply add it to the #define
91 * list here in the form of: 102 * list here in the form of
92 * 103 *
93 * #define IWL_DL_xxxx VALUE 104 * #define IWL_DL_xxxx VALUE
94 * 105 *
95 * shifting value to the left one bit from the previous entry. xxxx should be 106 * where xxxx should be the name of the classification (for example, WEP).
96 * the name of the classification (for example, WEP)
97 * 107 *
98 * You then need to either add a IWL_xxxx_DEBUG() macro definition for your 108 * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
99 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want 109 * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
100 * to send output to that classification. 110 * to send output to that classification.
101 * 111 *
102 * To add your debug level to the list of levels seen when you perform 112 * The active debug levels can be accessed via files
103 *
104 * % cat /proc/net/iwl/debug_level
105 *
106 * you simply need to add your entry to the iwl_debug_levels array.
107 * 113 *
108 * If you do not see debug_level in /proc/net/iwl then you do not have 114 * /sys/module/iwlagn/parameters/debug{50}
109 * CONFIG_IWLWIFI_DEBUG defined in your kernel configuration 115 * /sys/class/net/wlan0/device/debug_level
110 * 116 *
117 * when CONFIG_IWLWIFI_DEBUG=y.
111 */ 118 */
112 119
113#define IWL_DL_INFO (1 << 0) 120#define IWL_DL_INFO (1 << 0)
@@ -183,6 +190,8 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
183#define IWL_DEBUG_STATS(f, a...) IWL_DEBUG(IWL_DL_STATS, f, ## a) 190#define IWL_DEBUG_STATS(f, a...) IWL_DEBUG(IWL_DL_STATS, f, ## a)
184#define IWL_DEBUG_STATS_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_STATS, f, ## a) 191#define IWL_DEBUG_STATS_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_STATS, f, ## a)
185#define IWL_DEBUG_TX_REPLY(f, a...) IWL_DEBUG(IWL_DL_TX_REPLY, f, ## a) 192#define IWL_DEBUG_TX_REPLY(f, a...) IWL_DEBUG(IWL_DL_TX_REPLY, f, ## a)
193#define IWL_DEBUG_TX_REPLY_LIMIT(f, a...) \
194 IWL_DEBUG_LIMIT(IWL_DL_TX_REPLY, f, ## a)
186#define IWL_DEBUG_QOS(f, a...) IWL_DEBUG(IWL_DL_QOS, f, ## a) 195#define IWL_DEBUG_QOS(f, a...) IWL_DEBUG(IWL_DL_QOS, f, ## a)
187#define IWL_DEBUG_RADIO(f, a...) IWL_DEBUG(IWL_DL_RADIO, f, ## a) 196#define IWL_DEBUG_RADIO(f, a...) IWL_DEBUG(IWL_DL_RADIO, f, ## a)
188#define IWL_DEBUG_POWER(f, a...) IWL_DEBUG(IWL_DL_POWER, f, ## a) 197#define IWL_DEBUG_POWER(f, a...) IWL_DEBUG(IWL_DL_POWER, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 20db0eb636a8..d5253a179dec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -22,7 +22,7 @@
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Tomas Winkler <tomas.winkler@intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/ 27 *****************************************************************************/
28 28
@@ -58,7 +58,8 @@
58#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ 58#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
59 dbgfs->dbgfs_##parent##_files.file_##name = \ 59 dbgfs->dbgfs_##parent##_files.file_##name = \
60 debugfs_create_bool(#name, 0644, dbgfs->dir_##parent, ptr); \ 60 debugfs_create_bool(#name, 0644, dbgfs->dir_##parent, ptr); \
61 if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name)) \ 61 if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name) \
62 || !dbgfs->dbgfs_##parent##_files.file_##name) \
62 goto err; \ 63 goto err; \
63} while (0) 64} while (0)
64 65
@@ -228,7 +229,6 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
228 ssize_t ret; 229 ssize_t ret;
229 /* Add 30 for initial string */ 230 /* Add 30 for initial string */
230 const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations); 231 const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
231 DECLARE_MAC_BUF(mac);
232 232
233 buf = kmalloc(bufsz, GFP_KERNEL); 233 buf = kmalloc(bufsz, GFP_KERNEL);
234 if (!buf) 234 if (!buf)
@@ -242,7 +242,6 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
242 if (station->used) { 242 if (station->used) {
243 pos += scnprintf(buf + pos, bufsz - pos, 243 pos += scnprintf(buf + pos, bufsz - pos,
244 "station %d:\ngeneral data:\n", i+1); 244 "station %d:\ngeneral data:\n", i+1);
245 print_mac(mac, station->sta.sta.addr);
246 pos += scnprintf(buf + pos, bufsz - pos, "id: %u\n", 245 pos += scnprintf(buf + pos, bufsz - pos, "id: %u\n",
247 station->sta.sta.sta_id); 246 station->sta.sta.sta_id);
248 pos += scnprintf(buf + pos, bufsz - pos, "mode: %u\n", 247 pos += scnprintf(buf + pos, bufsz - pos, "mode: %u\n",
@@ -349,12 +348,86 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
349 return count; 348 return count;
350} 349}
351 350
351
352
353static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
354 size_t count, loff_t *ppos)
355{
356 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
357 struct ieee80211_channel *channels = NULL;
358 const struct ieee80211_supported_band *supp_band = NULL;
359 int pos = 0, i, bufsz = PAGE_SIZE;
360 char *buf;
361 ssize_t ret;
362
363 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
364 return -EAGAIN;
365
366 buf = kzalloc(bufsz, GFP_KERNEL);
367 if (!buf) {
368 IWL_ERROR("Can not allocate Buffer\n");
369 return -ENOMEM;
370 }
371
372 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
373 channels = supp_band->channels;
374
375 pos += scnprintf(buf + pos, bufsz - pos,
376 "Displaying %d channels in 2.4GHz band 802.11bg):\n",
377 supp_band->n_channels);
378
379 for (i = 0; i < supp_band->n_channels; i++)
380 pos += scnprintf(buf + pos, bufsz - pos,
381 "%d: %ddBm: BSS%s%s, %s.\n",
382 ieee80211_frequency_to_channel(
383 channels[i].center_freq),
384 channels[i].max_power,
385 channels[i].flags & IEEE80211_CHAN_RADAR ?
386 " (IEEE 802.11h required)" : "",
387 (!(channels[i].flags & IEEE80211_CHAN_NO_IBSS)
388 || (channels[i].flags &
389 IEEE80211_CHAN_RADAR)) ? "" :
390 ", IBSS",
391 channels[i].flags &
392 IEEE80211_CHAN_PASSIVE_SCAN ?
393 "passive only" : "active/passive");
394
395 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
396 channels = supp_band->channels;
397
398 pos += scnprintf(buf + pos, bufsz - pos,
399 "Displaying %d channels in 5.2GHz band (802.11a)\n",
400 supp_band->n_channels);
401
402 for (i = 0; i < supp_band->n_channels; i++)
403 pos += scnprintf(buf + pos, bufsz - pos,
404 "%d: %ddBm: BSS%s%s, %s.\n",
405 ieee80211_frequency_to_channel(
406 channels[i].center_freq),
407 channels[i].max_power,
408 channels[i].flags & IEEE80211_CHAN_RADAR ?
409 " (IEEE 802.11h required)" : "",
410 ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
411 || (channels[i].flags &
412 IEEE80211_CHAN_RADAR)) ? "" :
413 ", IBSS",
414 channels[i].flags &
415 IEEE80211_CHAN_PASSIVE_SCAN ?
416 "passive only" : "active/passive");
417
418 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
419 kfree(buf);
420 return ret;
421}
422
423
352DEBUGFS_READ_WRITE_FILE_OPS(sram); 424DEBUGFS_READ_WRITE_FILE_OPS(sram);
353DEBUGFS_WRITE_FILE_OPS(log_event); 425DEBUGFS_WRITE_FILE_OPS(log_event);
354DEBUGFS_READ_FILE_OPS(eeprom); 426DEBUGFS_READ_FILE_OPS(eeprom);
355DEBUGFS_READ_FILE_OPS(stations); 427DEBUGFS_READ_FILE_OPS(stations);
356DEBUGFS_READ_FILE_OPS(rx_statistics); 428DEBUGFS_READ_FILE_OPS(rx_statistics);
357DEBUGFS_READ_FILE_OPS(tx_statistics); 429DEBUGFS_READ_FILE_OPS(tx_statistics);
430DEBUGFS_READ_FILE_OPS(channels);
358 431
359/* 432/*
360 * Create the debugfs files and directories 433 * Create the debugfs files and directories
@@ -388,6 +461,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
388 DEBUGFS_ADD_FILE(stations, data); 461 DEBUGFS_ADD_FILE(stations, data);
389 DEBUGFS_ADD_FILE(rx_statistics, data); 462 DEBUGFS_ADD_FILE(rx_statistics, data);
390 DEBUGFS_ADD_FILE(tx_statistics, data); 463 DEBUGFS_ADD_FILE(tx_statistics, data);
464 DEBUGFS_ADD_FILE(channels, data);
391 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal); 465 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
392 DEBUGFS_ADD_BOOL(disable_chain_noise, rf, 466 DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
393 &priv->disable_chain_noise_cal); 467 &priv->disable_chain_noise_cal);
@@ -416,6 +490,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
416 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram); 490 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram);
417 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event); 491 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event);
418 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations); 492 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
493 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels);
419 DEBUGFS_REMOVE(priv->dbgfs->dir_data); 494 DEBUGFS_REMOVE(priv->dbgfs->dir_data);
420 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity); 495 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
421 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise); 496 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 9966d4e384ce..0468fcc1ea98 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -19,7 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
@@ -54,6 +54,7 @@ extern struct iwl_cfg iwl5100_agn_cfg;
54extern struct iwl_cfg iwl5350_agn_cfg; 54extern struct iwl_cfg iwl5350_agn_cfg;
55extern struct iwl_cfg iwl5100_bg_cfg; 55extern struct iwl_cfg iwl5100_bg_cfg;
56extern struct iwl_cfg iwl5100_abg_cfg; 56extern struct iwl_cfg iwl5100_abg_cfg;
57extern struct iwl_cfg iwl5150_agn_cfg;
57 58
58/* CT-KILL constants */ 59/* CT-KILL constants */
59#define CT_KILL_THRESHOLD 110 /* in Celsius */ 60#define CT_KILL_THRESHOLD 110 /* in Celsius */
@@ -113,11 +114,9 @@ struct iwl_queue {
113 * space less than this */ 114 * space less than this */
114} __attribute__ ((packed)); 115} __attribute__ ((packed));
115 116
116#define MAX_NUM_OF_TBS (20)
117
118/* One for each TFD */ 117/* One for each TFD */
119struct iwl_tx_info { 118struct iwl_tx_info {
120 struct sk_buff *skb[MAX_NUM_OF_TBS]; 119 struct sk_buff *skb[IWL_NUM_OF_TBS - 1];
121}; 120};
122 121
123/** 122/**
@@ -135,12 +134,13 @@ struct iwl_tx_info {
135 */ 134 */
136struct iwl_tx_queue { 135struct iwl_tx_queue {
137 struct iwl_queue q; 136 struct iwl_queue q;
138 struct iwl_tfd_frame *bd; 137 struct iwl_tfd *tfds;
139 struct iwl_cmd *cmd[TFD_TX_CMD_SLOTS]; 138 struct iwl_cmd *cmd[TFD_TX_CMD_SLOTS];
140 struct iwl_tx_info *txb; 139 struct iwl_tx_info *txb;
141 int need_update; 140 u8 need_update;
142 int sched_retry; 141 u8 sched_retry;
143 int active; 142 u8 active;
143 u8 swq_id;
144}; 144};
145 145
146#define IWL_NUM_SCAN_RATES (2) 146#define IWL_NUM_SCAN_RATES (2)
@@ -186,12 +186,6 @@ struct iwl_channel_info {
186 u8 fat_extension_channel; /* HT_IE_EXT_CHANNEL_* */ 186 u8 fat_extension_channel; /* HT_IE_EXT_CHANNEL_* */
187}; 187};
188 188
189struct iwl4965_clip_group {
190 /* maximum power level to prevent clipping for each rate, derived by
191 * us from this band's saturation power in EEPROM */
192 const s8 clip_powers[IWL_MAX_RATES];
193};
194
195 189
196#define IWL_TX_FIFO_AC0 0 190#define IWL_TX_FIFO_AC0 0
197#define IWL_TX_FIFO_AC1 1 191#define IWL_TX_FIFO_AC1 1
@@ -253,7 +247,8 @@ struct iwl_cmd_meta {
253 /* The CMD_SIZE_HUGE flag bit indicates that the command 247 /* The CMD_SIZE_HUGE flag bit indicates that the command
254 * structure is stored at the end of the shared queue memory. */ 248 * structure is stored at the end of the shared queue memory. */
255 u32 flags; 249 u32 flags;
256 250 DECLARE_PCI_UNMAP_ADDR(mapping)
251 DECLARE_PCI_UNMAP_LEN(len)
257} __attribute__ ((packed)); 252} __attribute__ ((packed));
258 253
259#define IWL_CMD_MAX_PAYLOAD 320 254#define IWL_CMD_MAX_PAYLOAD 320
@@ -269,24 +264,16 @@ struct iwl_cmd {
269 struct iwl_cmd_meta meta; /* driver data */ 264 struct iwl_cmd_meta meta; /* driver data */
270 struct iwl_cmd_header hdr; /* uCode API */ 265 struct iwl_cmd_header hdr; /* uCode API */
271 union { 266 union {
272 struct iwl_addsta_cmd addsta;
273 struct iwl_led_cmd led;
274 u32 flags; 267 u32 flags;
275 u8 val8; 268 u8 val8;
276 u16 val16; 269 u16 val16;
277 u32 val32; 270 u32 val32;
278 struct iwl4965_bt_cmd bt;
279 struct iwl4965_rxon_time_cmd rxon_time;
280 struct iwl_powertable_cmd powertable;
281 struct iwl_qosparam_cmd qosparam;
282 struct iwl_tx_cmd tx; 271 struct iwl_tx_cmd tx;
283 struct iwl4965_rxon_assoc_cmd rxon_assoc;
284 struct iwl_rem_sta_cmd rm_sta;
285 u8 *indirect;
286 u8 payload[IWL_CMD_MAX_PAYLOAD]; 272 u8 payload[IWL_CMD_MAX_PAYLOAD];
287 } __attribute__ ((packed)) cmd; 273 } __attribute__ ((packed)) cmd;
288} __attribute__ ((packed)); 274} __attribute__ ((packed));
289 275
276
290struct iwl_host_cmd { 277struct iwl_host_cmd {
291 u8 id; 278 u8 id;
292 u16 len; 279 u16 len;
@@ -309,7 +296,6 @@ struct iwl_host_cmd {
309 296
310/** 297/**
311 * struct iwl_rx_queue - Rx queue 298 * struct iwl_rx_queue - Rx queue
312 * @processed: Internal index to last handled Rx packet
313 * @read: Shared index to newest available Rx buffer 299 * @read: Shared index to newest available Rx buffer
314 * @write: Shared index to oldest written Rx packet 300 * @write: Shared index to oldest written Rx packet
315 * @free_count: Number of pre-allocated buffers in rx_free 301 * @free_count: Number of pre-allocated buffers in rx_free
@@ -324,26 +310,19 @@ struct iwl_rx_queue {
324 dma_addr_t dma_addr; 310 dma_addr_t dma_addr;
325 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; 311 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
326 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; 312 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
327 u32 processed;
328 u32 read; 313 u32 read;
329 u32 write; 314 u32 write;
330 u32 free_count; 315 u32 free_count;
331 struct list_head rx_free; 316 struct list_head rx_free;
332 struct list_head rx_used; 317 struct list_head rx_used;
333 int need_update; 318 int need_update;
319 struct iwl_rb_status *rb_stts;
320 dma_addr_t rb_stts_dma;
334 spinlock_t lock; 321 spinlock_t lock;
335}; 322};
336 323
337#define IWL_SUPPORTED_RATES_IE_LEN 8 324#define IWL_SUPPORTED_RATES_IE_LEN 8
338 325
339#define SCAN_INTERVAL 100
340
341#define MAX_A_CHANNELS 252
342#define MIN_A_CHANNELS 7
343
344#define MAX_B_CHANNELS 14
345#define MIN_B_CHANNELS 1
346
347#define MAX_TID_COUNT 9 326#define MAX_TID_COUNT 9
348 327
349#define IWL_INVALID_RATE 0xFF 328#define IWL_INVALID_RATE 0xFF
@@ -413,9 +392,8 @@ struct iwl_ht_info {
413 u8 max_amsdu_size; 392 u8 max_amsdu_size;
414 u8 ampdu_factor; 393 u8 ampdu_factor;
415 u8 mpdu_density; 394 u8 mpdu_density;
416 u8 supp_mcs_set[16]; 395 struct ieee80211_mcs_info mcs;
417 /* BSS related data */ 396 /* BSS related data */
418 u8 control_channel;
419 u8 extension_chan_offset; 397 u8 extension_chan_offset;
420 u8 tx_chan_width; 398 u8 tx_chan_width;
421 u8 ht_protection; 399 u8 ht_protection;
@@ -444,7 +422,6 @@ union iwl_qos_capabity {
444 422
445/* QoS structures */ 423/* QoS structures */
446struct iwl_qos_info { 424struct iwl_qos_info {
447 int qos_enable;
448 int qos_active; 425 int qos_active;
449 union iwl_qos_capabity qos_cap; 426 union iwl_qos_capabity qos_cap;
450 struct iwl_qosparam_cmd def_qos_parm; 427 struct iwl_qosparam_cmd def_qos_parm;
@@ -470,7 +447,7 @@ struct fw_desc {
470 447
471/* uCode file layout */ 448/* uCode file layout */
472struct iwl_ucode { 449struct iwl_ucode {
473 __le32 ver; /* major/minor/subminor */ 450 __le32 ver; /* major/minor/API/serial */
474 __le32 inst_size; /* bytes of runtime instructions */ 451 __le32 inst_size; /* bytes of runtime instructions */
475 __le32 data_size; /* bytes of runtime data */ 452 __le32 data_size; /* bytes of runtime data */
476 __le32 init_size; /* bytes of initialization instructions */ 453 __le32 init_size; /* bytes of initialization instructions */
@@ -511,11 +488,15 @@ struct iwl_sensitivity_ranges {
511}; 488};
512 489
513 490
514#define IWL_FAT_CHANNEL_52 BIT(IEEE80211_BAND_5GHZ) 491#define KELVIN_TO_CELSIUS(x) ((x)-273)
492#define CELSIUS_TO_KELVIN(x) ((x)+273)
493
515 494
516/** 495/**
517 * struct iwl_hw_params 496 * struct iwl_hw_params
518 * @max_txq_num: Max # Tx queues supported 497 * @max_txq_num: Max # Tx queues supported
498 * @dma_chnl_num: Number of Tx DMA/FIFO channels
499 * @scd_bc_tbls_size: size of scheduler byte count tables
519 * @tx/rx_chains_num: Number of TX/RX chains 500 * @tx/rx_chains_num: Number of TX/RX chains
520 * @valid_tx/rx_ant: usable antennas 501 * @valid_tx/rx_ant: usable antennas
521 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) 502 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
@@ -528,11 +509,13 @@ struct iwl_sensitivity_ranges {
528 * @sw_crypto: 0 for hw, 1 for sw 509 * @sw_crypto: 0 for hw, 1 for sw
529 * @max_xxx_size: for ucode uses 510 * @max_xxx_size: for ucode uses
530 * @ct_kill_threshold: temperature threshold 511 * @ct_kill_threshold: temperature threshold
512 * @calib_init_cfg: setup initial calibrations for the hw
531 * @struct iwl_sensitivity_ranges: range of sensitivity values 513 * @struct iwl_sensitivity_ranges: range of sensitivity values
532 * @first_ampdu_q: first HW queue available for ampdu
533 */ 514 */
534struct iwl_hw_params { 515struct iwl_hw_params {
535 u16 max_txq_num; 516 u8 max_txq_num;
517 u8 dma_chnl_num;
518 u16 scd_bc_tbls_size;
536 u8 tx_chains_num; 519 u8 tx_chains_num;
537 u8 rx_chains_num; 520 u8 rx_chains_num;
538 u8 valid_tx_ant; 521 u8 valid_tx_ant;
@@ -549,22 +532,10 @@ struct iwl_hw_params {
549 u32 max_data_size; 532 u32 max_data_size;
550 u32 max_bsm_size; 533 u32 max_bsm_size;
551 u32 ct_kill_threshold; /* value in hw-dependent units */ 534 u32 ct_kill_threshold; /* value in hw-dependent units */
535 u32 calib_init_cfg;
552 const struct iwl_sensitivity_ranges *sens; 536 const struct iwl_sensitivity_ranges *sens;
553 u8 first_ampdu_q;
554}; 537};
555 538
556#define HT_SHORT_GI_20MHZ (1 << 0)
557#define HT_SHORT_GI_40MHZ (1 << 1)
558
559
560#define IWL_RX_HDR(x) ((struct iwl4965_rx_frame_hdr *)(\
561 x->u.rx_frame.stats.payload + \
562 x->u.rx_frame.stats.phy_count))
563#define IWL_RX_END(x) ((struct iwl4965_rx_frame_end *)(\
564 IWL_RX_HDR(x)->payload + \
565 le16_to_cpu(IWL_RX_HDR(x)->len)))
566#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
567#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
568 539
569/****************************************************************************** 540/******************************************************************************
570 * 541 *
@@ -581,13 +552,8 @@ struct iwl_hw_params {
581 * iwl4965_mac_ <-- mac80211 callback 552 * iwl4965_mac_ <-- mac80211 callback
582 * 553 *
583 ****************************************************************************/ 554 ****************************************************************************/
584struct iwl_addsta_cmd; 555extern void iwl_update_chain_flags(struct iwl_priv *priv);
585extern int iwl_send_add_sta(struct iwl_priv *priv, 556extern int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
586 struct iwl_addsta_cmd *sta, u8 flags);
587extern u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr,
588 int is_ap, u8 flags, struct ieee80211_ht_info *ht_info);
589extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
590extern int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src);
591extern const u8 iwl_bcast_addr[ETH_ALEN]; 557extern const u8 iwl_bcast_addr[ETH_ALEN];
592extern int iwl_rxq_stop(struct iwl_priv *priv); 558extern int iwl_rxq_stop(struct iwl_priv *priv);
593extern void iwl_txq_ctx_stop(struct iwl_priv *priv); 559extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
@@ -611,19 +577,15 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
611} 577}
612 578
613 579
614struct iwl_priv; 580struct iwl_dma_ptr {
615 581 dma_addr_t dma;
616 582 void *addr;
617/* Structures, enum, and defines specific to the 4965 */
618
619#define IWL_KW_SIZE 0x1000 /*4k */
620
621struct iwl_kw {
622 dma_addr_t dma_addr;
623 void *v_addr;
624 size_t size; 583 size_t size;
625}; 584};
626 585
586#define HT_SHORT_GI_20MHZ (1 << 0)
587#define HT_SHORT_GI_40MHZ (1 << 1)
588
627#define IWL_CHANNEL_WIDTH_20MHZ 0 589#define IWL_CHANNEL_WIDTH_20MHZ 0
628#define IWL_CHANNEL_WIDTH_40MHZ 1 590#define IWL_CHANNEL_WIDTH_40MHZ 1
629 591
@@ -638,7 +600,6 @@ struct iwl_kw {
638#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 600#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
639 601
640/* Sensitivity and chain noise calibration */ 602/* Sensitivity and chain noise calibration */
641#define INTERFERENCE_DATA_AVAILABLE __constant_cpu_to_le32(1)
642#define INITIALIZATION_VALUE 0xFFFF 603#define INITIALIZATION_VALUE 0xFFFF
643#define CAL_NUM_OF_BEACONS 20 604#define CAL_NUM_OF_BEACONS 20
644#define MAXIMUM_ALLOWED_PATHLOSS 15 605#define MAXIMUM_ALLOWED_PATHLOSS 15
@@ -691,13 +652,20 @@ enum iwl4965_calib_enabled_state {
691 IWL_CALIB_ENABLED = 1, 652 IWL_CALIB_ENABLED = 1,
692}; 653};
693 654
694struct statistics_general_data { 655
695 u32 beacon_silence_rssi_a; 656/*
696 u32 beacon_silence_rssi_b; 657 * enum iwl_calib
697 u32 beacon_silence_rssi_c; 658 * defines the order in which results of initial calibrations
698 u32 beacon_energy_a; 659 * should be sent to the runtime uCode
699 u32 beacon_energy_b; 660 */
700 u32 beacon_energy_c; 661enum iwl_calib {
662 IWL_CALIB_XTAL,
663 IWL_CALIB_DC,
664 IWL_CALIB_LO,
665 IWL_CALIB_TX_IQ,
666 IWL_CALIB_TX_IQ_PERD,
667 IWL_CALIB_BASE_BAND,
668 IWL_CALIB_MAX
701}; 669};
702 670
703/* Opaque calibration results */ 671/* Opaque calibration results */
@@ -766,7 +734,6 @@ enum {
766 734
767 735
768#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */ 736#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */
769#define IWL_CALIB_MAX 3
770 737
771struct iwl_priv { 738struct iwl_priv {
772 739
@@ -790,7 +757,7 @@ struct iwl_priv {
790 757
791#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT 758#ifdef CONFIG_IWLAGN_SPECTRUM_MEASUREMENT
792 /* spectrum measurement report caching */ 759 /* spectrum measurement report caching */
793 struct iwl4965_spectrum_notification measure_report; 760 struct iwl_spectrum_notification measure_report;
794 u8 measurement_status; 761 u8 measurement_status;
795#endif 762#endif
796 /* ucode beacon time */ 763 /* ucode beacon time */
@@ -801,10 +768,6 @@ struct iwl_priv {
801 struct iwl_channel_info *channel_info; /* channel info array */ 768 struct iwl_channel_info *channel_info; /* channel info array */
802 u8 channel_count; /* # of channels */ 769 u8 channel_count; /* # of channels */
803 770
804 /* each calibration channel group in the EEPROM has a derived
805 * clip setting for each rate. */
806 const struct iwl4965_clip_group clip_groups[5];
807
808 /* thermal calibration */ 771 /* thermal calibration */
809 s32 temperature; /* degrees Kelvin */ 772 s32 temperature; /* degrees Kelvin */
810 s32 last_temperature; 773 s32 last_temperature;
@@ -818,12 +781,13 @@ struct iwl_priv {
818 unsigned long scan_start; 781 unsigned long scan_start;
819 unsigned long scan_pass_start; 782 unsigned long scan_pass_start;
820 unsigned long scan_start_tsf; 783 unsigned long scan_start_tsf;
784 struct iwl_scan_cmd *scan;
821 int scan_bands; 785 int scan_bands;
822 int one_direct_scan; 786 int one_direct_scan;
823 u8 direct_ssid_len; 787 u8 direct_ssid_len;
824 u8 direct_ssid[IW_ESSID_MAX_SIZE]; 788 u8 direct_ssid[IW_ESSID_MAX_SIZE];
825 struct iwl_scan_cmd *scan; 789 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
826 u32 scan_tx_ant[IEEE80211_NUM_BANDS]; 790 u8 mgmt_tx_ant;
827 791
828 /* spinlock */ 792 /* spinlock */
829 spinlock_t lock; /* protect general shared data */ 793 spinlock_t lock; /* protect general shared data */
@@ -840,6 +804,8 @@ struct iwl_priv {
840 u8 rev_id; 804 u8 rev_id;
841 805
842 /* uCode images, save to reload in case of failure */ 806 /* uCode images, save to reload in case of failure */
807 u32 ucode_ver; /* version of ucode, copy of
808 iwl_ucode.ver */
843 struct fw_desc ucode_code; /* runtime inst */ 809 struct fw_desc ucode_code; /* runtime inst */
844 struct fw_desc ucode_data; /* runtime data original */ 810 struct fw_desc ucode_data; /* runtime data original */
845 struct fw_desc ucode_data_backup; /* runtime data save/restore */ 811 struct fw_desc ucode_data_backup; /* runtime data save/restore */
@@ -850,7 +816,7 @@ struct iwl_priv {
850 u8 ucode_write_complete; /* the image write is complete */ 816 u8 ucode_write_complete; /* the image write is complete */
851 817
852 818
853 struct iwl4965_rxon_time_cmd rxon_timing; 819 struct iwl_rxon_time_cmd rxon_timing;
854 820
855 /* We declare this const so it can only be 821 /* We declare this const so it can only be
856 * changed via explicit cast within the 822 * changed via explicit cast within the
@@ -882,7 +848,6 @@ struct iwl_priv {
882 u16 active_rate_basic; 848 u16 active_rate_basic;
883 849
884 u8 assoc_station_added; 850 u8 assoc_station_added;
885 u8 use_ant_b_for_management_frame; /* Tx antenna selection */
886 u8 start_calib; 851 u8 start_calib;
887 struct iwl_sensitivity_data sensitivity_data; 852 struct iwl_sensitivity_data sensitivity_data;
888 struct iwl_chain_noise_data chain_noise_data; 853 struct iwl_chain_noise_data chain_noise_data;
@@ -903,12 +868,14 @@ struct iwl_priv {
903 struct iwl_rx_queue rxq; 868 struct iwl_rx_queue rxq;
904 struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES]; 869 struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES];
905 unsigned long txq_ctx_active_msk; 870 unsigned long txq_ctx_active_msk;
906 struct iwl_kw kw; /* keep warm address */ 871 struct iwl_dma_ptr kw; /* keep warm address */
872 struct iwl_dma_ptr scd_bc_tbls;
873
907 u32 scd_base_addr; /* scheduler sram base address */ 874 u32 scd_base_addr; /* scheduler sram base address */
908 875
909 unsigned long status; 876 unsigned long status;
910 877
911 int last_rx_rssi; /* From Rx packet statisitics */ 878 int last_rx_rssi; /* From Rx packet statistics */
912 int last_rx_noise; /* From beacon statistics */ 879 int last_rx_noise; /* From beacon statistics */
913 880
914 /* counts mgmt, ctl, and data packets */ 881 /* counts mgmt, ctl, and data packets */
@@ -923,8 +890,6 @@ struct iwl_priv {
923 unsigned long last_statistics_time; 890 unsigned long last_statistics_time;
924 891
925 /* context information */ 892 /* context information */
926 u8 essid[IW_ESSID_MAX_SIZE];
927 u8 essid_len;
928 u16 rates_mask; 893 u16 rates_mask;
929 894
930 u32 power_mode; 895 u32 power_mode;
@@ -965,11 +930,7 @@ struct iwl_priv {
965 struct ieee80211_vif *vif; 930 struct ieee80211_vif *vif;
966 931
967 struct iwl_hw_params hw_params; 932 struct iwl_hw_params hw_params;
968 /* driver/uCode shared Tx Byte Counts and Rx status */ 933
969 void *shared_virt;
970 int rb_closed_offset;
971 /* Physical Pointer to Tx Byte Counts and Rx status */
972 dma_addr_t shared_phys;
973 934
974 /* Current association information needed to configure the 935 /* Current association information needed to configure the
975 * hardware */ 936 * hardware */
@@ -992,7 +953,6 @@ struct iwl_priv {
992 struct work_struct report_work; 953 struct work_struct report_work;
993 struct work_struct request_scan; 954 struct work_struct request_scan;
994 struct work_struct beacon_update; 955 struct work_struct beacon_update;
995 struct work_struct set_monitor;
996 956
997 struct tasklet_struct irq_tasklet; 957 struct tasklet_struct irq_tasklet;
998 958
@@ -1004,9 +964,6 @@ struct iwl_priv {
1004 s8 tx_power_user_lmt; 964 s8 tx_power_user_lmt;
1005 s8 tx_power_channel_lmt; 965 s8 tx_power_channel_lmt;
1006 966
1007#ifdef CONFIG_PM
1008 u32 pm_state[16];
1009#endif
1010 967
1011#ifdef CONFIG_IWLWIFI_DEBUG 968#ifdef CONFIG_IWLWIFI_DEBUG
1012 /* debugging info */ 969 /* debugging info */
@@ -1091,26 +1048,4 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
1091 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; 1048 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
1092} 1049}
1093 1050
1094#ifdef CONFIG_IWLWIFI_DEBUG
1095static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
1096 void *p, u32 len)
1097{
1098 if (!(priv->debug_level & level))
1099 return;
1100
1101 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
1102 p, len, 1);
1103}
1104#else
1105static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
1106 void *p, u32 len)
1107{
1108}
1109#endif
1110
1111extern const struct iwl_channel_info *iwl_get_channel_info(
1112 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
1113
1114/* Requires full declaration of iwl_priv before including */
1115
1116#endif /* __iwl_dev_h__ */ 1051#endif /* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 37155755efc5..ce2f47306cea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -169,10 +169,9 @@ int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv)
169 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); 169 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
170 170
171 /* See if we got it */ 171 /* See if we got it */
172 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, 172 ret = iwl_poll_direct_bit(priv, CSR_HW_IF_CONFIG_REG,
173 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, 173 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
174 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, 174 EEPROM_SEM_TIMEOUT);
175 EEPROM_SEM_TIMEOUT);
176 if (ret >= 0) { 175 if (ret >= 0) {
177 IWL_DEBUG_IO("Acquired semaphore after %d tries.\n", 176 IWL_DEBUG_IO("Acquired semaphore after %d tries.\n",
178 count+1); 177 count+1);
@@ -210,10 +209,8 @@ int iwl_eeprom_init(struct iwl_priv *priv)
210{ 209{
211 u16 *e; 210 u16 *e;
212 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 211 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
213 u32 r;
214 int sz = priv->cfg->eeprom_size; 212 int sz = priv->cfg->eeprom_size;
215 int ret; 213 int ret;
216 int i;
217 u16 addr; 214 u16 addr;
218 215
219 /* allocate eeprom */ 216 /* allocate eeprom */
@@ -241,22 +238,19 @@ int iwl_eeprom_init(struct iwl_priv *priv)
241 238
242 /* eeprom is an array of 16bit values */ 239 /* eeprom is an array of 16bit values */
243 for (addr = 0; addr < sz; addr += sizeof(u16)) { 240 for (addr = 0; addr < sz; addr += sizeof(u16)) {
244 _iwl_write32(priv, CSR_EEPROM_REG, addr << 1); 241 u32 r;
245 _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD); 242
246 243 _iwl_write32(priv, CSR_EEPROM_REG,
247 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT; 244 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
248 i += IWL_EEPROM_ACCESS_DELAY) {
249 r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
250 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
251 break;
252 udelay(IWL_EEPROM_ACCESS_DELAY);
253 }
254 245
255 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { 246 ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG,
247 CSR_EEPROM_REG_READ_VALID_MSK,
248 IWL_EEPROM_ACCESS_TIMEOUT);
249 if (ret < 0) {
256 IWL_ERROR("Time out reading EEPROM[%d]\n", addr); 250 IWL_ERROR("Time out reading EEPROM[%d]\n", addr);
257 ret = -ETIMEDOUT;
258 goto done; 251 goto done;
259 } 252 }
253 r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
260 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); 254 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
261 } 255 }
262 ret = 0; 256 ret = 0;
@@ -279,7 +273,23 @@ EXPORT_SYMBOL(iwl_eeprom_free);
279 273
280int iwl_eeprom_check_version(struct iwl_priv *priv) 274int iwl_eeprom_check_version(struct iwl_priv *priv)
281{ 275{
282 return priv->cfg->ops->lib->eeprom_ops.check_version(priv); 276 u16 eeprom_ver;
277 u16 calib_ver;
278
279 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
280 calib_ver = priv->cfg->ops->lib->eeprom_ops.calib_version(priv);
281
282 if (eeprom_ver < priv->cfg->eeprom_ver ||
283 calib_ver < priv->cfg->eeprom_calib_ver)
284 goto err;
285
286 return 0;
287err:
288 IWL_ERROR("Unsupported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
289 eeprom_ver, priv->cfg->eeprom_ver,
290 calib_ver, priv->cfg->eeprom_calib_ver);
291 return -EINVAL;
292
283} 293}
284EXPORT_SYMBOL(iwl_eeprom_check_version); 294EXPORT_SYMBOL(iwl_eeprom_check_version);
285 295
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index d3a2a5b4ac56..603c84bed630 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -68,17 +68,14 @@ struct iwl_priv;
68/* 68/*
69 * EEPROM access time values: 69 * EEPROM access time values:
70 * 70 *
71 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG, 71 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
72 * then clearing (with subsequent read/modify/write) CSR_EEPROM_REG bit
73 * CSR_EEPROM_REG_BIT_CMD (0x2).
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). 72 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. 73 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. 74 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */ 75 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ 76#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79#define IWL_EEPROM_ACCESS_DELAY 10 /* uSec */
80 77
81#define IWL_EEPROM_SEM_TIMEOUT 10 /* milliseconds */ 78#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
82#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ 79#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
83 80
84 81
@@ -147,6 +144,7 @@ struct iwl_eeprom_channel {
147/*5000 calibrations */ 144/*5000 calibrations */
148#define EEPROM_5000_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION) 145#define EEPROM_5000_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
149#define EEPROM_5000_XTAL ((2*0x128) | EEPROM_5000_CALIB_ALL) 146#define EEPROM_5000_XTAL ((2*0x128) | EEPROM_5000_CALIB_ALL)
147#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_5000_CALIB_ALL)
150 148
151/* 5000 links */ 149/* 5000 links */
152#define EEPROM_5000_LINK_HOST (2*0x64) 150#define EEPROM_5000_LINK_HOST (2*0x64)
@@ -174,6 +172,9 @@ struct iwl_eeprom_channel {
174#define EEPROM_5000_REG_BAND_52_FAT_CHANNELS ((0x92)\ 172#define EEPROM_5000_REG_BAND_52_FAT_CHANNELS ((0x92)\
175 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ 173 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
176 174
175/* 5050 Specific */
176#define EEPROM_5050_TX_POWER_VERSION (4)
177#define EEPROM_5050_EEPROM_VERSION (0x21E)
177 178
178/* 2.4 GHz */ 179/* 2.4 GHz */
179extern const u8 iwl_eeprom_band_1[14]; 180extern const u8 iwl_eeprom_band_1[14];
@@ -371,7 +372,7 @@ struct iwl_eeprom_ops {
371 int (*verify_signature) (struct iwl_priv *priv); 372 int (*verify_signature) (struct iwl_priv *priv);
372 int (*acquire_semaphore) (struct iwl_priv *priv); 373 int (*acquire_semaphore) (struct iwl_priv *priv);
373 void (*release_semaphore) (struct iwl_priv *priv); 374 void (*release_semaphore) (struct iwl_priv *priv);
374 int (*check_version) (struct iwl_priv *priv); 375 u16 (*calib_version) (struct iwl_priv *priv);
375 const u8* (*query_addr) (const struct iwl_priv *priv, size_t offset); 376 const u8* (*query_addr) (const struct iwl_priv *priv, size_t offset);
376}; 377};
377 378
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index a72efdf6d1dd..d7da19864550 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -60,6 +60,8 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#ifndef __iwl_fh_h__
64#define __iwl_fh_h__
63 65
64/****************************/ 66/****************************/
65/* Flow Handler Definitions */ 67/* Flow Handler Definitions */
@@ -70,7 +72,7 @@
70 * Addresses are offsets from device's PCI hardware base address. 72 * Addresses are offsets from device's PCI hardware base address.
71 */ 73 */
72#define FH_MEM_LOWER_BOUND (0x1000) 74#define FH_MEM_LOWER_BOUND (0x1000)
73#define FH_MEM_UPPER_BOUND (0x1EF0) 75#define FH_MEM_UPPER_BOUND (0x2000)
74 76
75/** 77/**
76 * Keep-Warm (KW) buffer base address. 78 * Keep-Warm (KW) buffer base address.
@@ -264,6 +266,7 @@
264#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) 266#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
265#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) 267#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
266 268
269#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
267 270
268/** 271/**
269 * Rx Shared Status Registers (RSSR) 272 * Rx Shared Status Registers (RSSR)
@@ -290,6 +293,13 @@
290 293
291#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 294#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
292 295
296/* TFDB Area - TFDs buffer table */
297#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
298#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
299#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
300#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
301#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
302
293/** 303/**
294 * Transmit DMA Channel Control/Status Registers (TCSR) 304 * Transmit DMA Channel Control/Status Registers (TCSR)
295 * 305 *
@@ -316,34 +326,41 @@
316#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) 326#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
317 327
318/* Find Control/Status reg for given Tx DMA/FIFO channel */ 328/* Find Control/Status reg for given Tx DMA/FIFO channel */
319#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ 329#define FH49_TCSR_CHNL_NUM (7)
320 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl) 330#define FH50_TCSR_CHNL_NUM (8)
331
332/* TCSR: tx_config register values */
333#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
334 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
335#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
336 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
337#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
338 (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
339
340#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
341#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
321 342
322#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000) 343#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
323#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008) 344#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
324 345
325#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) 346#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
326#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) 347#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
327#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) 348#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
328 349
329#define FH_TCSR_CHNL_NUM (7) 350#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
351#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
352#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
330 353
331#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) 354#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
332#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) 355#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
333#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) 356#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
334 357
335#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) 358#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
336#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) 359#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
337#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) 360#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
338 361
339#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) 362#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
340#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) 363#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
341#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
342 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl)
343#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
344 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl + 0x4)
345#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
346 (FH_TCSR_LOWER_BOUND + 0x20 * _chnl + 0x8)
347 364
348/** 365/**
349 * Tx Shared Status Registers (TSSR) 366 * Tx Shared Status Registers (TSSR)
@@ -360,7 +377,7 @@
360#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0) 377#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
361#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0) 378#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
362 379
363#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) 380#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
364 381
365#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24) 382#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24)
366#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16) 383#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16)
@@ -369,25 +386,99 @@
369 (FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \ 386 (FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) | \
370 FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl)) 387 FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl))
371 388
372
373
374#define FH_REGS_LOWER_BOUND (0x1000)
375#define FH_REGS_UPPER_BOUND (0x2000)
376
377/* Tx service channels */ 389/* Tx service channels */
378#define FH_SRVC_CHNL (9) 390#define FH_SRVC_CHNL (9)
379#define FH_SRVC_LOWER_BOUND (FH_REGS_LOWER_BOUND + 0x9C8) 391#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
380#define FH_SRVC_UPPER_BOUND (FH_REGS_LOWER_BOUND + 0x9D0) 392#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
381#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \ 393#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
382 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) 394 (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
383 395
384/* TFDB Area - TFDs buffer table */ 396#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
385#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) 397/* Instruct FH to increment the retry count of a packet when
386#define FH_TFDIB_LOWER_BOUND (FH_REGS_LOWER_BOUND + 0x900) 398 * it is brought from the memory to TX-FIFO
387#define FH_TFDIB_UPPER_BOUND (FH_REGS_LOWER_BOUND + 0x958) 399 */
388#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) 400#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
389#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
390 401
391/* TCSR: tx_config register values */ 402/**
392#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */ 403 * struct iwl_rb_status - reseve buffer status
404 * host memory mapped FH registers
405 * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
406 * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
407 * @finished_rb_num [0:11] - Indicates the index of the current RB
408 * in which the last frame was written to
409 * @finished_fr_num [0:11] - Indicates the index of the RX Frame
410 * which was transfered
411 */
412struct iwl_rb_status {
413 __le16 closed_rb_num;
414 __le16 closed_fr_num;
415 __le16 finished_rb_num;
416 __le16 finished_fr_nam;
417} __attribute__ ((packed));
418
419
420#define TFD_QUEUE_SIZE_MAX (256)
421#define TFD_QUEUE_SIZE_BC_DUP (64)
422#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
423#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
424#define IWL_NUM_OF_TBS 20
425
426static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
427{
428 return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
429}
430/**
431 * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
432 *
433 * This structure contains dma address and length of transmission address
434 *
435 * @lo: low [31:0] portion of the dma address of TX buffer
436 * every even is unaligned on 16 bit boundary
437 * @hi_n_len 0-3 [35:32] portion of dma
438 * 4-15 length of the tx buffer
439 */
440struct iwl_tfd_tb {
441 __le32 lo;
442 __le16 hi_n_len;
443} __attribute__((packed));
444
445/**
446 * struct iwl_tfd
447 *
448 * Transmit Frame Descriptor (TFD)
449 *
450 * @ __reserved1[3] reserved
451 * @ num_tbs 0-4 number of active tbs
452 * 5 reserved
453 * 6-7 padding (not used)
454 * @ tbs[20] transmit frame buffer descriptors
455 * @ __pad padding
456 *
457 * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
458 * Both driver and device share these circular buffers, each of which must be
459 * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
460 *
461 * Driver must indicate the physical address of the base of each
462 * circular buffer via the FH_MEM_CBBC_QUEUE registers.
463 *
464 * Each TFD contains pointer/size information for up to 20 data buffers
465 * in host DRAM. These buffers collectively contain the (one) frame described
466 * by the TFD. Each buffer must be a single contiguous block of memory within
467 * itself, but buffers may be scattered in host DRAM. Each buffer has max size
468 * of (4K - 4). The concatenates all of a TFD's buffers into a single
469 * Tx frame, up to 8 KBytes in size.
470 *
471 * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
472 */
473struct iwl_tfd {
474 u8 __reserved1[3];
475 u8 num_tbs;
476 struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
477 __le32 __pad;
478} __attribute__ ((packed));
479
480
481/* Keep Warm Size */
482#define IWL_KW_SIZE 0x1000 /* 4k */
393 483
484#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 8300f3d00a06..01a2169cecec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -22,7 +22,7 @@
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Tomas Winkler <tomas.winkler@intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/ 27 *****************************************************************************/
28 28
@@ -36,7 +36,7 @@
36#include "iwl-core.h" 36#include "iwl-core.h"
37 37
38 38
39#define IWL_CMD(x) case x : return #x 39#define IWL_CMD(x) case x: return #x
40 40
41const char *get_cmd_string(u8 cmd) 41const char *get_cmd_string(u8 cmd)
42{ 42{
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 41eed6793328..ca4f638ab9d0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -22,7 +22,7 @@
22 * file called LICENSE. 22 * file called LICENSE.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
@@ -32,110 +32,6 @@
32 32
33#include <linux/ctype.h> 33#include <linux/ctype.h>
34 34
35/*
36 * The structures defined by the hardware/uCode interface
37 * have bit-wise operations. For each bit-field there is
38 * a data symbol in the structure, the start bit position
39 * and the length of the bit-field.
40 *
41 * iwl_get_bits and iwl_set_bits will return or set the
42 * appropriate bits on a 32-bit value.
43 *
44 * IWL_GET_BITS and IWL_SET_BITS use symbol expansion to
45 * expand out to the appropriate call to iwl_get_bits
46 * and iwl_set_bits without having to reference all of the
47 * numerical constants and defines provided in the hardware
48 * definition
49 */
50
51/**
52 * iwl_get_bits - Extract a hardware bit-field value
53 * @src: source hardware value (__le32)
54 * @pos: bit-position (0-based) of first bit of value
55 * @len: length of bit-field
56 *
57 * iwl_get_bits will return the bit-field in cpu endian ordering.
58 *
59 * NOTE: If used from IWL_GET_BITS then pos and len are compile-constants and
60 * will collapse to minimal code by the compiler.
61 */
62static inline u32 iwl_get_bits(__le32 src, u8 pos, u8 len)
63{
64 u32 tmp = le32_to_cpu(src);
65
66 tmp >>= pos;
67 tmp &= (1UL << len) - 1;
68 return tmp;
69}
70
71/**
72 * iwl_set_bits - Set a hardware bit-field value
73 * @dst: Address of __le32 hardware value
74 * @pos: bit-position (0-based) of first bit of value
75 * @len: length of bit-field
76 * @val: cpu endian value to encode into the bit-field
77 *
78 * iwl_set_bits will encode val into dst, masked to be len bits long at bit
79 * position pos.
80 *
81 * NOTE: If used IWL_SET_BITS pos and len will be compile-constants and
82 * will collapse to minimal code by the compiler.
83 */
84static inline void iwl_set_bits(__le32 *dst, u8 pos, u8 len, int val)
85{
86 u32 tmp = le32_to_cpu(*dst);
87
88 tmp &= ~(((1UL << len) - 1) << pos);
89 tmp |= (val & ((1UL << len) - 1)) << pos;
90 *dst = cpu_to_le32(tmp);
91}
92
93static inline void iwl_set_bits16(__le16 *dst, u8 pos, u8 len, int val)
94{
95 u16 tmp = le16_to_cpu(*dst);
96
97 tmp &= ~((1UL << (pos + len)) - (1UL << pos));
98 tmp |= (val & ((1UL << len) - 1)) << pos;
99 *dst = cpu_to_le16(tmp);
100}
101
102/*
103 * The bit-field definitions in iwl-xxxx-hw.h are in the form of:
104 *
105 * struct example {
106 * __le32 val1;
107 * #define IWL_name_POS 8
108 * #define IWL_name_LEN 4
109 * #define IWL_name_SYM val1
110 * };
111 *
112 * The IWL_SET_BITS and IWL_GET_BITS macros are provided to allow the driver
113 * to call:
114 *
115 * struct example bar;
116 * u32 val = IWL_GET_BITS(bar, name);
117 * val = val * 2;
118 * IWL_SET_BITS(bar, name, val);
119 *
120 * All cpu / host ordering, masking, and shifts are performed by the macros
121 * and iwl_{get,set}_bits.
122 *
123 */
124#define IWL_SET_BITS(s, sym, v) \
125 iwl_set_bits(&(s).IWL_ ## sym ## _SYM, IWL_ ## sym ## _POS, \
126 IWL_ ## sym ## _LEN, (v))
127
128#define IWL_SET_BITS16(s, sym, v) \
129 iwl_set_bits16(&(s).IWL_ ## sym ## _SYM, IWL_ ## sym ## _POS, \
130 IWL_ ## sym ## _LEN, (v))
131
132#define IWL_GET_BITS(s, sym) \
133 iwl_get_bits((s).IWL_ ## sym ## _SYM, IWL_ ## sym ## _POS, \
134 IWL_ ## sym ## _LEN)
135
136
137#define KELVIN_TO_CELSIUS(x) ((x)-273)
138#define CELSIUS_TO_KELVIN(x) ((x)+273)
139#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) 35#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
140 36
141 37
@@ -159,11 +55,6 @@ static inline unsigned long elapsed_jiffies(unsigned long start,
159 return end + (MAX_JIFFY_OFFSET - start) + 1; 55 return end + (MAX_JIFFY_OFFSET - start) + 1;
160} 56}
161 57
162static inline u8 iwl_get_dma_hi_address(dma_addr_t addr)
163{
164 return sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0;
165}
166
167/** 58/**
168 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning 59 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
169 * @index -- current index 60 * @index -- current index
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 9740fcc1805e..0a92e7431ada 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -21,7 +21,7 @@
21 * file called LICENSE. 21 * file called LICENSE.
22 * 22 *
23 * Contact Information: 23 * Contact Information:
24 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 * 26 *
27 *****************************************************************************/ 27 *****************************************************************************/
@@ -55,7 +55,7 @@
55 * _iwl_read32.) 55 * _iwl_read32.)
56 * 56 *
57 * These declarations are *extremely* useful in quickly isolating code deltas 57 * These declarations are *extremely* useful in quickly isolating code deltas
58 * which result in misconfiguring of the hardware I/O. In combination with 58 * which result in misconfiguration of the hardware I/O. In combination with
59 * git-bisect and the IO debug level you can quickly determine the specific 59 * git-bisect and the IO debug level you can quickly determine the specific
60 * commit which breaks the IO sequence to the hardware. 60 * commit which breaks the IO sequence to the hardware.
61 * 61 *
@@ -87,17 +87,18 @@ static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
87#define iwl_read32(p, o) _iwl_read32(p, o) 87#define iwl_read32(p, o) _iwl_read32(p, o)
88#endif 88#endif
89 89
90#define IWL_POLL_INTERVAL 10 /* microseconds */
90static inline int _iwl_poll_bit(struct iwl_priv *priv, u32 addr, 91static inline int _iwl_poll_bit(struct iwl_priv *priv, u32 addr,
91 u32 bits, u32 mask, int timeout) 92 u32 bits, u32 mask, int timeout)
92{ 93{
93 int i = 0; 94 int t = 0;
94 95
95 do { 96 do {
96 if ((_iwl_read32(priv, addr) & mask) == (bits & mask)) 97 if ((_iwl_read32(priv, addr) & mask) == (bits & mask))
97 return i; 98 return t;
98 mdelay(10); 99 udelay(IWL_POLL_INTERVAL);
99 i += 10; 100 t += IWL_POLL_INTERVAL;
100 } while (i < timeout); 101 } while (t < timeout);
101 102
102 return -ETIMEDOUT; 103 return -ETIMEDOUT;
103} 104}
@@ -109,7 +110,7 @@ static inline int __iwl_poll_bit(const char *f, u32 l,
109 int ret = _iwl_poll_bit(priv, addr, bits, mask, timeout); 110 int ret = _iwl_poll_bit(priv, addr, bits, mask, timeout);
110 IWL_DEBUG_IO("poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n", 111 IWL_DEBUG_IO("poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
111 addr, bits, mask, 112 addr, bits, mask,
112 unlikely(ret == -ETIMEDOUT)?"timeout":"", f, l); 113 unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
113 return ret; 114 return ret;
114} 115}
115#define iwl_poll_bit(priv, addr, bits, mask, timeout) \ 116#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
@@ -269,19 +270,10 @@ static inline void iwl_write_reg_buf(struct iwl_priv *priv,
269 } 270 }
270} 271}
271 272
272static inline int _iwl_poll_direct_bit(struct iwl_priv *priv, 273static inline int _iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr,
273 u32 addr, u32 mask, int timeout) 274 u32 mask, int timeout)
274{ 275{
275 int i = 0; 276 return _iwl_poll_bit(priv, addr, mask, mask, timeout);
276
277 do {
278 if ((_iwl_read_direct32(priv, addr) & mask) == mask)
279 return i;
280 mdelay(10);
281 i += 10;
282 } while (i < timeout);
283
284 return -ETIMEDOUT;
285} 277}
286 278
287#ifdef CONFIG_IWLWIFI_DEBUG 279#ifdef CONFIG_IWLWIFI_DEBUG
@@ -308,6 +300,7 @@ static inline int __iwl_poll_direct_bit(const char *f, u32 l,
308static inline u32 _iwl_read_prph(struct iwl_priv *priv, u32 reg) 300static inline u32 _iwl_read_prph(struct iwl_priv *priv, u32 reg)
309{ 301{
310 _iwl_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); 302 _iwl_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
303 rmb();
311 return _iwl_read_direct32(priv, HBUS_TARG_PRPH_RDAT); 304 return _iwl_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
312} 305}
313#ifdef CONFIG_IWLWIFI_DEBUG 306#ifdef CONFIG_IWLWIFI_DEBUG
@@ -330,6 +323,7 @@ static inline void _iwl_write_prph(struct iwl_priv *priv,
330{ 323{
331 _iwl_write_direct32(priv, HBUS_TARG_PRPH_WADDR, 324 _iwl_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
332 ((addr & 0x0000FFFF) | (3 << 24))); 325 ((addr & 0x0000FFFF) | (3 << 24)));
326 wmb();
333 _iwl_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val); 327 _iwl_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
334} 328}
335#ifdef CONFIG_IWLWIFI_DEBUG 329#ifdef CONFIG_IWLWIFI_DEBUG
@@ -392,12 +386,14 @@ static inline void iwl_clear_bits_prph(struct iwl_priv
392static inline u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr) 386static inline u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr)
393{ 387{
394 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr); 388 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
389 rmb();
395 return iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); 390 return iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
396} 391}
397 392
398static inline void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val) 393static inline void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
399{ 394{
400 iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); 395 iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
396 wmb();
401 iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, val); 397 iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
402} 398}
403 399
@@ -405,6 +401,7 @@ static inline void iwl_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
405 u32 len, u32 *values) 401 u32 len, u32 *values)
406{ 402{
407 iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); 403 iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
404 wmb();
408 for (; 0 < len; len -= sizeof(u32), values++) 405 for (; 0 < len; len -= sizeof(u32), values++)
409 iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, *values); 406 iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, *values);
410} 407}
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 4eee1b163cd2..11eccd7d268c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -19,7 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
@@ -41,7 +41,6 @@
41#include "iwl-dev.h" 41#include "iwl-dev.h"
42#include "iwl-core.h" 42#include "iwl-core.h"
43#include "iwl-io.h" 43#include "iwl-io.h"
44#include "iwl-helpers.h"
45 44
46#ifdef CONFIG_IWLWIFI_DEBUG 45#ifdef CONFIG_IWLWIFI_DEBUG
47static const char *led_type_str[] = { 46static const char *led_type_str[] = {
@@ -278,7 +277,7 @@ static int iwl_get_blink_rate(struct iwl_priv *priv)
278 /* FIXME: + priv->rx_stats[2].bytes; */ 277 /* FIXME: + priv->rx_stats[2].bytes; */
279 s64 tpt = current_tpt - priv->led_tpt; 278 s64 tpt = current_tpt - priv->led_tpt;
280 279
281 if (tpt < 0) /* wrapparound */ 280 if (tpt < 0) /* wraparound */
282 tpt = -tpt; 281 tpt = -tpt;
283 282
284 IWL_DEBUG_LED("tpt %lld current_tpt %llu\n", 283 IWL_DEBUG_LED("tpt %lld current_tpt %llu\n",
@@ -293,7 +292,7 @@ static int iwl_get_blink_rate(struct iwl_priv *priv)
293 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE)) 292 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
294 break; 293 break;
295 294
296 IWL_DEBUG_LED("LED BLINK IDX=%d", i); 295 IWL_DEBUG_LED("LED BLINK IDX=%d\n", i);
297 return i; 296 return i;
298} 297}
299 298
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 588c9ad20e83..021e00bcd1be 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -19,7 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 60a03d2d2d0e..75ca6a542174 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -22,7 +22,7 @@
22 * file called LICENSE. 22 * file called LICENSE.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/ 27 *****************************************************************************/
28 28
@@ -39,7 +39,6 @@
39#include "iwl-commands.h" 39#include "iwl-commands.h"
40#include "iwl-debug.h" 40#include "iwl-debug.h"
41#include "iwl-power.h" 41#include "iwl-power.h"
42#include "iwl-helpers.h"
43 42
44/* 43/*
45 * Setting power level allow the card to go to sleep when not busy 44 * Setting power level allow the card to go to sleep when not busy
@@ -80,7 +79,7 @@
80#define IWL_REDUCED_POWER_TEMPERATURE 95 79#define IWL_REDUCED_POWER_TEMPERATURE 95
81 80
82/* default power management (not Tx power) table values */ 81/* default power management (not Tx power) table values */
83/* for tim 0-10 */ 82/* for TIM 0-10 */
84static struct iwl_power_vec_entry range_0[IWL_POWER_MAX] = { 83static struct iwl_power_vec_entry range_0[IWL_POWER_MAX] = {
85 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, 84 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
86 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, 85 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
@@ -91,7 +90,7 @@ static struct iwl_power_vec_entry range_0[IWL_POWER_MAX] = {
91}; 90};
92 91
93 92
94/* for tim = 3-10 */ 93/* for TIM = 3-10 */
95static struct iwl_power_vec_entry range_1[IWL_POWER_MAX] = { 94static struct iwl_power_vec_entry range_1[IWL_POWER_MAX] = {
96 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, 95 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
97 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, 96 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
@@ -101,7 +100,7 @@ static struct iwl_power_vec_entry range_1[IWL_POWER_MAX] = {
101 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2} 100 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2}
102}; 101};
103 102
104/* for tim > 11 */ 103/* for TIM > 11 */
105static struct iwl_power_vec_entry range_2[IWL_POWER_MAX] = { 104static struct iwl_power_vec_entry range_2[IWL_POWER_MAX] = {
106 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, 105 {{NOSLP, SLP_TOUT(0), SLP_TOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
107 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, 106 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
@@ -183,7 +182,7 @@ static int iwl_power_init_handle(struct iwl_priv *priv)
183 return 0; 182 return 0;
184} 183}
185 184
186/* adjust power command according to dtim period and power level*/ 185/* adjust power command according to DTIM period and power level*/
187static int iwl_update_power_command(struct iwl_priv *priv, 186static int iwl_update_power_command(struct iwl_priv *priv,
188 struct iwl_powertable_cmd *cmd, 187 struct iwl_powertable_cmd *cmd,
189 u16 mode) 188 u16 mode)
@@ -257,15 +256,11 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
257 struct iwl_power_mgr *setting = &(priv->power_data); 256 struct iwl_power_mgr *setting = &(priv->power_data);
258 int ret = 0; 257 int ret = 0;
259 u16 uninitialized_var(final_mode); 258 u16 uninitialized_var(final_mode);
259 bool update_chains;
260 260
261 /* Don't update the RX chain when chain noise calibration is running */ 261 /* Don't update the RX chain when chain noise calibration is running */
262 if (priv->chain_noise_data.state != IWL_CHAIN_NOISE_DONE && 262 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
263 priv->chain_noise_data.state != IWL_CHAIN_NOISE_ALIVE) { 263 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
264 IWL_DEBUG_POWER("Cannot update the power, chain noise "
265 "calibration running: %d\n",
266 priv->chain_noise_data.state);
267 return -EAGAIN;
268 }
269 264
270 /* If on battery, set to 3, 265 /* If on battery, set to 3,
271 * if plugged into AC power, set to CAM ("continuously aware mode"), 266 * if plugged into AC power, set to CAM ("continuously aware mode"),
@@ -313,9 +308,12 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
313 else 308 else
314 set_bit(STATUS_POWER_PMI, &priv->status); 309 set_bit(STATUS_POWER_PMI, &priv->status);
315 310
316 if (priv->cfg->ops->lib->update_chain_flags) 311 if (priv->cfg->ops->lib->update_chain_flags && update_chains)
317 priv->cfg->ops->lib->update_chain_flags(priv); 312 priv->cfg->ops->lib->update_chain_flags(priv);
318 313 else
314 IWL_DEBUG_POWER("Cannot update the power, chain noise "
315 "calibration running: %d\n",
316 priv->chain_noise_data.state);
319 if (!ret) 317 if (!ret)
320 setting->power_mode = final_mode; 318 setting->power_mode = final_mode;
321 } 319 }
@@ -325,7 +323,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
325EXPORT_SYMBOL(iwl_power_update_mode); 323EXPORT_SYMBOL(iwl_power_update_mode);
326 324
327/* Allow other iwl code to disable/enable power management active 325/* Allow other iwl code to disable/enable power management active
328 * this will be usefull for rate scale to disable PM during heavy 326 * this will be useful for rate scale to disable PM during heavy
329 * Tx/Rx activities 327 * Tx/Rx activities
330 */ 328 */
331int iwl_power_disable_management(struct iwl_priv *priv, u32 ms) 329int iwl_power_disable_management(struct iwl_priv *priv, u32 ms)
@@ -352,8 +350,8 @@ int iwl_power_disable_management(struct iwl_priv *priv, u32 ms)
352EXPORT_SYMBOL(iwl_power_disable_management); 350EXPORT_SYMBOL(iwl_power_disable_management);
353 351
354/* Allow other iwl code to disable/enable power management active 352/* Allow other iwl code to disable/enable power management active
355 * this will be usefull for rate scale to disable PM during hight 353 * this will be useful for rate scale to disable PM during high
356 * valume activities 354 * volume activities
357 */ 355 */
358int iwl_power_enable_management(struct iwl_priv *priv) 356int iwl_power_enable_management(struct iwl_priv *priv)
359{ 357{
@@ -391,7 +389,7 @@ int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode)
391} 389}
392EXPORT_SYMBOL(iwl_power_set_system_mode); 390EXPORT_SYMBOL(iwl_power_set_system_mode);
393 391
394/* initilize to default */ 392/* initialize to default */
395void iwl_power_initialize(struct iwl_priv *priv) 393void iwl_power_initialize(struct iwl_priv *priv)
396{ 394{
397 395
@@ -443,7 +441,7 @@ static void iwl_bg_set_power_save(struct work_struct *work)
443 441
444 mutex_lock(&priv->mutex); 442 mutex_lock(&priv->mutex);
445 443
446 /* on starting association we disable power managment 444 /* on starting association we disable power management
447 * until association, if association failed then this 445 * until association, if association failed then this
448 * timer will expire and enable PM again. 446 * timer will expire and enable PM again.
449 */ 447 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index df484a90ae64..fa098d8975ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -22,7 +22,7 @@
22 * file called LICENSE. 22 * file called LICENSE.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/ 27 *****************************************************************************/
28#ifndef __iwl_power_setting_h__ 28#ifndef __iwl_power_setting_h__
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index ee5afd48d3af..b7a5f23351c3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -25,7 +25,7 @@
25 * in the file called LICENSE.GPL. 25 * in the file called LICENSE.GPL.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
@@ -158,9 +158,9 @@
158 * 158 *
159 * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction 159 * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
160 * images in host DRAM. The last register loaded must be the instruction 160 * images in host DRAM. The last register loaded must be the instruction
161 * bytecount register ("1" in MSbit tells initialization uCode to load 161 * byte count register ("1" in MSbit tells initialization uCode to load
162 * the runtime uCode): 162 * the runtime uCode):
163 * BSM_DRAM_INST_BYTECOUNT_REG = bytecount | BSM_DRAM_INST_LOAD 163 * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
164 * 164 *
165 * 5) Wait for "alive" notification, then issue normal runtime commands. 165 * 5) Wait for "alive" notification, then issue normal runtime commands.
166 * 166 *
@@ -244,7 +244,7 @@
244/** 244/**
245 * Tx Scheduler 245 * Tx Scheduler
246 * 246 *
247 * The Tx Scheduler selects the next frame to be transmitted, chosing TFDs 247 * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
248 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in 248 * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
249 * host DRAM. It steers each frame's Tx command (which contains the frame 249 * host DRAM. It steers each frame's Tx command (which contains the frame
250 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the 250 * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
index 5d642298f04c..4b69da30665c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
@@ -22,7 +22,7 @@
22 * file called LICENSE. 22 * file called LICENSE.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/ 27 *****************************************************************************/
28#include <linux/kernel.h> 28#include <linux/kernel.h>
@@ -34,8 +34,6 @@
34#include "iwl-eeprom.h" 34#include "iwl-eeprom.h"
35#include "iwl-dev.h" 35#include "iwl-dev.h"
36#include "iwl-core.h" 36#include "iwl-core.h"
37#include "iwl-helpers.h"
38
39 37
40/* software rf-kill from user */ 38/* software rf-kill from user */
41static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state) 39static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
@@ -64,7 +62,7 @@ static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
64 iwl_radio_kill_sw_disable_radio(priv); 62 iwl_radio_kill_sw_disable_radio(priv);
65 break; 63 break;
66 default: 64 default:
67 IWL_WARNING("we recieved unexpected RFKILL state %d\n", state); 65 IWL_WARNING("we received unexpected RFKILL state %d\n", state);
68 break; 66 break;
69 } 67 }
70out_unlock: 68out_unlock:
@@ -83,7 +81,7 @@ int iwl_rfkill_init(struct iwl_priv *priv)
83 IWL_DEBUG_RF_KILL("Initializing RFKILL.\n"); 81 IWL_DEBUG_RF_KILL("Initializing RFKILL.\n");
84 priv->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN); 82 priv->rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN);
85 if (!priv->rfkill) { 83 if (!priv->rfkill) {
86 IWL_ERROR("Unable to allocate rfkill device.\n"); 84 IWL_ERROR("Unable to allocate RFKILL device.\n");
87 ret = -ENOMEM; 85 ret = -ENOMEM;
88 goto error; 86 goto error;
89 } 87 }
@@ -99,7 +97,7 @@ int iwl_rfkill_init(struct iwl_priv *priv)
99 97
100 ret = rfkill_register(priv->rfkill); 98 ret = rfkill_register(priv->rfkill);
101 if (ret) { 99 if (ret) {
102 IWL_ERROR("Unable to register rfkill: %d\n", ret); 100 IWL_ERROR("Unable to register RFKILL: %d\n", ret);
103 goto free_rfkill; 101 goto free_rfkill;
104 } 102 }
105 103
@@ -127,7 +125,7 @@ void iwl_rfkill_unregister(struct iwl_priv *priv)
127} 125}
128EXPORT_SYMBOL(iwl_rfkill_unregister); 126EXPORT_SYMBOL(iwl_rfkill_unregister);
129 127
130/* set rf-kill to the right state. */ 128/* set RFKILL to the right state. */
131void iwl_rfkill_set_hw_state(struct iwl_priv *priv) 129void iwl_rfkill_set_hw_state(struct iwl_priv *priv)
132{ 130{
133 if (!priv->rfkill) 131 if (!priv->rfkill)
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.h b/drivers/net/wireless/iwlwifi/iwl-rfkill.h
index 402fd4c781da..86dc055a2e94 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rfkill.h
+++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.h
@@ -22,7 +22,7 @@
22 * file called LICENSE. 22 * file called LICENSE.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/ 27 *****************************************************************************/
28#ifndef __iwl_rf_kill_h__ 28#ifndef __iwl_rf_kill_h__
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 0509c16dbe75..c5f1aa0feac8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -22,7 +22,7 @@
22 * file called LICENSE. 22 * file called LICENSE.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
@@ -218,8 +218,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
218 218
219 /* If we've added more space for the firmware to place data, tell it. 219 /* If we've added more space for the firmware to place data, tell it.
220 * Increment device's write pointer in multiples of 8. */ 220 * Increment device's write pointer in multiples of 8. */
221 if ((write != (rxq->write & ~0x7)) 221 if (write != (rxq->write & ~0x7)) {
222 || (abs(rxq->write - rxq->read) > 7)) {
223 spin_lock_irqsave(&rxq->lock, flags); 222 spin_lock_irqsave(&rxq->lock, flags);
224 rxq->need_update = 1; 223 rxq->need_update = 1;
225 spin_unlock_irqrestore(&rxq->lock, flags); 224 spin_unlock_irqrestore(&rxq->lock, flags);
@@ -245,25 +244,31 @@ void iwl_rx_allocate(struct iwl_priv *priv)
245 struct list_head *element; 244 struct list_head *element;
246 struct iwl_rx_mem_buffer *rxb; 245 struct iwl_rx_mem_buffer *rxb;
247 unsigned long flags; 246 unsigned long flags;
248 spin_lock_irqsave(&rxq->lock, flags); 247
249 while (!list_empty(&rxq->rx_used)) { 248 while (1) {
249 spin_lock_irqsave(&rxq->lock, flags);
250
251 if (list_empty(&rxq->rx_used)) {
252 spin_unlock_irqrestore(&rxq->lock, flags);
253 return;
254 }
250 element = rxq->rx_used.next; 255 element = rxq->rx_used.next;
251 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 256 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
257 list_del(element);
258
259 spin_unlock_irqrestore(&rxq->lock, flags);
252 260
253 /* Alloc a new receive buffer */ 261 /* Alloc a new receive buffer */
254 rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256, 262 rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
255 __GFP_NOWARN | GFP_ATOMIC); 263 GFP_KERNEL);
256 if (!rxb->skb) { 264 if (!rxb->skb) {
257 if (net_ratelimit()) 265 printk(KERN_CRIT DRV_NAME
258 printk(KERN_CRIT DRV_NAME 266 "Can not allocate SKB buffers\n");
259 ": Can not allocate SKB buffers\n");
260 /* We don't reschedule replenish work here -- we will 267 /* We don't reschedule replenish work here -- we will
261 * call the restock method and if it still needs 268 * call the restock method and if it still needs
262 * more buffers it will schedule replenish */ 269 * more buffers it will schedule replenish */
263 break; 270 break;
264 } 271 }
265 priv->alloc_rxb_skb++;
266 list_del(element);
267 272
268 /* Get physical address of RB/SKB */ 273 /* Get physical address of RB/SKB */
269 rxb->real_dma_addr = pci_map_single( 274 rxb->real_dma_addr = pci_map_single(
@@ -277,12 +282,15 @@ void iwl_rx_allocate(struct iwl_priv *priv)
277 rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256); 282 rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
278 skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr); 283 skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
279 284
285 spin_lock_irqsave(&rxq->lock, flags);
286
280 list_add_tail(&rxb->list, &rxq->rx_free); 287 list_add_tail(&rxb->list, &rxq->rx_free);
281 rxq->free_count++; 288 rxq->free_count++;
289 priv->alloc_rxb_skb++;
290
291 spin_unlock_irqrestore(&rxq->lock, flags);
282 } 292 }
283 spin_unlock_irqrestore(&rxq->lock, flags);
284} 293}
285EXPORT_SYMBOL(iwl_rx_allocate);
286 294
287void iwl_rx_replenish(struct iwl_priv *priv) 295void iwl_rx_replenish(struct iwl_priv *priv)
288{ 296{
@@ -317,7 +325,10 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
317 325
318 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, 326 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
319 rxq->dma_addr); 327 rxq->dma_addr);
328 pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
329 rxq->rb_stts, rxq->rb_stts_dma);
320 rxq->bd = NULL; 330 rxq->bd = NULL;
331 rxq->rb_stts = NULL;
321} 332}
322EXPORT_SYMBOL(iwl_rx_queue_free); 333EXPORT_SYMBOL(iwl_rx_queue_free);
323 334
@@ -334,7 +345,12 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
334 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ 345 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
335 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr); 346 rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
336 if (!rxq->bd) 347 if (!rxq->bd)
337 return -ENOMEM; 348 goto err_bd;
349
350 rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status),
351 &rxq->rb_stts_dma);
352 if (!rxq->rb_stts)
353 goto err_rb;
338 354
339 /* Fill the rx_used queue with _all_ of the Rx buffers */ 355 /* Fill the rx_used queue with _all_ of the Rx buffers */
340 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) 356 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
@@ -346,6 +362,12 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
346 rxq->free_count = 0; 362 rxq->free_count = 0;
347 rxq->need_update = 0; 363 rxq->need_update = 0;
348 return 0; 364 return 0;
365
366err_rb:
367 pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
368 rxq->dma_addr);
369err_bd:
370 return -ENOMEM;
349} 371}
350EXPORT_SYMBOL(iwl_rx_queue_alloc); 372EXPORT_SYMBOL(iwl_rx_queue_alloc);
351 373
@@ -412,10 +434,10 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
412 434
413 /* Tell device where in DRAM to update its Rx status */ 435 /* Tell device where in DRAM to update its Rx status */
414 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, 436 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
415 (priv->shared_phys + priv->rb_closed_offset) >> 4); 437 rxq->rb_stts_dma >> 4);
416 438
417 /* Enable Rx DMA 439 /* Enable Rx DMA
418 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set becuase of HW bug in 440 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
419 * the credit mechanism in 5000 HW RX FIFO 441 * the credit mechanism in 5000 HW RX FIFO
420 * Direct rx interrupts to hosts 442 * Direct rx interrupts to hosts
421 * Rx buffer size 4 or 8k 443 * Rx buffer size 4 or 8k
@@ -426,6 +448,7 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
426 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 448 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
427 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | 449 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
428 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 450 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
451 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
429 rb_size| 452 rb_size|
430 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| 453 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
431 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); 454 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
@@ -453,10 +476,8 @@ int iwl_rxq_stop(struct iwl_priv *priv)
453 476
454 /* stop Rx DMA */ 477 /* stop Rx DMA */
455 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 478 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
456 ret = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, 479 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
457 (1 << 24), 1000); 480 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
458 if (ret < 0)
459 IWL_ERROR("Can't stop Rx DMA.\n");
460 481
461 iwl_release_nic_access(priv); 482 iwl_release_nic_access(priv);
462 spin_unlock_irqrestore(&priv->lock, flags); 483 spin_unlock_irqrestore(&priv->lock, flags);
@@ -470,7 +491,7 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
470 491
471{ 492{
472 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 493 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
473 struct iwl4965_missed_beacon_notif *missed_beacon; 494 struct iwl_missed_beacon_notif *missed_beacon;
474 495
475 missed_beacon = &pkt->u.missed_beacon; 496 missed_beacon = &pkt->u.missed_beacon;
476 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) { 497 if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
@@ -485,49 +506,6 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
485} 506}
486EXPORT_SYMBOL(iwl_rx_missed_beacon_notif); 507EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);
487 508
488int iwl_rx_agg_start(struct iwl_priv *priv, const u8 *addr, int tid, u16 ssn)
489{
490 unsigned long flags;
491 int sta_id;
492
493 sta_id = iwl_find_station(priv, addr);
494 if (sta_id == IWL_INVALID_STATION)
495 return -ENXIO;
496
497 spin_lock_irqsave(&priv->sta_lock, flags);
498 priv->stations[sta_id].sta.station_flags_msk = 0;
499 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
500 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
501 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
502 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
503 spin_unlock_irqrestore(&priv->sta_lock, flags);
504
505 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
506 CMD_ASYNC);
507}
508EXPORT_SYMBOL(iwl_rx_agg_start);
509
510int iwl_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid)
511{
512 unsigned long flags;
513 int sta_id;
514
515 sta_id = iwl_find_station(priv, addr);
516 if (sta_id == IWL_INVALID_STATION)
517 return -ENXIO;
518
519 spin_lock_irqsave(&priv->sta_lock, flags);
520 priv->stations[sta_id].sta.station_flags_msk = 0;
521 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
522 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
523 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
524 spin_unlock_irqrestore(&priv->sta_lock, flags);
525
526 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
527 CMD_ASYNC);
528}
529EXPORT_SYMBOL(iwl_rx_agg_stop);
530
531 509
532/* Calculate noise level, based on measurements during network silence just 510/* Calculate noise level, based on measurements during network silence just
533 * before arriving beacon. This measurement can be done only if we know 511 * before arriving beacon. This measurement can be done only if we know
@@ -651,20 +629,24 @@ static int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
651 return sig_qual; 629 return sig_qual;
652} 630}
653 631
654#ifdef CONFIG_IWLWIFI_DEBUG 632/* Calc max signal level (dBm) among 3 possible receivers */
633static inline int iwl_calc_rssi(struct iwl_priv *priv,
634 struct iwl_rx_phy_res *rx_resp)
635{
636 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
637}
655 638
639#ifdef CONFIG_IWLWIFI_DEBUG
656/** 640/**
657 * iwl_dbg_report_frame - dump frame to syslog during debug sessions 641 * iwl_dbg_report_frame - dump frame to syslog during debug sessions
658 * 642 *
659 * You may hack this function to show different aspects of received frames, 643 * You may hack this function to show different aspects of received frames,
660 * including selective frame dumps. 644 * including selective frame dumps.
661 * group100 parameter selects whether to show 1 out of 100 good frames. 645 * group100 parameter selects whether to show 1 out of 100 good data frames.
662 * 646 * All beacon and probe response frames are printed.
663 * TODO: This was originally written for 3945, need to audit for
664 * proper operation with 4965.
665 */ 647 */
666static void iwl_dbg_report_frame(struct iwl_priv *priv, 648static void iwl_dbg_report_frame(struct iwl_priv *priv,
667 struct iwl_rx_packet *pkt, 649 struct iwl_rx_phy_res *phy_res, u16 length,
668 struct ieee80211_hdr *header, int group100) 650 struct ieee80211_hdr *header, int group100)
669{ 651{
670 u32 to_us; 652 u32 to_us;
@@ -676,20 +658,9 @@ static void iwl_dbg_report_frame(struct iwl_priv *priv,
676 u16 seq_ctl; 658 u16 seq_ctl;
677 u16 channel; 659 u16 channel;
678 u16 phy_flags; 660 u16 phy_flags;
679 int rate_sym; 661 u32 rate_n_flags;
680 u16 length;
681 u16 status;
682 u16 bcn_tmr;
683 u32 tsf_low; 662 u32 tsf_low;
684 u64 tsf; 663 int rssi;
685 u8 rssi;
686 u8 agc;
687 u16 sig_avg;
688 u16 noise_diff;
689 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
690 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
691 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
692 u8 *data = IWL_RX_DATA(pkt);
693 664
694 if (likely(!(priv->debug_level & IWL_DL_RX))) 665 if (likely(!(priv->debug_level & IWL_DL_RX)))
695 return; 666 return;
@@ -699,22 +670,13 @@ static void iwl_dbg_report_frame(struct iwl_priv *priv,
699 seq_ctl = le16_to_cpu(header->seq_ctrl); 670 seq_ctl = le16_to_cpu(header->seq_ctrl);
700 671
701 /* metadata */ 672 /* metadata */
702 channel = le16_to_cpu(rx_hdr->channel); 673 channel = le16_to_cpu(phy_res->channel);
703 phy_flags = le16_to_cpu(rx_hdr->phy_flags); 674 phy_flags = le16_to_cpu(phy_res->phy_flags);
704 rate_sym = rx_hdr->rate; 675 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
705 length = le16_to_cpu(rx_hdr->len);
706
707 /* end-of-frame status and timestamp */
708 status = le32_to_cpu(rx_end->status);
709 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
710 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
711 tsf = le64_to_cpu(rx_end->timestamp);
712 676
713 /* signal statistics */ 677 /* signal statistics */
714 rssi = rx_stats->rssi; 678 rssi = iwl_calc_rssi(priv, phy_res);
715 agc = rx_stats->agc; 679 tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
716 sig_avg = le16_to_cpu(rx_stats->sig_avg);
717 noise_diff = le16_to_cpu(rx_stats->noise_diff);
718 680
719 to_us = !compare_ether_addr(header->addr1, priv->mac_addr); 681 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
720 682
@@ -768,11 +730,13 @@ static void iwl_dbg_report_frame(struct iwl_priv *priv,
768 else 730 else
769 title = "Frame"; 731 title = "Frame";
770 732
771 rate_idx = iwl_hwrate_to_plcp_idx(rate_sym); 733 rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
772 if (unlikely(rate_idx == -1)) 734 if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
773 bitrate = 0; 735 bitrate = 0;
774 else 736 WARN_ON_ONCE(1);
737 } else {
775 bitrate = iwl_rates[rate_idx].ieee / 2; 738 bitrate = iwl_rates[rate_idx].ieee / 2;
739 }
776 740
777 /* print frame summary. 741 /* print frame summary.
778 * MAC addresses show just the last byte (for brevity), 742 * MAC addresses show just the last byte (for brevity),
@@ -784,24 +748,17 @@ static void iwl_dbg_report_frame(struct iwl_priv *priv,
784 length, rssi, channel, bitrate); 748 length, rssi, channel, bitrate);
785 else { 749 else {
786 /* src/dst addresses assume managed mode */ 750 /* src/dst addresses assume managed mode */
787 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, " 751 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, src=0x%02x, "
788 "src=0x%02x, rssi=%u, tim=%lu usec, " 752 "len=%u, rssi=%d, tim=%lu usec, "
789 "phy=0x%02x, chnl=%d\n", 753 "phy=0x%02x, chnl=%d\n",
790 title, le16_to_cpu(fc), header->addr1[5], 754 title, le16_to_cpu(fc), header->addr1[5],
791 header->addr3[5], rssi, 755 header->addr3[5], length, rssi,
792 tsf_low - priv->scan_start_tsf, 756 tsf_low - priv->scan_start_tsf,
793 phy_flags, channel); 757 phy_flags, channel);
794 } 758 }
795 } 759 }
796 if (print_dump) 760 if (print_dump)
797 iwl_print_hex_dump(priv, IWL_DL_RX, data, length); 761 iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
798}
799#else
800static inline void iwl_dbg_report_frame(struct iwl_priv *priv,
801 struct iwl_rx_packet *pkt,
802 struct ieee80211_hdr *header,
803 int group100)
804{
805} 762}
806#endif 763#endif
807 764
@@ -995,46 +952,6 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
995 rxb->skb = NULL; 952 rxb->skb = NULL;
996} 953}
997 954
998/* Calc max signal level (dBm) among 3 possible receivers */
999static inline int iwl_calc_rssi(struct iwl_priv *priv,
1000 struct iwl_rx_phy_res *rx_resp)
1001{
1002 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
1003}
1004
1005
1006static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1007{
1008 unsigned long flags;
1009
1010 spin_lock_irqsave(&priv->sta_lock, flags);
1011 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
1012 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
1013 priv->stations[sta_id].sta.sta.modify_mask = 0;
1014 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1015 spin_unlock_irqrestore(&priv->sta_lock, flags);
1016
1017 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1018}
1019
1020static void iwl_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
1021{
1022 /* FIXME: need locking over ps_status ??? */
1023 u8 sta_id = iwl_find_station(priv, addr);
1024
1025 if (sta_id != IWL_INVALID_STATION) {
1026 u8 sta_awake = priv->stations[sta_id].
1027 ps_status == STA_PS_STATUS_WAKE;
1028
1029 if (sta_awake && ps_bit)
1030 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
1031 else if (!sta_awake && !ps_bit) {
1032 iwl_sta_modify_ps_wake(priv, sta_id);
1033 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
1034 }
1035 }
1036}
1037
1038/* This is necessary only for a number of statistics, see the caller. */ 955/* This is necessary only for a number of statistics, see the caller. */
1039static int iwl_is_network_packet(struct iwl_priv *priv, 956static int iwl_is_network_packet(struct iwl_priv *priv,
1040 struct ieee80211_hdr *header) 957 struct ieee80211_hdr *header)
@@ -1157,9 +1074,10 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1157 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; 1074 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
1158 1075
1159 /* Set "1" to report good data frames in groups of 100 */ 1076 /* Set "1" to report good data frames in groups of 100 */
1160 /* FIXME: need to optimze the call: */ 1077#ifdef CONFIG_IWLWIFI_DEBUG
1161 iwl_dbg_report_frame(priv, pkt, header, 1); 1078 if (unlikely(priv->debug_level & IWL_DL_RX))
1162 1079 iwl_dbg_report_frame(priv, rx_start, len, header, 1);
1080#endif
1163 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n", 1081 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
1164 rx_status.signal, rx_status.noise, rx_status.signal, 1082 rx_status.signal, rx_status.noise, rx_status.signal,
1165 (unsigned long long)rx_status.mactime); 1083 (unsigned long long)rx_status.mactime);
@@ -1168,12 +1086,12 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1168 * "antenna number" 1086 * "antenna number"
1169 * 1087 *
1170 * It seems that the antenna field in the phy flags value 1088 * It seems that the antenna field in the phy flags value
1171 * is actually a bitfield. This is undefined by radiotap, 1089 * is actually a bit field. This is undefined by radiotap,
1172 * it wants an actual antenna number but I always get "7" 1090 * it wants an actual antenna number but I always get "7"
1173 * for most legacy frames I receive indicating that the 1091 * for most legacy frames I receive indicating that the
1174 * same frame was received on all three RX chains. 1092 * same frame was received on all three RX chains.
1175 * 1093 *
1176 * I think this field should be removed in favour of a 1094 * I think this field should be removed in favor of a
1177 * new 802.11n radiotap field "RX chains" that is defined 1095 * new 802.11n radiotap field "RX chains" that is defined
1178 * as a bitmask. 1096 * as a bitmask.
1179 */ 1097 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index c89365e2ca58..3c803f6922ef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -22,11 +22,13 @@
22 * in the file called LICENSE.GPL. 22 * in the file called LICENSE.GPL.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Tomas Winkler <tomas.winkler@intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/ 27 *****************************************************************************/
28#include <net/mac80211.h> 28#include <linux/types.h>
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <net/lib80211.h>
31#include <net/mac80211.h>
30 32
31#include "iwl-eeprom.h" 33#include "iwl-eeprom.h"
32#include "iwl-dev.h" 34#include "iwl-dev.h"
@@ -64,54 +66,6 @@
64#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) 66#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
65 67
66 68
67static int scan_tx_ant[3] = {
68 RATE_MCS_ANT_A_MSK, RATE_MCS_ANT_B_MSK, RATE_MCS_ANT_C_MSK
69};
70
71
72
73static int iwl_is_empty_essid(const char *essid, int essid_len)
74{
75 /* Single white space is for Linksys APs */
76 if (essid_len == 1 && essid[0] == ' ')
77 return 1;
78
79 /* Otherwise, if the entire essid is 0, we assume it is hidden */
80 while (essid_len) {
81 essid_len--;
82 if (essid[essid_len] != '\0')
83 return 0;
84 }
85
86 return 1;
87}
88
89
90
91static const char *iwl_escape_essid(const char *essid, u8 essid_len)
92{
93 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
94 const char *s = essid;
95 char *d = escaped;
96
97 if (iwl_is_empty_essid(essid, essid_len)) {
98 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
99 return escaped;
100 }
101
102 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
103 while (essid_len--) {
104 if (*s == '\0') {
105 *d++ = '\\';
106 *d++ = '0';
107 s++;
108 } else
109 *d++ = *s++;
110 }
111 *d = '\0';
112 return escaped;
113}
114
115/** 69/**
116 * iwl_scan_cancel - Cancel any currently executing HW scan 70 * iwl_scan_cancel - Cancel any currently executing HW scan
117 * 71 *
@@ -455,10 +409,11 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
455 409
456void iwl_init_scan_params(struct iwl_priv *priv) 410void iwl_init_scan_params(struct iwl_priv *priv)
457{ 411{
412 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
458 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) 413 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
459 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = RATE_MCS_ANT_INIT_IND; 414 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
460 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) 415 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
461 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = RATE_MCS_ANT_INIT_IND; 416 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
462} 417}
463 418
464int iwl_scan_initiate(struct iwl_priv *priv) 419int iwl_scan_initiate(struct iwl_priv *priv)
@@ -550,7 +505,7 @@ static void iwl_ht_cap_to_ie(const struct ieee80211_supported_band *sband,
550{ 505{
551 struct ieee80211_ht_cap *ht_cap; 506 struct ieee80211_ht_cap *ht_cap;
552 507
553 if (!sband || !sband->ht_info.ht_supported) 508 if (!sband || !sband->ht_cap.ht_supported)
554 return; 509 return;
555 510
556 if (*left < sizeof(struct ieee80211_ht_cap)) 511 if (*left < sizeof(struct ieee80211_ht_cap))
@@ -559,12 +514,12 @@ static void iwl_ht_cap_to_ie(const struct ieee80211_supported_band *sband,
559 *pos++ = sizeof(struct ieee80211_ht_cap); 514 *pos++ = sizeof(struct ieee80211_ht_cap);
560 ht_cap = (struct ieee80211_ht_cap *) pos; 515 ht_cap = (struct ieee80211_ht_cap *) pos;
561 516
562 ht_cap->cap_info = cpu_to_le16(sband->ht_info.cap); 517 ht_cap->cap_info = cpu_to_le16(sband->ht_cap.cap);
563 memcpy(ht_cap->supp_mcs_set, sband->ht_info.supp_mcs_set, 16); 518 memcpy(&ht_cap->mcs, &sband->ht_cap.mcs, 16);
564 ht_cap->ampdu_params_info = 519 ht_cap->ampdu_params_info =
565 (sband->ht_info.ampdu_factor & IEEE80211_HT_CAP_AMPDU_FACTOR) | 520 (sband->ht_cap.ampdu_factor & IEEE80211_HT_AMPDU_PARM_FACTOR) |
566 ((sband->ht_info.ampdu_density << 2) & 521 ((sband->ht_cap.ampdu_density << 2) &
567 IEEE80211_HT_CAP_AMPDU_DENSITY); 522 IEEE80211_HT_AMPDU_PARM_DENSITY);
568 *left -= sizeof(struct ieee80211_ht_cap); 523 *left -= sizeof(struct ieee80211_ht_cap);
569} 524}
570 525
@@ -670,23 +625,6 @@ static u16 iwl_fill_probe_req(struct iwl_priv *priv,
670 return (u16)len; 625 return (u16)len;
671} 626}
672 627
673static u32 iwl_scan_tx_ant(struct iwl_priv *priv, enum ieee80211_band band)
674{
675 int i, ind;
676
677 ind = priv->scan_tx_ant[band];
678 for (i = 0; i < priv->hw_params.tx_chains_num; i++) {
679 ind = (ind+1) >= priv->hw_params.tx_chains_num ? 0 : ind+1;
680 if (priv->hw_params.valid_tx_ant & (1 << ind)) {
681 priv->scan_tx_ant[band] = ind;
682 break;
683 }
684 }
685 IWL_DEBUG_SCAN("select TX ANT = %c\n", 'A' + ind);
686 return scan_tx_ant[ind];
687}
688
689
690static void iwl_bg_request_scan(struct work_struct *data) 628static void iwl_bg_request_scan(struct work_struct *data)
691{ 629{
692 struct iwl_priv *priv = 630 struct iwl_priv *priv =
@@ -699,11 +637,13 @@ static void iwl_bg_request_scan(struct work_struct *data)
699 struct iwl_scan_cmd *scan; 637 struct iwl_scan_cmd *scan;
700 struct ieee80211_conf *conf = NULL; 638 struct ieee80211_conf *conf = NULL;
701 int ret = 0; 639 int ret = 0;
702 u32 tx_ant; 640 u32 rate_flags = 0;
703 u16 cmd_len; 641 u16 cmd_len;
704 enum ieee80211_band band; 642 enum ieee80211_band band;
705 u8 n_probes = 2; 643 u8 n_probes = 2;
706 u8 rx_chain = priv->hw_params.valid_rx_ant; 644 u8 rx_chain = priv->hw_params.valid_rx_ant;
645 u8 rate;
646 DECLARE_SSID_BUF(ssid);
707 647
708 conf = ieee80211_get_hw_conf(priv->hw); 648 conf = ieee80211_get_hw_conf(priv->hw);
709 649
@@ -714,7 +654,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
714 goto done; 654 goto done;
715 } 655 }
716 656
717 /* Make sure the scan wasn't cancelled before this queued work 657 /* Make sure the scan wasn't canceled before this queued work
718 * was given the chance to run... */ 658 * was given the chance to run... */
719 if (!test_bit(STATUS_SCANNING, &priv->status)) 659 if (!test_bit(STATUS_SCANNING, &priv->status))
720 goto done; 660 goto done;
@@ -796,20 +736,13 @@ static void iwl_bg_request_scan(struct work_struct *data)
796 /* We should add the ability for user to lock to PASSIVE ONLY */ 736 /* We should add the ability for user to lock to PASSIVE ONLY */
797 if (priv->one_direct_scan) { 737 if (priv->one_direct_scan) {
798 IWL_DEBUG_SCAN("Start direct scan for '%s'\n", 738 IWL_DEBUG_SCAN("Start direct scan for '%s'\n",
799 iwl_escape_essid(priv->direct_ssid, 739 print_ssid(ssid, priv->direct_ssid,
800 priv->direct_ssid_len)); 740 priv->direct_ssid_len));
801 scan->direct_scan[0].id = WLAN_EID_SSID; 741 scan->direct_scan[0].id = WLAN_EID_SSID;
802 scan->direct_scan[0].len = priv->direct_ssid_len; 742 scan->direct_scan[0].len = priv->direct_ssid_len;
803 memcpy(scan->direct_scan[0].ssid, 743 memcpy(scan->direct_scan[0].ssid,
804 priv->direct_ssid, priv->direct_ssid_len); 744 priv->direct_ssid, priv->direct_ssid_len);
805 n_probes++; 745 n_probes++;
806 } else if (!iwl_is_associated(priv) && priv->essid_len) {
807 IWL_DEBUG_SCAN("Start direct scan for '%s' (not associated)\n",
808 iwl_escape_essid(priv->essid, priv->essid_len));
809 scan->direct_scan[0].id = WLAN_EID_SSID;
810 scan->direct_scan[0].len = priv->essid_len;
811 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
812 n_probes++;
813 } else { 746 } else {
814 IWL_DEBUG_SCAN("Start indirect scan.\n"); 747 IWL_DEBUG_SCAN("Start indirect scan.\n");
815 } 748 }
@@ -822,23 +755,16 @@ static void iwl_bg_request_scan(struct work_struct *data)
822 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) { 755 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
823 band = IEEE80211_BAND_2GHZ; 756 band = IEEE80211_BAND_2GHZ;
824 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 757 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
825 tx_ant = iwl_scan_tx_ant(priv, band); 758 if (priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) {
826 if (priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) 759 rate = IWL_RATE_6M_PLCP;
827 scan->tx_cmd.rate_n_flags = 760 } else {
828 iwl_hw_set_rate_n_flags(IWL_RATE_6M_PLCP, 761 rate = IWL_RATE_1M_PLCP;
829 tx_ant); 762 rate_flags = RATE_MCS_CCK_MSK;
830 else 763 }
831 scan->tx_cmd.rate_n_flags =
832 iwl_hw_set_rate_n_flags(IWL_RATE_1M_PLCP,
833 tx_ant |
834 RATE_MCS_CCK_MSK);
835 scan->good_CRC_th = 0; 764 scan->good_CRC_th = 0;
836 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { 765 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
837 band = IEEE80211_BAND_5GHZ; 766 band = IEEE80211_BAND_5GHZ;
838 tx_ant = iwl_scan_tx_ant(priv, band); 767 rate = IWL_RATE_6M_PLCP;
839 scan->tx_cmd.rate_n_flags =
840 iwl_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
841 tx_ant);
842 scan->good_CRC_th = IWL_GOOD_CRC_TH; 768 scan->good_CRC_th = IWL_GOOD_CRC_TH;
843 769
844 /* Force use of chains B and C (0x6) for scan Rx for 4965 770 /* Force use of chains B and C (0x6) for scan Rx for 4965
@@ -851,6 +777,11 @@ static void iwl_bg_request_scan(struct work_struct *data)
851 goto done; 777 goto done;
852 } 778 }
853 779
780 priv->scan_tx_ant[band] =
781 iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
782 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
783 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
784
854 /* MIMO is not used here, but value is required */ 785 /* MIMO is not used here, but value is required */
855 scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK | 786 scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK |
856 cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) | 787 cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) |
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.c b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
new file mode 100644
index 000000000000..836c3c80b69e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
@@ -0,0 +1,198 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/pci.h>
34#include <linux/delay.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/wireless.h>
38
39#include <net/mac80211.h>
40
41#include "iwl-eeprom.h"
42#include "iwl-dev.h"
43#include "iwl-core.h"
44#include "iwl-io.h"
45#include "iwl-spectrum.h"
46
47#define BEACON_TIME_MASK_LOW 0x00FFFFFF
48#define BEACON_TIME_MASK_HIGH 0xFF000000
49#define TIME_UNIT 1024
50
51/*
52 * extended beacon time format
53 * time in usec will be changed into a 32-bit value in 8:24 format
54 * the high 1 byte is the beacon counts
55 * the lower 3 bytes is the time in usec within one beacon interval
56 */
57
58/* TOOD: was used in sysfs debug interface need to add to mac */
59#if 0
60static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval)
61{
62 u32 quot;
63 u32 rem;
64 u32 interval = beacon_interval * 1024;
65
66 if (!interval || !usec)
67 return 0;
68
69 quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
70 rem = (usec % interval) & BEACON_TIME_MASK_LOW;
71
72 return (quot << 24) + rem;
73}
74
75/* base is usually what we get from ucode with each received frame,
76 * the same as HW timer counter counting down
77 */
78
79static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
80{
81 u32 base_low = base & BEACON_TIME_MASK_LOW;
82 u32 addon_low = addon & BEACON_TIME_MASK_LOW;
83 u32 interval = beacon_interval * TIME_UNIT;
84 u32 res = (base & BEACON_TIME_MASK_HIGH) +
85 (addon & BEACON_TIME_MASK_HIGH);
86
87 if (base_low > addon_low)
88 res += base_low - addon_low;
89 else if (base_low < addon_low) {
90 res += interval + base_low - addon_low;
91 res += (1 << 24);
92 } else
93 res += (1 << 24);
94
95 return cpu_to_le32(res);
96}
97static int iwl_get_measurement(struct iwl_priv *priv,
98 struct ieee80211_measurement_params *params,
99 u8 type)
100{
101 struct iwl4965_spectrum_cmd spectrum;
102 struct iwl_rx_packet *res;
103 struct iwl_host_cmd cmd = {
104 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
105 .data = (void *)&spectrum,
106 .meta.flags = CMD_WANT_SKB,
107 };
108 u32 add_time = le64_to_cpu(params->start_time);
109 int rc;
110 int spectrum_resp_status;
111 int duration = le16_to_cpu(params->duration);
112
113 if (iwl_is_associated(priv))
114 add_time =
115 iwl_usecs_to_beacons(
116 le64_to_cpu(params->start_time) - priv->last_tsf,
117 le16_to_cpu(priv->rxon_timing.beacon_interval));
118
119 memset(&spectrum, 0, sizeof(spectrum));
120
121 spectrum.channel_count = cpu_to_le16(1);
122 spectrum.flags =
123 RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
124 spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
125 cmd.len = sizeof(spectrum);
126 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
127
128 if (iwl_is_associated(priv))
129 spectrum.start_time =
130 iwl_add_beacon_time(priv->last_beacon_time,
131 add_time,
132 le16_to_cpu(priv->rxon_timing.beacon_interval));
133 else
134 spectrum.start_time = 0;
135
136 spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
137 spectrum.channels[0].channel = params->channel;
138 spectrum.channels[0].type = type;
139 if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
140 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
141 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
142
143 rc = iwl_send_cmd_sync(priv, &cmd);
144 if (rc)
145 return rc;
146
147 res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
148 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
149 IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n");
150 rc = -EIO;
151 }
152
153 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
154 switch (spectrum_resp_status) {
155 case 0: /* Command will be handled */
156 if (res->u.spectrum.id != 0xff) {
157 IWL_DEBUG_INFO
158 ("Replaced existing measurement: %d\n",
159 res->u.spectrum.id);
160 priv->measurement_status &= ~MEASUREMENT_READY;
161 }
162 priv->measurement_status |= MEASUREMENT_ACTIVE;
163 rc = 0;
164 break;
165
166 case 1: /* Command will not be handled */
167 rc = -EAGAIN;
168 break;
169 }
170
171 dev_kfree_skb_any(cmd.meta.u.skb);
172
173 return rc;
174}
175#endif
176
177static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb)
179{
180 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
181 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
182
183 if (!report->state) {
184 IWL_DEBUG(IWL_DL_11H,
185 "Spectrum Measure Notification: Start\n");
186 return;
187 }
188
189 memcpy(&priv->measure_report, report, sizeof(*report));
190 priv->measurement_status |= MEASUREMENT_READY;
191}
192
193void iwl_setup_spectrum_handlers(struct iwl_priv *priv)
194{
195 priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
196 iwl_rx_spectrum_measure_notif;
197}
198EXPORT_SYMBOL(iwl_setup_spectrum_handlers);
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
index a40a2174df98..b7d7943e476b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -21,7 +21,7 @@
21 * file called LICENSE. 21 * file called LICENSE.
22 * 22 *
23 * Contact Information: 23 * Contact Information:
24 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 24 * Intel Linux Wireless <ilw@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 * 26 *
27 *****************************************************************************/ 27 *****************************************************************************/
@@ -88,4 +88,5 @@ struct ieee80211_measurement_report {
88 struct ieee80211_basic_report basic[0]; 88 struct ieee80211_basic_report basic[0];
89 } u; 89 } u;
90} __attribute__ ((packed)); 90} __attribute__ ((packed));
91
91#endif 92#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 26f7084d3011..412f66bac1af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -22,7 +22,7 @@
22 * file called LICENSE. 22 * file called LICENSE.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
@@ -33,8 +33,6 @@
33#include "iwl-dev.h" 33#include "iwl-dev.h"
34#include "iwl-core.h" 34#include "iwl-core.h"
35#include "iwl-sta.h" 35#include "iwl-sta.h"
36#include "iwl-helpers.h"
37
38 36
39#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */ 37#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
40#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */ 38#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
@@ -45,7 +43,6 @@ u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
45 int start = 0; 43 int start = 0;
46 int ret = IWL_INVALID_STATION; 44 int ret = IWL_INVALID_STATION;
47 unsigned long flags; 45 unsigned long flags;
48 DECLARE_MAC_BUF(mac);
49 46
50 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) || 47 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
51 (priv->iw_mode == NL80211_IFTYPE_AP)) 48 (priv->iw_mode == NL80211_IFTYPE_AP))
@@ -63,8 +60,8 @@ u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
63 goto out; 60 goto out;
64 } 61 }
65 62
66 IWL_DEBUG_ASSOC_LIMIT("can not find STA %s total %d\n", 63 IWL_DEBUG_ASSOC_LIMIT("can not find STA %pM total %d\n",
67 print_mac(mac, addr), priv->num_stations); 64 addr, priv->num_stations);
68 65
69 out: 66 out:
70 spin_unlock_irqrestore(&priv->sta_lock, flags); 67 spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -86,7 +83,6 @@ EXPORT_SYMBOL(iwl_get_ra_sta_id);
86static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) 83static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
87{ 84{
88 unsigned long flags; 85 unsigned long flags;
89 DECLARE_MAC_BUF(mac);
90 86
91 spin_lock_irqsave(&priv->sta_lock, flags); 87 spin_lock_irqsave(&priv->sta_lock, flags);
92 88
@@ -94,8 +90,8 @@ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
94 IWL_ERROR("ACTIVATE a non DRIVER active station %d\n", sta_id); 90 IWL_ERROR("ACTIVATE a non DRIVER active station %d\n", sta_id);
95 91
96 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; 92 priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
97 IWL_DEBUG_ASSOC("Added STA to Ucode: %s\n", 93 IWL_DEBUG_ASSOC("Added STA to Ucode: %pM\n",
98 print_mac(mac, priv->stations[sta_id].sta.sta.addr)); 94 priv->stations[sta_id].sta.sta.addr);
99 95
100 spin_unlock_irqrestore(&priv->sta_lock, flags); 96 spin_unlock_irqrestore(&priv->sta_lock, flags);
101} 97}
@@ -104,7 +100,9 @@ static int iwl_add_sta_callback(struct iwl_priv *priv,
104 struct iwl_cmd *cmd, struct sk_buff *skb) 100 struct iwl_cmd *cmd, struct sk_buff *skb)
105{ 101{
106 struct iwl_rx_packet *res = NULL; 102 struct iwl_rx_packet *res = NULL;
107 u8 sta_id = cmd->cmd.addsta.sta.sta_id; 103 struct iwl_addsta_cmd *addsta =
104 (struct iwl_addsta_cmd *)cmd->cmd.payload;
105 u8 sta_id = addsta->sta.sta_id;
108 106
109 if (!skb) { 107 if (!skb) {
110 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n"); 108 IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n");
@@ -132,7 +130,7 @@ static int iwl_add_sta_callback(struct iwl_priv *priv,
132 return 1; 130 return 1;
133} 131}
134 132
135int iwl_send_add_sta(struct iwl_priv *priv, 133static int iwl_send_add_sta(struct iwl_priv *priv,
136 struct iwl_addsta_cmd *sta, u8 flags) 134 struct iwl_addsta_cmd *sta, u8 flags)
137{ 135{
138 struct iwl_rx_packet *res = NULL; 136 struct iwl_rx_packet *res = NULL;
@@ -180,10 +178,9 @@ int iwl_send_add_sta(struct iwl_priv *priv,
180 178
181 return ret; 179 return ret;
182} 180}
183EXPORT_SYMBOL(iwl_send_add_sta);
184 181
185static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, 182static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
186 struct ieee80211_ht_info *sta_ht_inf) 183 struct ieee80211_sta_ht_cap *sta_ht_inf)
187{ 184{
188 __le32 sta_flags; 185 __le32 sta_flags;
189 u8 mimo_ps_mode; 186 u8 mimo_ps_mode;
@@ -231,13 +228,12 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
231 * iwl_add_station_flags - Add station to tables in driver and device 228 * iwl_add_station_flags - Add station to tables in driver and device
232 */ 229 */
233u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap, 230u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
234 u8 flags, struct ieee80211_ht_info *ht_info) 231 u8 flags, struct ieee80211_sta_ht_cap *ht_info)
235{ 232{
236 int i; 233 int i;
237 int sta_id = IWL_INVALID_STATION; 234 int sta_id = IWL_INVALID_STATION;
238 struct iwl_station_entry *station; 235 struct iwl_station_entry *station;
239 unsigned long flags_spin; 236 unsigned long flags_spin;
240 DECLARE_MAC_BUF(mac);
241 237
242 spin_lock_irqsave(&priv->sta_lock, flags_spin); 238 spin_lock_irqsave(&priv->sta_lock, flags_spin);
243 if (is_ap) 239 if (is_ap)
@@ -273,8 +269,8 @@ u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr, int is_ap,
273 269
274 station = &priv->stations[sta_id]; 270 station = &priv->stations[sta_id];
275 station->used = IWL_STA_DRIVER_ACTIVE; 271 station->used = IWL_STA_DRIVER_ACTIVE;
276 IWL_DEBUG_ASSOC("Add STA to driver ID %d: %s\n", 272 IWL_DEBUG_ASSOC("Add STA to driver ID %d: %pM\n",
277 sta_id, print_mac(mac, addr)); 273 sta_id, addr);
278 priv->num_stations++; 274 priv->num_stations++;
279 275
280 /* Set up the REPLY_ADD_STA command to send to device */ 276 /* Set up the REPLY_ADD_STA command to send to device */
@@ -301,14 +297,11 @@ EXPORT_SYMBOL(iwl_add_station_flags);
301static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr) 297static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
302{ 298{
303 unsigned long flags; 299 unsigned long flags;
304 DECLARE_MAC_BUF(mac);
305
306 u8 sta_id = iwl_find_station(priv, addr); 300 u8 sta_id = iwl_find_station(priv, addr);
307 301
308 BUG_ON(sta_id == IWL_INVALID_STATION); 302 BUG_ON(sta_id == IWL_INVALID_STATION);
309 303
310 IWL_DEBUG_ASSOC("Removed STA from Ucode: %s\n", 304 IWL_DEBUG_ASSOC("Removed STA from Ucode: %pM\n", addr);
311 print_mac(mac, addr));
312 305
313 spin_lock_irqsave(&priv->sta_lock, flags); 306 spin_lock_irqsave(&priv->sta_lock, flags);
314 307
@@ -326,7 +319,9 @@ static int iwl_remove_sta_callback(struct iwl_priv *priv,
326 struct iwl_cmd *cmd, struct sk_buff *skb) 319 struct iwl_cmd *cmd, struct sk_buff *skb)
327{ 320{
328 struct iwl_rx_packet *res = NULL; 321 struct iwl_rx_packet *res = NULL;
329 const char *addr = cmd->cmd.rm_sta.addr; 322 struct iwl_rem_sta_cmd *rm_sta =
323 (struct iwl_rem_sta_cmd *)cmd->cmd.payload;
324 const char *addr = rm_sta->addr;
330 325
331 if (!skb) { 326 if (!skb) {
332 IWL_ERROR("Error: Response NULL in REPLY_REMOVE_STA.\n"); 327 IWL_ERROR("Error: Response NULL in REPLY_REMOVE_STA.\n");
@@ -415,7 +410,6 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
415 int sta_id = IWL_INVALID_STATION; 410 int sta_id = IWL_INVALID_STATION;
416 int i, ret = -EINVAL; 411 int i, ret = -EINVAL;
417 unsigned long flags; 412 unsigned long flags;
418 DECLARE_MAC_BUF(mac);
419 413
420 spin_lock_irqsave(&priv->sta_lock, flags); 414 spin_lock_irqsave(&priv->sta_lock, flags);
421 415
@@ -435,18 +429,18 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
435 if (unlikely(sta_id == IWL_INVALID_STATION)) 429 if (unlikely(sta_id == IWL_INVALID_STATION))
436 goto out; 430 goto out;
437 431
438 IWL_DEBUG_ASSOC("Removing STA from driver:%d %s\n", 432 IWL_DEBUG_ASSOC("Removing STA from driver:%d %pM\n",
439 sta_id, print_mac(mac, addr)); 433 sta_id, addr);
440 434
441 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 435 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
442 IWL_ERROR("Removing %s but non DRIVER active\n", 436 IWL_ERROR("Removing %pM but non DRIVER active\n",
443 print_mac(mac, addr)); 437 addr);
444 goto out; 438 goto out;
445 } 439 }
446 440
447 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { 441 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
448 IWL_ERROR("Removing %s but non UCODE active\n", 442 IWL_ERROR("Removing %pM but non UCODE active\n",
449 print_mac(mac, addr)); 443 addr);
450 goto out; 444 goto out;
451 } 445 }
452 446
@@ -467,6 +461,29 @@ out:
467} 461}
468EXPORT_SYMBOL(iwl_remove_station); 462EXPORT_SYMBOL(iwl_remove_station);
469 463
464/**
465 * iwl_clear_stations_table - Clear the driver's station table
466 *
467 * NOTE: This does not clear or otherwise alter the device's station table.
468 */
469void iwl_clear_stations_table(struct iwl_priv *priv)
470{
471 unsigned long flags;
472
473 spin_lock_irqsave(&priv->sta_lock, flags);
474
475 if (iwl_is_alive(priv) &&
476 !test_bit(STATUS_EXIT_PENDING, &priv->status) &&
477 iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL))
478 IWL_ERROR("Couldn't clear the station table\n");
479
480 priv->num_stations = 0;
481 memset(priv->stations, 0, sizeof(priv->stations));
482
483 spin_unlock_irqrestore(&priv->sta_lock, flags);
484}
485EXPORT_SYMBOL(iwl_clear_stations_table);
486
470static int iwl_get_free_ucode_key_index(struct iwl_priv *priv) 487static int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
471{ 488{
472 int i; 489 int i;
@@ -717,6 +734,55 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
717 return ret; 734 return ret;
718} 735}
719 736
737void iwl_update_tkip_key(struct iwl_priv *priv,
738 struct ieee80211_key_conf *keyconf,
739 const u8 *addr, u32 iv32, u16 *phase1key)
740{
741 u8 sta_id = IWL_INVALID_STATION;
742 unsigned long flags;
743 __le16 key_flags = 0;
744 int i;
745 DECLARE_MAC_BUF(mac);
746
747 sta_id = iwl_find_station(priv, addr);
748 if (sta_id == IWL_INVALID_STATION) {
749 IWL_DEBUG_MAC80211("leave - %pM not in station map.\n",
750 addr);
751 return;
752 }
753
754 if (iwl_scan_cancel(priv)) {
755 /* cancel scan failed, just live w/ bad key and rely
756 briefly on SW decryption */
757 return;
758 }
759
760 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
761 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
762 key_flags &= ~STA_KEY_FLG_INVALID;
763
764 if (sta_id == priv->hw_params.bcast_sta_id)
765 key_flags |= STA_KEY_MULTICAST_MSK;
766
767 spin_lock_irqsave(&priv->sta_lock, flags);
768
769 priv->stations[sta_id].sta.key.key_flags = key_flags;
770 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
771
772 for (i = 0; i < 5; i++)
773 priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
774 cpu_to_le16(phase1key[i]);
775
776 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
777 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
778
779 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
780
781 spin_unlock_irqrestore(&priv->sta_lock, flags);
782
783}
784EXPORT_SYMBOL(iwl_update_tkip_key);
785
720int iwl_remove_dynamic_key(struct iwl_priv *priv, 786int iwl_remove_dynamic_key(struct iwl_priv *priv,
721 struct ieee80211_key_conf *keyconf, 787 struct ieee80211_key_conf *keyconf,
722 u8 sta_id) 788 u8 sta_id)
@@ -809,7 +875,7 @@ static void iwl_dump_lq_cmd(struct iwl_priv *priv,
809{ 875{
810 int i; 876 int i;
811 IWL_DEBUG_RATE("lq station id 0x%x\n", lq->sta_id); 877 IWL_DEBUG_RATE("lq station id 0x%x\n", lq->sta_id);
812 IWL_DEBUG_RATE("lq dta 0x%X 0x%X\n", 878 IWL_DEBUG_RATE("lq ant 0x%X 0x%X\n",
813 lq->general_params.single_stream_ant_msk, 879 lq->general_params.single_stream_ant_msk,
814 lq->general_params.dual_stream_ant_msk); 880 lq->general_params.dual_stream_ant_msk);
815 881
@@ -870,7 +936,7 @@ static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, int is_ap)
870 struct iwl_link_quality_cmd link_cmd = { 936 struct iwl_link_quality_cmd link_cmd = {
871 .reserved1 = 0, 937 .reserved1 = 0,
872 }; 938 };
873 u16 rate_flags; 939 u32 rate_flags;
874 940
875 /* Set up the rate scaling to start at selected rate, fall back 941 /* Set up the rate scaling to start at selected rate, fall back
876 * all the way down to 1M in IEEE order, and then spin on 1M */ 942 * all the way down to 1M in IEEE order, and then spin on 1M */
@@ -886,15 +952,16 @@ static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, int is_ap)
886 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) 952 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
887 rate_flags |= RATE_MCS_CCK_MSK; 953 rate_flags |= RATE_MCS_CCK_MSK;
888 954
889 /* Use Tx antenna B only */ 955 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
890 rate_flags |= RATE_MCS_ANT_B_MSK; /*FIXME:RS*/ 956 RATE_MCS_ANT_POS;
891 957
892 link_cmd.rs_table[i].rate_n_flags = 958 link_cmd.rs_table[i].rate_n_flags =
893 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); 959 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
894 r = iwl4965_get_prev_ieee_rate(r); 960 r = iwl_get_prev_ieee_rate(r);
895 } 961 }
896 962
897 link_cmd.general_params.single_stream_ant_msk = 2; 963 link_cmd.general_params.single_stream_ant_msk =
964 first_antenna(priv->hw_params.valid_tx_ant);
898 link_cmd.general_params.dual_stream_ant_msk = 3; 965 link_cmd.general_params.dual_stream_ant_msk = 3;
899 link_cmd.agg_params.agg_dis_start_th = 3; 966 link_cmd.agg_params.agg_dis_start_th = 3;
900 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000); 967 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
@@ -910,24 +977,35 @@ static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, int is_ap)
910 * iwl_rxon_add_station - add station into station table. 977 * iwl_rxon_add_station - add station into station table.
911 * 978 *
912 * there is only one AP station with id= IWL_AP_ID 979 * there is only one AP station with id= IWL_AP_ID
913 * NOTE: mutex must be held before calling this fnction 980 * NOTE: mutex must be held before calling this function
914 */ 981 */
915int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap) 982int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
916{ 983{
984 struct ieee80211_sta *sta;
985 struct ieee80211_sta_ht_cap ht_config;
986 struct ieee80211_sta_ht_cap *cur_ht_config = NULL;
917 u8 sta_id; 987 u8 sta_id;
918 988
919 /* Add station to device's station table */ 989 /* Add station to device's station table */
920 struct ieee80211_conf *conf = &priv->hw->conf; 990
921 struct ieee80211_ht_info *cur_ht_config = &conf->ht_conf; 991 /*
922 992 * XXX: This check is definitely not correct, if we're an AP
923 if ((is_ap) && 993 * it'll always be false which is not what we want, but
924 (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) && 994 * it doesn't look like iwlagn is prepared to be an HT
925 (priv->iw_mode == NL80211_IFTYPE_STATION)) 995 * AP anyway.
926 sta_id = iwl_add_station_flags(priv, addr, is_ap, 996 */
927 0, cur_ht_config); 997 if (priv->current_ht_config.is_ht) {
928 else 998 rcu_read_lock();
929 sta_id = iwl_add_station_flags(priv, addr, is_ap, 999 sta = ieee80211_find_sta(priv->hw, addr);
930 0, NULL); 1000 if (sta) {
1001 memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
1002 cur_ht_config = &ht_config;
1003 }
1004 rcu_read_unlock();
1005 }
1006
1007 sta_id = iwl_add_station_flags(priv, addr, is_ap,
1008 0, cur_ht_config);
931 1009
932 /* Set up default rate scaling table in device's station table */ 1010 /* Set up default rate scaling table in device's station table */
933 iwl_sta_init_lq(priv, addr, is_ap); 1011 iwl_sta_init_lq(priv, addr, is_ap);
@@ -945,7 +1023,6 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
945{ 1023{
946 int sta_id; 1024 int sta_id;
947 u16 fc = le16_to_cpu(hdr->frame_control); 1025 u16 fc = le16_to_cpu(hdr->frame_control);
948 DECLARE_MAC_BUF(mac);
949 1026
950 /* If this frame is broadcast or management, use broadcast station id */ 1027 /* If this frame is broadcast or management, use broadcast station id */
951 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) || 1028 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
@@ -980,9 +1057,9 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
980 if (sta_id != IWL_INVALID_STATION) 1057 if (sta_id != IWL_INVALID_STATION)
981 return sta_id; 1058 return sta_id;
982 1059
983 IWL_DEBUG_DROP("Station %s not in station map. " 1060 IWL_DEBUG_DROP("Station %pM not in station map. "
984 "Defaulting to broadcast...\n", 1061 "Defaulting to broadcast...\n",
985 print_mac(mac, hdr->addr1)); 1062 hdr->addr1);
986 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); 1063 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
987 return priv->hw_params.bcast_sta_id; 1064 return priv->hw_params.bcast_sta_id;
988 1065
@@ -999,9 +1076,9 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
999EXPORT_SYMBOL(iwl_get_sta_id); 1076EXPORT_SYMBOL(iwl_get_sta_id);
1000 1077
1001/** 1078/**
1002 * iwl_sta_modify_enable_tid_tx - Enable Tx for this TID in station table 1079 * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
1003 */ 1080 */
1004void iwl_sta_modify_enable_tid_tx(struct iwl_priv *priv, int sta_id, int tid) 1081void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
1005{ 1082{
1006 unsigned long flags; 1083 unsigned long flags;
1007 1084
@@ -1014,5 +1091,81 @@ void iwl_sta_modify_enable_tid_tx(struct iwl_priv *priv, int sta_id, int tid)
1014 1091
1015 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 1092 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1016} 1093}
1017EXPORT_SYMBOL(iwl_sta_modify_enable_tid_tx); 1094EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid);
1095
1096int iwl_sta_rx_agg_start(struct iwl_priv *priv,
1097 const u8 *addr, int tid, u16 ssn)
1098{
1099 unsigned long flags;
1100 int sta_id;
1101
1102 sta_id = iwl_find_station(priv, addr);
1103 if (sta_id == IWL_INVALID_STATION)
1104 return -ENXIO;
1105
1106 spin_lock_irqsave(&priv->sta_lock, flags);
1107 priv->stations[sta_id].sta.station_flags_msk = 0;
1108 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
1109 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
1110 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
1111 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1112 spin_unlock_irqrestore(&priv->sta_lock, flags);
1113
1114 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
1115 CMD_ASYNC);
1116}
1117EXPORT_SYMBOL(iwl_sta_rx_agg_start);
1118
1119int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid)
1120{
1121 unsigned long flags;
1122 int sta_id;
1123
1124 sta_id = iwl_find_station(priv, addr);
1125 if (sta_id == IWL_INVALID_STATION)
1126 return -ENXIO;
1127
1128 spin_lock_irqsave(&priv->sta_lock, flags);
1129 priv->stations[sta_id].sta.station_flags_msk = 0;
1130 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
1131 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
1132 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1133 spin_unlock_irqrestore(&priv->sta_lock, flags);
1134
1135 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
1136 CMD_ASYNC);
1137}
1138EXPORT_SYMBOL(iwl_sta_rx_agg_stop);
1139
1140static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1141{
1142 unsigned long flags;
1143
1144 spin_lock_irqsave(&priv->sta_lock, flags);
1145 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
1146 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
1147 priv->stations[sta_id].sta.sta.modify_mask = 0;
1148 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1149 spin_unlock_irqrestore(&priv->sta_lock, flags);
1150
1151 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1152}
1153
1154void iwl_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
1155{
1156 /* FIXME: need locking over ps_status ??? */
1157 u8 sta_id = iwl_find_station(priv, addr);
1158
1159 if (sta_id != IWL_INVALID_STATION) {
1160 u8 sta_awake = priv->stations[sta_id].
1161 ps_status == STA_PS_STATUS_WAKE;
1162
1163 if (sta_awake && ps_bit)
1164 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
1165 else if (!sta_awake && !ps_bit) {
1166 iwl_sta_modify_ps_wake(priv, sta_id);
1167 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
1168 }
1169 }
1170}
1018 1171
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 221b93e670a6..9bb7cefc1f3c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -22,7 +22,7 @@
22 * file called LICENSE. 22 * file called LICENSE.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
@@ -47,9 +47,21 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
47 struct ieee80211_key_conf *key, u8 sta_id); 47 struct ieee80211_key_conf *key, u8 sta_id);
48int iwl_remove_dynamic_key(struct iwl_priv *priv, 48int iwl_remove_dynamic_key(struct iwl_priv *priv,
49 struct ieee80211_key_conf *key, u8 sta_id); 49 struct ieee80211_key_conf *key, u8 sta_id);
50void iwl_update_tkip_key(struct iwl_priv *priv,
51 struct ieee80211_key_conf *keyconf,
52 const u8 *addr, u32 iv32, u16 *phase1key);
53
50int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap); 54int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap);
51int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap); 55int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap);
56void iwl_clear_stations_table(struct iwl_priv *priv);
52int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr); 57int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
53void iwl_sta_modify_enable_tid_tx(struct iwl_priv *priv, int sta_id, int tid);
54int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr); 58int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
59u8 iwl_add_station_flags(struct iwl_priv *priv, const u8 *addr,
60 int is_ap, u8 flags,
61 struct ieee80211_sta_ht_cap *ht_info);
62void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
63int iwl_sta_rx_agg_start(struct iwl_priv *priv,
64 const u8 *addr, int tid, u16 ssn);
65int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid);
66void iwl_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr);
55#endif /* __iwl_sta_h__ */ 67#endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 907a53ebc6e4..b0ee86c62685 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -22,7 +22,7 @@
22 * file called LICENSE. 22 * file called LICENSE.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
@@ -56,96 +56,132 @@ static const u16 default_tid_to_tx_fifo[] = {
56 IWL_TX_FIFO_AC3 56 IWL_TX_FIFO_AC3
57}; 57};
58 58
59static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
60 struct iwl_dma_ptr *ptr, size_t size)
61{
62 ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
63 if (!ptr->addr)
64 return -ENOMEM;
65 ptr->size = size;
66 return 0;
67}
68
69static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
70 struct iwl_dma_ptr *ptr)
71{
72 if (unlikely(!ptr->addr))
73 return;
74
75 pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
76 memset(ptr, 0, sizeof(*ptr));
77}
78
79static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
80{
81 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
82
83 dma_addr_t addr = get_unaligned_le32(&tb->lo);
84 if (sizeof(dma_addr_t) > sizeof(u32))
85 addr |=
86 ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
87
88 return addr;
89}
90
91static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
92{
93 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
94
95 return le16_to_cpu(tb->hi_n_len) >> 4;
96}
97
98static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
99 dma_addr_t addr, u16 len)
100{
101 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
102 u16 hi_n_len = len << 4;
103
104 put_unaligned_le32(addr, &tb->lo);
105 if (sizeof(dma_addr_t) > sizeof(u32))
106 hi_n_len |= ((addr >> 16) >> 16) & 0xF;
107
108 tb->hi_n_len = cpu_to_le16(hi_n_len);
109
110 tfd->num_tbs = idx + 1;
111}
112
113static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
114{
115 return tfd->num_tbs & 0x1f;
116}
59 117
60/** 118/**
61 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 119 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
120 * @priv - driver private data
121 * @txq - tx queue
62 * 122 *
63 * Does NOT advance any TFD circular buffer read/write indexes 123 * Does NOT advance any TFD circular buffer read/write indexes
64 * Does NOT free the TFD itself (which is within circular buffer) 124 * Does NOT free the TFD itself (which is within circular buffer)
65 */ 125 */
66static int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) 126static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
67{ 127{
68 struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0]; 128 struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
69 struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr]; 129 struct iwl_tfd *tfd;
70 struct pci_dev *dev = priv->pci_dev; 130 struct pci_dev *dev = priv->pci_dev;
131 int index = txq->q.read_ptr;
71 int i; 132 int i;
72 int counter = 0; 133 int num_tbs;
73 int index, is_odd;
74 134
75 /* Host command buffers stay mapped in memory, nothing to clean */ 135 tfd = &tfd_tmp[index];
76 if (txq->q.id == IWL_CMD_QUEUE_NUM)
77 return 0;
78 136
79 /* Sanity check on number of chunks */ 137 /* Sanity check on number of chunks */
80 counter = IWL_GET_BITS(*bd, num_tbs); 138 num_tbs = iwl_tfd_get_num_tbs(tfd);
81 if (counter > MAX_NUM_OF_TBS) { 139
82 IWL_ERROR("Too many chunks: %i\n", counter); 140 if (num_tbs >= IWL_NUM_OF_TBS) {
141 IWL_ERROR("Too many chunks: %i\n", num_tbs);
83 /* @todo issue fatal error, it is quite serious situation */ 142 /* @todo issue fatal error, it is quite serious situation */
84 return 0; 143 return;
85 } 144 }
86 145
87 /* Unmap chunks, if any. 146 /* Unmap tx_cmd */
88 * TFD info for odd chunks is different format than for even chunks. */ 147 if (num_tbs)
89 for (i = 0; i < counter; i++) { 148 pci_unmap_single(dev,
90 index = i / 2; 149 pci_unmap_addr(&txq->cmd[index]->meta, mapping),
91 is_odd = i & 0x1; 150 pci_unmap_len(&txq->cmd[index]->meta, len),
92
93 if (is_odd)
94 pci_unmap_single(
95 dev,
96 IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
97 (IWL_GET_BITS(bd->pa[index],
98 tb2_addr_hi20) << 16),
99 IWL_GET_BITS(bd->pa[index], tb2_len),
100 PCI_DMA_TODEVICE); 151 PCI_DMA_TODEVICE);
101 152
102 else if (i > 0) 153 /* Unmap chunks, if any. */
103 pci_unmap_single(dev, 154 for (i = 1; i < num_tbs; i++) {
104 le32_to_cpu(bd->pa[index].tb1_addr), 155 pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
105 IWL_GET_BITS(bd->pa[index], tb1_len), 156 iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
106 PCI_DMA_TODEVICE);
107 157
108 /* Free SKB, if any, for this chunk */ 158 if (txq->txb) {
109 if (txq->txb[txq->q.read_ptr].skb[i]) { 159 dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
110 struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i]; 160 txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
111
112 dev_kfree_skb(skb);
113 txq->txb[txq->q.read_ptr].skb[i] = NULL;
114 } 161 }
115 } 162 }
116 return 0;
117} 163}
118 164
119static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr, 165static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
120 dma_addr_t addr, u16 len) 166 struct iwl_tfd *tfd,
167 dma_addr_t addr, u16 len)
121{ 168{
122 int index, is_odd; 169
123 struct iwl_tfd_frame *tfd = ptr; 170 u32 num_tbs = iwl_tfd_get_num_tbs(tfd);
124 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
125 171
126 /* Each TFD can point to a maximum 20 Tx buffers */ 172 /* Each TFD can point to a maximum 20 Tx buffers */
127 if (num_tbs >= MAX_NUM_OF_TBS) { 173 if (num_tbs >= IWL_NUM_OF_TBS) {
128 IWL_ERROR("Error can not send more than %d chunks\n", 174 IWL_ERROR("Error can not send more than %d chunks\n",
129 MAX_NUM_OF_TBS); 175 IWL_NUM_OF_TBS);
130 return -EINVAL; 176 return -EINVAL;
131 } 177 }
132 178
133 index = num_tbs / 2; 179 BUG_ON(addr & ~DMA_BIT_MASK(36));
134 is_odd = num_tbs & 0x1; 180 if (unlikely(addr & ~IWL_TX_DMA_MASK))
135 181 IWL_ERROR("Unaligned address = %llx\n",
136 if (!is_odd) { 182 (unsigned long long)addr);
137 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
138 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
139 iwl_get_dma_hi_address(addr));
140 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
141 } else {
142 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
143 (u32) (addr & 0xffff));
144 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
145 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
146 }
147 183
148 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1); 184 iwl_tfd_set_tb(tfd, num_tbs, addr, len);
149 185
150 return 0; 186 return 0;
151} 187}
@@ -210,7 +246,7 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
210 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 246 struct iwl_tx_queue *txq = &priv->txq[txq_id];
211 struct iwl_queue *q = &txq->q; 247 struct iwl_queue *q = &txq->q;
212 struct pci_dev *dev = priv->pci_dev; 248 struct pci_dev *dev = priv->pci_dev;
213 int i, slots_num, len; 249 int i, len;
214 250
215 if (q->n_bd == 0) 251 if (q->n_bd == 0)
216 return; 252 return;
@@ -221,21 +257,15 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
221 iwl_hw_txq_free_tfd(priv, txq); 257 iwl_hw_txq_free_tfd(priv, txq);
222 258
223 len = sizeof(struct iwl_cmd) * q->n_window; 259 len = sizeof(struct iwl_cmd) * q->n_window;
224 if (q->id == IWL_CMD_QUEUE_NUM)
225 len += IWL_MAX_SCAN_SIZE;
226 260
227 /* De-alloc array of command/tx buffers */ 261 /* De-alloc array of command/tx buffers */
228 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 262 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
229 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
230 for (i = 0; i < slots_num; i++)
231 kfree(txq->cmd[i]); 263 kfree(txq->cmd[i]);
232 if (txq_id == IWL_CMD_QUEUE_NUM)
233 kfree(txq->cmd[slots_num]);
234 264
235 /* De-alloc circular buffer of TFDs */ 265 /* De-alloc circular buffer of TFDs */
236 if (txq->q.n_bd) 266 if (txq->q.n_bd)
237 pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) * 267 pci_free_consistent(dev, sizeof(struct iwl_tfd) *
238 txq->q.n_bd, txq->bd, txq->q.dma_addr); 268 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
239 269
240 /* De-alloc array of per-TFD driver data */ 270 /* De-alloc array of per-TFD driver data */
241 kfree(txq->txb); 271 kfree(txq->txb);
@@ -245,6 +275,40 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
245 memset(txq, 0, sizeof(*txq)); 275 memset(txq, 0, sizeof(*txq));
246} 276}
247 277
278
279/**
280 * iwl_cmd_queue_free - Deallocate DMA queue.
281 * @txq: Transmit queue to deallocate.
282 *
283 * Empty queue by removing and destroying all BD's.
284 * Free all buffers.
285 * 0-fill, but do not free "txq" descriptor structure.
286 */
287static void iwl_cmd_queue_free(struct iwl_priv *priv)
288{
289 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
290 struct iwl_queue *q = &txq->q;
291 struct pci_dev *dev = priv->pci_dev;
292 int i, len;
293
294 if (q->n_bd == 0)
295 return;
296
297 len = sizeof(struct iwl_cmd) * q->n_window;
298 len += IWL_MAX_SCAN_SIZE;
299
300 /* De-alloc array of command/tx buffers */
301 for (i = 0; i <= TFD_CMD_SLOTS; i++)
302 kfree(txq->cmd[i]);
303
304 /* De-alloc circular buffer of TFDs */
305 if (txq->q.n_bd)
306 pci_free_consistent(dev, sizeof(struct iwl_tfd) *
307 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
308
309 /* 0-fill queue descriptor structure */
310 memset(txq, 0, sizeof(*txq));
311}
248/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 312/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
249 * DMA services 313 * DMA services
250 * 314 *
@@ -340,13 +404,13 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
340 404
341 /* Circular buffer of transmit frame descriptors (TFDs), 405 /* Circular buffer of transmit frame descriptors (TFDs),
342 * shared with device */ 406 * shared with device */
343 txq->bd = pci_alloc_consistent(dev, 407 txq->tfds = pci_alloc_consistent(dev,
344 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX, 408 sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX,
345 &txq->q.dma_addr); 409 &txq->q.dma_addr);
346 410
347 if (!txq->bd) { 411 if (!txq->tfds) {
348 IWL_ERROR("pci_alloc_consistent(%zd) failed\n", 412 IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
349 sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX); 413 sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX);
350 goto error; 414 goto error;
351 } 415 }
352 txq->q.id = id; 416 txq->q.id = id;
@@ -370,26 +434,21 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
370static int iwl_hw_tx_queue_init(struct iwl_priv *priv, 434static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
371 struct iwl_tx_queue *txq) 435 struct iwl_tx_queue *txq)
372{ 436{
373 int rc; 437 int ret;
374 unsigned long flags; 438 unsigned long flags;
375 int txq_id = txq->q.id; 439 int txq_id = txq->q.id;
376 440
377 spin_lock_irqsave(&priv->lock, flags); 441 spin_lock_irqsave(&priv->lock, flags);
378 rc = iwl_grab_nic_access(priv); 442 ret = iwl_grab_nic_access(priv);
379 if (rc) { 443 if (ret) {
380 spin_unlock_irqrestore(&priv->lock, flags); 444 spin_unlock_irqrestore(&priv->lock, flags);
381 return rc; 445 return ret;
382 } 446 }
383 447
384 /* Circular buffer (TFD queue in DRAM) physical base address */ 448 /* Circular buffer (TFD queue in DRAM) physical base address */
385 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), 449 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
386 txq->q.dma_addr >> 8); 450 txq->q.dma_addr >> 8);
387 451
388 /* Enable DMA channel, using same id as for TFD queue */
389 iwl_write_direct32(
390 priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
391 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
392 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
393 iwl_release_nic_access(priv); 452 iwl_release_nic_access(priv);
394 spin_unlock_irqrestore(&priv->lock, flags); 453 spin_unlock_irqrestore(&priv->lock, flags);
395 454
@@ -468,16 +527,20 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
468 527
469 /* Tx queues */ 528 /* Tx queues */
470 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 529 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
471 iwl_tx_queue_free(priv, txq_id); 530 if (txq_id == IWL_CMD_QUEUE_NUM)
531 iwl_cmd_queue_free(priv);
532 else
533 iwl_tx_queue_free(priv, txq_id);
472 534
473 /* Keep-warm buffer */ 535 iwl_free_dma_ptr(priv, &priv->kw);
474 iwl_kw_free(priv); 536
537 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
475} 538}
476EXPORT_SYMBOL(iwl_hw_txq_ctx_free); 539EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
477 540
478/** 541/**
479 * iwl_txq_ctx_reset - Reset TX queue context 542 * iwl_txq_ctx_reset - Reset TX queue context
480 * Destroys all DMA structures and initialise them again 543 * Destroys all DMA structures and initialize them again
481 * 544 *
482 * @param priv 545 * @param priv
483 * @return error code 546 * @return error code
@@ -488,13 +551,17 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
488 int txq_id, slots_num; 551 int txq_id, slots_num;
489 unsigned long flags; 552 unsigned long flags;
490 553
491 iwl_kw_free(priv);
492
493 /* Free all tx/cmd queues and keep-warm buffer */ 554 /* Free all tx/cmd queues and keep-warm buffer */
494 iwl_hw_txq_ctx_free(priv); 555 iwl_hw_txq_ctx_free(priv);
495 556
557 ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
558 priv->hw_params.scd_bc_tbls_size);
559 if (ret) {
560 IWL_ERROR("Scheduler BC Table allocation failed\n");
561 goto error_bc_tbls;
562 }
496 /* Alloc keep-warm buffer */ 563 /* Alloc keep-warm buffer */
497 ret = iwl_kw_alloc(priv); 564 ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
498 if (ret) { 565 if (ret) {
499 IWL_ERROR("Keep Warm allocation failed\n"); 566 IWL_ERROR("Keep Warm allocation failed\n");
500 goto error_kw; 567 goto error_kw;
@@ -509,17 +576,12 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
509 /* Turn off all Tx DMA fifos */ 576 /* Turn off all Tx DMA fifos */
510 priv->cfg->ops->lib->txq_set_sched(priv, 0); 577 priv->cfg->ops->lib->txq_set_sched(priv, 0);
511 578
579 /* Tell NIC where to find the "keep warm" buffer */
580 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
581
512 iwl_release_nic_access(priv); 582 iwl_release_nic_access(priv);
513 spin_unlock_irqrestore(&priv->lock, flags); 583 spin_unlock_irqrestore(&priv->lock, flags);
514 584
515
516 /* Tell nic where to find the keep-warm buffer */
517 ret = iwl_kw_init(priv);
518 if (ret) {
519 IWL_ERROR("kw_init failed\n");
520 goto error_reset;
521 }
522
523 /* Alloc and init all Tx queues, including the command queue (#4) */ 585 /* Alloc and init all Tx queues, including the command queue (#4) */
524 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 586 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
525 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 587 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
@@ -537,8 +599,10 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
537 error: 599 error:
538 iwl_hw_txq_ctx_free(priv); 600 iwl_hw_txq_ctx_free(priv);
539 error_reset: 601 error_reset:
540 iwl_kw_free(priv); 602 iwl_free_dma_ptr(priv, &priv->kw);
541 error_kw: 603 error_kw:
604 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
605 error_bc_tbls:
542 return ret; 606 return ret;
543} 607}
544 608
@@ -547,11 +611,9 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
547 */ 611 */
548void iwl_txq_ctx_stop(struct iwl_priv *priv) 612void iwl_txq_ctx_stop(struct iwl_priv *priv)
549{ 613{
550 614 int ch;
551 int txq_id;
552 unsigned long flags; 615 unsigned long flags;
553 616
554
555 /* Turn off all Tx DMA fifos */ 617 /* Turn off all Tx DMA fifos */
556 spin_lock_irqsave(&priv->lock, flags); 618 spin_lock_irqsave(&priv->lock, flags);
557 if (iwl_grab_nic_access(priv)) { 619 if (iwl_grab_nic_access(priv)) {
@@ -562,12 +624,11 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
562 priv->cfg->ops->lib->txq_set_sched(priv, 0); 624 priv->cfg->ops->lib->txq_set_sched(priv, 0);
563 625
564 /* Stop each Tx DMA channel, and wait for it to be idle */ 626 /* Stop each Tx DMA channel, and wait for it to be idle */
565 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 627 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
566 iwl_write_direct32(priv, 628 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
567 FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
568 iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, 629 iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
569 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE 630 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
570 (txq_id), 200); 631 1000);
571 } 632 }
572 iwl_release_nic_access(priv); 633 iwl_release_nic_access(priv);
573 spin_unlock_irqrestore(&priv->lock, flags); 634 spin_unlock_irqrestore(&priv->lock, flags);
@@ -584,7 +645,7 @@ static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
584 struct iwl_tx_cmd *tx_cmd, 645 struct iwl_tx_cmd *tx_cmd,
585 struct ieee80211_tx_info *info, 646 struct ieee80211_tx_info *info,
586 struct ieee80211_hdr *hdr, 647 struct ieee80211_hdr *hdr,
587 int is_unicast, u8 std_id) 648 u8 std_id)
588{ 649{
589 __le16 fc = hdr->frame_control; 650 __le16 fc = hdr->frame_control;
590 __le32 tx_flags = tx_cmd->tx_flags; 651 __le32 tx_flags = tx_cmd->tx_flags;
@@ -647,11 +708,11 @@ static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
647 __le16 fc, int sta_id, 708 __le16 fc, int sta_id,
648 int is_hcca) 709 int is_hcca)
649{ 710{
711 u32 rate_flags = 0;
712 int rate_idx;
650 u8 rts_retry_limit = 0; 713 u8 rts_retry_limit = 0;
651 u8 data_retry_limit = 0; 714 u8 data_retry_limit = 0;
652 u8 rate_plcp; 715 u8 rate_plcp;
653 u16 rate_flags = 0;
654 int rate_idx;
655 716
656 rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff, 717 rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
657 IWL_RATE_COUNT - 1); 718 IWL_RATE_COUNT - 1);
@@ -694,14 +755,8 @@ static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
694 break; 755 break;
695 } 756 }
696 757
697 /* Alternate between antenna A and B for successive frames */ 758 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
698 if (priv->use_ant_b_for_management_frame) { 759 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
699 priv->use_ant_b_for_management_frame = 0;
700 rate_flags |= RATE_MCS_ANT_B_MSK;
701 } else {
702 priv->use_ant_b_for_management_frame = 1;
703 rate_flags |= RATE_MCS_ANT_A_MSK;
704 }
705 } 760 }
706 761
707 tx_cmd->rts_retry_limit = rts_retry_limit; 762 tx_cmd->rts_retry_limit = rts_retry_limit;
@@ -723,7 +778,7 @@ static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
723 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); 778 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
724 if (info->flags & IEEE80211_TX_CTL_AMPDU) 779 if (info->flags & IEEE80211_TX_CTL_AMPDU)
725 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; 780 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
726 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n"); 781 IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n");
727 break; 782 break;
728 783
729 case ALG_TKIP: 784 case ALG_TKIP:
@@ -767,7 +822,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
767{ 822{
768 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 823 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
769 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 824 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
770 struct iwl_tfd_frame *tfd; 825 struct iwl_tfd *tfd;
771 struct iwl_tx_queue *txq; 826 struct iwl_tx_queue *txq;
772 struct iwl_queue *q; 827 struct iwl_queue *q;
773 struct iwl_cmd *out_cmd; 828 struct iwl_cmd *out_cmd;
@@ -776,10 +831,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
776 dma_addr_t phys_addr; 831 dma_addr_t phys_addr;
777 dma_addr_t txcmd_phys; 832 dma_addr_t txcmd_phys;
778 dma_addr_t scratch_phys; 833 dma_addr_t scratch_phys;
779 u16 len, idx, len_org; 834 u16 len, len_org;
780 u16 seq_number = 0; 835 u16 seq_number = 0;
781 __le16 fc; 836 __le16 fc;
782 u8 hdr_len, unicast; 837 u8 hdr_len;
783 u8 sta_id; 838 u8 sta_id;
784 u8 wait_write_ptr = 0; 839 u8 wait_write_ptr = 0;
785 u8 tid = 0; 840 u8 tid = 0;
@@ -799,8 +854,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
799 goto drop_unlock; 854 goto drop_unlock;
800 } 855 }
801 856
802 unicast = !is_multicast_ether_addr(hdr->addr1);
803
804 fc = hdr->frame_control; 857 fc = hdr->frame_control;
805 858
806#ifdef CONFIG_IWLWIFI_DEBUG 859#ifdef CONFIG_IWLWIFI_DEBUG
@@ -830,10 +883,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
830 /* Find (or create) index into station table for destination station */ 883 /* Find (or create) index into station table for destination station */
831 sta_id = iwl_get_sta_id(priv, hdr); 884 sta_id = iwl_get_sta_id(priv, hdr);
832 if (sta_id == IWL_INVALID_STATION) { 885 if (sta_id == IWL_INVALID_STATION) {
833 DECLARE_MAC_BUF(mac); 886 IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n",
834 887 hdr->addr1);
835 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
836 print_mac(mac, hdr->addr1));
837 goto drop; 888 goto drop;
838 } 889 }
839 890
@@ -856,23 +907,22 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
856 priv->stations[sta_id].tid[tid].tfds_in_queue++; 907 priv->stations[sta_id].tid[tid].tfds_in_queue++;
857 } 908 }
858 909
859 /* Descriptor for chosen Tx queue */
860 txq = &priv->txq[txq_id]; 910 txq = &priv->txq[txq_id];
861 q = &txq->q; 911 q = &txq->q;
912 txq->swq_id = swq_id;
862 913
863 spin_lock_irqsave(&priv->lock, flags); 914 spin_lock_irqsave(&priv->lock, flags);
864 915
865 /* Set up first empty TFD within this queue's circular TFD buffer */ 916 /* Set up first empty TFD within this queue's circular TFD buffer */
866 tfd = &txq->bd[q->write_ptr]; 917 tfd = &txq->tfds[q->write_ptr];
867 memset(tfd, 0, sizeof(*tfd)); 918 memset(tfd, 0, sizeof(*tfd));
868 idx = get_cmd_index(q, q->write_ptr, 0);
869 919
870 /* Set up driver data for this TFD */ 920 /* Set up driver data for this TFD */
871 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 921 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
872 txq->txb[q->write_ptr].skb[0] = skb; 922 txq->txb[q->write_ptr].skb[0] = skb;
873 923
874 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 924 /* Set up first empty entry in queue's array of Tx/cmd buffers */
875 out_cmd = txq->cmd[idx]; 925 out_cmd = txq->cmd[q->write_ptr];
876 tx_cmd = &out_cmd->cmd.tx; 926 tx_cmd = &out_cmd->cmd.tx;
877 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); 927 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
878 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); 928 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
@@ -912,12 +962,14 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
912 962
913 /* Physical address of this Tx command's header (not MAC header!), 963 /* Physical address of this Tx command's header (not MAC header!),
914 * within command buffer array. */ 964 * within command buffer array. */
915 txcmd_phys = pci_map_single(priv->pci_dev, out_cmd, 965 txcmd_phys = pci_map_single(priv->pci_dev,
916 sizeof(struct iwl_cmd), PCI_DMA_TODEVICE); 966 out_cmd, sizeof(struct iwl_cmd),
917 txcmd_phys += offsetof(struct iwl_cmd, hdr); 967 PCI_DMA_TODEVICE);
918 968 pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
969 pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
919 /* Add buffer containing Tx command and MAC(!) header to TFD's 970 /* Add buffer containing Tx command and MAC(!) header to TFD's
920 * first entry */ 971 * first entry */
972 txcmd_phys += offsetof(struct iwl_cmd, hdr);
921 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); 973 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
922 974
923 if (info->control.hw_key) 975 if (info->control.hw_key)
@@ -940,7 +992,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
940 len = (u16)skb->len; 992 len = (u16)skb->len;
941 tx_cmd->len = cpu_to_le16(len); 993 tx_cmd->len = cpu_to_le16(len);
942 /* TODO need this for burst mode later on */ 994 /* TODO need this for burst mode later on */
943 iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, unicast, sta_id); 995 iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
944 996
945 /* set is_hcca to 0; it probably will never be implemented */ 997 /* set is_hcca to 0; it probably will never be implemented */
946 iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0); 998 iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
@@ -950,7 +1002,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
950 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + 1002 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
951 offsetof(struct iwl_tx_cmd, scratch); 1003 offsetof(struct iwl_tx_cmd, scratch);
952 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 1004 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
953 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys); 1005 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
954 1006
955 if (!ieee80211_has_morefrags(hdr->frame_control)) { 1007 if (!ieee80211_has_morefrags(hdr->frame_control)) {
956 txq->need_update = 1; 1008 txq->need_update = 1;
@@ -983,7 +1035,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
983 iwl_txq_update_write_ptr(priv, txq); 1035 iwl_txq_update_write_ptr(priv, txq);
984 spin_unlock_irqrestore(&priv->lock, flags); 1036 spin_unlock_irqrestore(&priv->lock, flags);
985 } else { 1037 } else {
986 ieee80211_stop_queue(priv->hw, swq_id); 1038 ieee80211_stop_queue(priv->hw, txq->swq_id);
987 } 1039 }
988 } 1040 }
989 1041
@@ -1011,7 +1063,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1011{ 1063{
1012 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 1064 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
1013 struct iwl_queue *q = &txq->q; 1065 struct iwl_queue *q = &txq->q;
1014 struct iwl_tfd_frame *tfd; 1066 struct iwl_tfd *tfd;
1015 struct iwl_cmd *out_cmd; 1067 struct iwl_cmd *out_cmd;
1016 dma_addr_t phys_addr; 1068 dma_addr_t phys_addr;
1017 unsigned long flags; 1069 unsigned long flags;
@@ -1040,7 +1092,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1040 1092
1041 spin_lock_irqsave(&priv->hcmd_lock, flags); 1093 spin_lock_irqsave(&priv->hcmd_lock, flags);
1042 1094
1043 tfd = &txq->bd[q->write_ptr]; 1095 tfd = &txq->tfds[q->write_ptr];
1044 memset(tfd, 0, sizeof(*tfd)); 1096 memset(tfd, 0, sizeof(*tfd));
1045 1097
1046 1098
@@ -1061,9 +1113,13 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1061 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; 1113 out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
1062 len = (idx == TFD_CMD_SLOTS) ? 1114 len = (idx == TFD_CMD_SLOTS) ?
1063 IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd); 1115 IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
1064 phys_addr = pci_map_single(priv->pci_dev, out_cmd, len, 1116
1065 PCI_DMA_TODEVICE); 1117 phys_addr = pci_map_single(priv->pci_dev, out_cmd,
1118 len, PCI_DMA_TODEVICE);
1119 pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
1120 pci_unmap_len_set(&out_cmd->meta, len, len);
1066 phys_addr += offsetof(struct iwl_cmd, hdr); 1121 phys_addr += offsetof(struct iwl_cmd, hdr);
1122
1067 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); 1123 iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
1068 1124
1069#ifdef CONFIG_IWLWIFI_DEBUG 1125#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1113,8 +1169,9 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1113 return 0; 1169 return 0;
1114 } 1170 }
1115 1171
1116 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index; 1172 for (index = iwl_queue_inc_wrap(index, q->n_bd);
1117 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1173 q->read_ptr != index;
1174 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1118 1175
1119 tx_info = &txq->txb[txq->q.read_ptr]; 1176 tx_info = &txq->txb[txq->q.read_ptr];
1120 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]); 1177 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
@@ -1138,44 +1195,34 @@ EXPORT_SYMBOL(iwl_tx_queue_reclaim);
1138 * need to be reclaimed. As result, some free space forms. If there is 1195 * need to be reclaimed. As result, some free space forms. If there is
1139 * enough free space (> low mark), wake the stack that feeds us. 1196 * enough free space (> low mark), wake the stack that feeds us.
1140 */ 1197 */
1141static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) 1198static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
1199 int idx, int cmd_idx)
1142{ 1200{
1143 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 1201 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1144 struct iwl_queue *q = &txq->q; 1202 struct iwl_queue *q = &txq->q;
1145 struct iwl_tfd_frame *bd = &txq->bd[index];
1146 dma_addr_t dma_addr;
1147 int is_odd, buf_len;
1148 int nfreed = 0; 1203 int nfreed = 0;
1149 1204
1150 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { 1205 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
1151 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, " 1206 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
1152 "is out of range [0-%d] %d %d.\n", txq_id, 1207 "is out of range [0-%d] %d %d.\n", txq_id,
1153 index, q->n_bd, q->write_ptr, q->read_ptr); 1208 idx, q->n_bd, q->write_ptr, q->read_ptr);
1154 return; 1209 return;
1155 } 1210 }
1156 1211
1157 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index; 1212 pci_unmap_single(priv->pci_dev,
1158 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1213 pci_unmap_addr(&txq->cmd[cmd_idx]->meta, mapping),
1214 pci_unmap_len(&txq->cmd[cmd_idx]->meta, len),
1215 PCI_DMA_TODEVICE);
1216
1217 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
1218 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1159 1219
1160 if (nfreed > 1) { 1220 if (nfreed++ > 0) {
1161 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index, 1221 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", idx,
1162 q->write_ptr, q->read_ptr); 1222 q->write_ptr, q->read_ptr);
1163 queue_work(priv->workqueue, &priv->restart); 1223 queue_work(priv->workqueue, &priv->restart);
1164 } 1224 }
1165 is_odd = (index/2) & 0x1;
1166 if (is_odd) {
1167 dma_addr = IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
1168 (IWL_GET_BITS(bd->pa[index],
1169 tb2_addr_hi20) << 16);
1170 buf_len = IWL_GET_BITS(bd->pa[index], tb2_len);
1171 } else {
1172 dma_addr = le32_to_cpu(bd->pa[index].tb1_addr);
1173 buf_len = IWL_GET_BITS(bd->pa[index], tb1_len);
1174 }
1175 1225
1176 pci_unmap_single(priv->pci_dev, dma_addr, buf_len,
1177 PCI_DMA_TODEVICE);
1178 nfreed++;
1179 } 1226 }
1180} 1227}
1181 1228
@@ -1201,8 +1248,13 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1201 * command queue then there a command routing bug has been introduced 1248 * command queue then there a command routing bug has been introduced
1202 * in the queue management code. */ 1249 * in the queue management code. */
1203 if (WARN(txq_id != IWL_CMD_QUEUE_NUM, 1250 if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
1204 "wrong command queue %d, command id 0x%X\n", txq_id, pkt->hdr.cmd)) 1251 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
1252 txq_id, sequence,
1253 priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
1254 priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
1255 iwl_print_hex_dump(priv, IWL_DL_INFO , rxb, 32);
1205 return; 1256 return;
1257 }
1206 1258
1207 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); 1259 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
1208 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; 1260 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
@@ -1215,7 +1267,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1215 !cmd->meta.u.callback(priv, cmd, rxb->skb)) 1267 !cmd->meta.u.callback(priv, cmd, rxb->skb))
1216 rxb->skb = NULL; 1268 rxb->skb = NULL;
1217 1269
1218 iwl_hcmd_queue_reclaim(priv, txq_id, index); 1270 iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
1219 1271
1220 if (!(cmd->meta.flags & CMD_ASYNC)) { 1272 if (!(cmd->meta.flags & CMD_ASYNC)) {
1221 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 1273 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
@@ -1248,15 +1300,14 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1248 int ret; 1300 int ret;
1249 unsigned long flags; 1301 unsigned long flags;
1250 struct iwl_tid_data *tid_data; 1302 struct iwl_tid_data *tid_data;
1251 DECLARE_MAC_BUF(mac);
1252 1303
1253 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 1304 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
1254 tx_fifo = default_tid_to_tx_fifo[tid]; 1305 tx_fifo = default_tid_to_tx_fifo[tid];
1255 else 1306 else
1256 return -EINVAL; 1307 return -EINVAL;
1257 1308
1258 IWL_WARNING("%s on ra = %s tid = %d\n", 1309 IWL_WARNING("%s on ra = %pM tid = %d\n",
1259 __func__, print_mac(mac, ra), tid); 1310 __func__, ra, tid);
1260 1311
1261 sta_id = iwl_find_station(priv, ra); 1312 sta_id = iwl_find_station(priv, ra);
1262 if (sta_id == IWL_INVALID_STATION) 1313 if (sta_id == IWL_INVALID_STATION)
@@ -1301,7 +1352,6 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1301 struct iwl_tid_data *tid_data; 1352 struct iwl_tid_data *tid_data;
1302 int ret, write_ptr, read_ptr; 1353 int ret, write_ptr, read_ptr;
1303 unsigned long flags; 1354 unsigned long flags;
1304 DECLARE_MAC_BUF(mac);
1305 1355
1306 if (!ra) { 1356 if (!ra) {
1307 IWL_ERROR("ra = NULL\n"); 1357 IWL_ERROR("ra = NULL\n");
@@ -1362,8 +1412,8 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
1362 case IWL_EMPTYING_HW_QUEUE_DELBA: 1412 case IWL_EMPTYING_HW_QUEUE_DELBA:
1363 /* We are reclaiming the last packet of the */ 1413 /* We are reclaiming the last packet of the */
1364 /* aggregated HW queue */ 1414 /* aggregated HW queue */
1365 if (txq_id == tid_data->agg.txq_id && 1415 if ((txq_id == tid_data->agg.txq_id) &&
1366 q->read_ptr == q->write_ptr) { 1416 (q->read_ptr == q->write_ptr)) {
1367 u16 ssn = SEQ_TO_SN(tid_data->seq_number); 1417 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1368 int tx_fifo = default_tid_to_tx_fifo[tid]; 1418 int tx_fifo = default_tid_to_tx_fifo[tid];
1369 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n"); 1419 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
@@ -1414,7 +1464,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1414 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); 1464 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
1415 1465
1416 /* Calculate shift to align block-ack bits with our Tx window bits */ 1466 /* Calculate shift to align block-ack bits with our Tx window bits */
1417 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4); 1467 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
1418 if (sh < 0) /* tbw something is wrong with indices */ 1468 if (sh < 0) /* tbw something is wrong with indices */
1419 sh += 0x100; 1469 sh += 0x100;
1420 1470
@@ -1436,7 +1486,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1436 ack = bitmap & (1ULL << i); 1486 ack = bitmap & (1ULL << i);
1437 successes += !!ack; 1487 successes += !!ack;
1438 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", 1488 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
1439 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff, 1489 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
1440 agg->start_idx + i); 1490 agg->start_idx + i);
1441 } 1491 }
1442 1492
@@ -1464,10 +1514,11 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1464{ 1514{
1465 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1515 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
1466 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; 1516 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1467 int index;
1468 struct iwl_tx_queue *txq = NULL; 1517 struct iwl_tx_queue *txq = NULL;
1469 struct iwl_ht_agg *agg; 1518 struct iwl_ht_agg *agg;
1470 DECLARE_MAC_BUF(mac); 1519 int index;
1520 int sta_id;
1521 int tid;
1471 1522
1472 /* "flow" corresponds to Tx queue */ 1523 /* "flow" corresponds to Tx queue */
1473 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); 1524 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
@@ -1482,17 +1533,19 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1482 } 1533 }
1483 1534
1484 txq = &priv->txq[scd_flow]; 1535 txq = &priv->txq[scd_flow];
1485 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg; 1536 sta_id = ba_resp->sta_id;
1537 tid = ba_resp->tid;
1538 agg = &priv->stations[sta_id].tid[tid].agg;
1486 1539
1487 /* Find index just before block-ack window */ 1540 /* Find index just before block-ack window */
1488 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); 1541 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1489 1542
1490 /* TODO: Need to get this copy more safely - now good for debug */ 1543 /* TODO: Need to get this copy more safely - now good for debug */
1491 1544
1492 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, " 1545 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d] Received from %pM, "
1493 "sta_id = %d\n", 1546 "sta_id = %d\n",
1494 agg->wait_for_ba, 1547 agg->wait_for_ba,
1495 print_mac(mac, (u8 *) &ba_resp->sta_addr_lo32), 1548 (u8 *) &ba_resp->sta_addr_lo32,
1496 ba_resp->sta_id); 1549 ba_resp->sta_id);
1497 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = " 1550 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
1498 "%d, scd_ssn = %d\n", 1551 "%d, scd_ssn = %d\n",
@@ -1513,18 +1566,15 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1513 * transmitted ... if not, it's too late anyway). */ 1566 * transmitted ... if not, it's too late anyway). */
1514 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { 1567 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1515 /* calculate mac80211 ampdu sw queue to wake */ 1568 /* calculate mac80211 ampdu sw queue to wake */
1516 int ampdu_q =
1517 scd_flow - priv->hw_params.first_ampdu_q + priv->hw->queues;
1518 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index); 1569 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
1519 priv->stations[ba_resp->sta_id]. 1570 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1520 tid[ba_resp->tid].tfds_in_queue -= freed; 1571
1521 if (iwl_queue_space(&txq->q) > txq->q.low_mark && 1572 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1522 priv->mac80211_registered && 1573 priv->mac80211_registered &&
1523 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA) 1574 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1524 ieee80211_wake_queue(priv->hw, ampdu_q); 1575 ieee80211_wake_queue(priv->hw, txq->swq_id);
1525 1576
1526 iwl_txq_check_empty(priv, ba_resp->sta_id, 1577 iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
1527 ba_resp->tid, scd_flow);
1528 } 1578 }
1529} 1579}
1530EXPORT_SYMBOL(iwl_rx_reply_compressed_ba); 1580EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 45a6b0c35695..d64580805d6e 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -22,7 +22,7 @@
22 * file called LICENSE. 22 * file called LICENSE.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
@@ -41,6 +41,7 @@
41#include <linux/if_arp.h> 41#include <linux/if_arp.h>
42 42
43#include <net/ieee80211_radiotap.h> 43#include <net/ieee80211_radiotap.h>
44#include <net/lib80211.h>
44#include <net/mac80211.h> 45#include <net/mac80211.h>
45 46
46#include <asm/div64.h> 47#include <asm/div64.h>
@@ -64,11 +65,10 @@ static int iwl3945_tx_queue_update_write_ptr(struct iwl3945_priv *priv,
64 65
65/* module parameters */ 66/* module parameters */
66static int iwl3945_param_disable_hw_scan; /* def: 0 = use 3945's h/w scan */ 67static int iwl3945_param_disable_hw_scan; /* def: 0 = use 3945's h/w scan */
67static int iwl3945_param_debug; /* def: 0 = minimal debug log messages */ 68static u32 iwl3945_param_debug; /* def: 0 = minimal debug log messages */
68static int iwl3945_param_disable; /* def: 0 = enable radio */ 69static int iwl3945_param_disable; /* def: 0 = enable radio */
69static int iwl3945_param_antenna; /* def: 0 = both antennas (use diversity) */ 70static int iwl3945_param_antenna; /* def: 0 = both antennas (use diversity) */
70int iwl3945_param_hwcrypto; /* def: 0 = use software encryption */ 71int iwl3945_param_hwcrypto; /* def: 0 = use software encryption */
71static int iwl3945_param_qos_enable = 1; /* def: 1 = use quality of service */
72int iwl3945_param_queues_num = IWL39_MAX_NUM_QUEUES; /* def: 8 Tx queues */ 72int iwl3945_param_queues_num = IWL39_MAX_NUM_QUEUES; /* def: 8 Tx queues */
73 73
74/* 74/*
@@ -93,12 +93,13 @@ int iwl3945_param_queues_num = IWL39_MAX_NUM_QUEUES; /* def: 8 Tx queues */
93 93
94#define IWLWIFI_VERSION "1.2.26k" VD VS 94#define IWLWIFI_VERSION "1.2.26k" VD VS
95#define DRV_COPYRIGHT "Copyright(c) 2003-2008 Intel Corporation" 95#define DRV_COPYRIGHT "Copyright(c) 2003-2008 Intel Corporation"
96#define DRV_AUTHOR "<ilw@linux.intel.com>"
96#define DRV_VERSION IWLWIFI_VERSION 97#define DRV_VERSION IWLWIFI_VERSION
97 98
98 99
99MODULE_DESCRIPTION(DRV_DESCRIPTION); 100MODULE_DESCRIPTION(DRV_DESCRIPTION);
100MODULE_VERSION(DRV_VERSION); 101MODULE_VERSION(DRV_VERSION);
101MODULE_AUTHOR(DRV_COPYRIGHT); 102MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
102MODULE_LICENSE("GPL"); 103MODULE_LICENSE("GPL");
103 104
104static const struct ieee80211_supported_band *iwl3945_get_band( 105static const struct ieee80211_supported_band *iwl3945_get_band(
@@ -107,46 +108,6 @@ static const struct ieee80211_supported_band *iwl3945_get_band(
107 return priv->hw->wiphy->bands[band]; 108 return priv->hw->wiphy->bands[band];
108} 109}
109 110
110static int iwl3945_is_empty_essid(const char *essid, int essid_len)
111{
112 /* Single white space is for Linksys APs */
113 if (essid_len == 1 && essid[0] == ' ')
114 return 1;
115
116 /* Otherwise, if the entire essid is 0, we assume it is hidden */
117 while (essid_len) {
118 essid_len--;
119 if (essid[essid_len] != '\0')
120 return 0;
121 }
122
123 return 1;
124}
125
126static const char *iwl3945_escape_essid(const char *essid, u8 essid_len)
127{
128 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
129 const char *s = essid;
130 char *d = escaped;
131
132 if (iwl3945_is_empty_essid(essid, essid_len)) {
133 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
134 return escaped;
135 }
136
137 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
138 while (essid_len--) {
139 if (*s == '\0') {
140 *d++ = '\\';
141 *d++ = '0';
142 s++;
143 } else
144 *d++ = *s++;
145 }
146 *d = '\0';
147 return escaped;
148}
149
150/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 111/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
151 * DMA services 112 * DMA services
152 * 113 *
@@ -446,7 +407,6 @@ u8 iwl3945_add_station(struct iwl3945_priv *priv, const u8 *addr, int is_ap, u8
446 int index = IWL_INVALID_STATION; 407 int index = IWL_INVALID_STATION;
447 struct iwl3945_station_entry *station; 408 struct iwl3945_station_entry *station;
448 unsigned long flags_spin; 409 unsigned long flags_spin;
449 DECLARE_MAC_BUF(mac);
450 u8 rate; 410 u8 rate;
451 411
452 spin_lock_irqsave(&priv->sta_lock, flags_spin); 412 spin_lock_irqsave(&priv->sta_lock, flags_spin);
@@ -480,7 +440,7 @@ u8 iwl3945_add_station(struct iwl3945_priv *priv, const u8 *addr, int is_ap, u8
480 return index; 440 return index;
481 } 441 }
482 442
483 IWL_DEBUG_ASSOC("Add STA ID %d: %s\n", index, print_mac(mac, addr)); 443 IWL_DEBUG_ASSOC("Add STA ID %d: %pM\n", index, addr);
484 station = &priv->stations[index]; 444 station = &priv->stations[index];
485 station->used = 1; 445 station->used = 1;
486 priv->num_stations++; 446 priv->num_stations++;
@@ -559,7 +519,7 @@ static inline int iwl3945_is_ready_rf(struct iwl3945_priv *priv)
559 519
560/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 520/*************** HOST COMMAND QUEUE FUNCTIONS *****/
561 521
562#define IWL_CMD(x) case x : return #x 522#define IWL_CMD(x) case x: return #x
563 523
564static const char *get_cmd_string(u8 cmd) 524static const char *get_cmd_string(u8 cmd)
565{ 525{
@@ -1063,7 +1023,6 @@ static int iwl3945_commit_rxon(struct iwl3945_priv *priv)
1063 /* cast away the const for active_rxon in this function */ 1023 /* cast away the const for active_rxon in this function */
1064 struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active_rxon; 1024 struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
1065 int rc = 0; 1025 int rc = 0;
1066 DECLARE_MAC_BUF(mac);
1067 1026
1068 if (!iwl3945_is_alive(priv)) 1027 if (!iwl3945_is_alive(priv))
1069 return -1; 1028 return -1;
@@ -1124,11 +1083,11 @@ static int iwl3945_commit_rxon(struct iwl3945_priv *priv)
1124 IWL_DEBUG_INFO("Sending RXON\n" 1083 IWL_DEBUG_INFO("Sending RXON\n"
1125 "* with%s RXON_FILTER_ASSOC_MSK\n" 1084 "* with%s RXON_FILTER_ASSOC_MSK\n"
1126 "* channel = %d\n" 1085 "* channel = %d\n"
1127 "* bssid = %s\n", 1086 "* bssid = %pM\n",
1128 ((priv->staging_rxon.filter_flags & 1087 ((priv->staging_rxon.filter_flags &
1129 RXON_FILTER_ASSOC_MSK) ? "" : "out"), 1088 RXON_FILTER_ASSOC_MSK) ? "" : "out"),
1130 le16_to_cpu(priv->staging_rxon.channel), 1089 le16_to_cpu(priv->staging_rxon.channel),
1131 print_mac(mac, priv->staging_rxon.bssid_addr)); 1090 priv->staging_rxon.bssid_addr);
1132 1091
1133 /* Apply the new configuration */ 1092 /* Apply the new configuration */
1134 rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON, 1093 rc = iwl3945_send_cmd_pdu(priv, REPLY_RXON,
@@ -1443,7 +1402,7 @@ static void iwl3945_free_frame(struct iwl3945_priv *priv, struct iwl3945_frame *
1443 1402
1444unsigned int iwl3945_fill_beacon_frame(struct iwl3945_priv *priv, 1403unsigned int iwl3945_fill_beacon_frame(struct iwl3945_priv *priv,
1445 struct ieee80211_hdr *hdr, 1404 struct ieee80211_hdr *hdr,
1446 const u8 *dest, int left) 1405 int left)
1447{ 1406{
1448 1407
1449 if (!iwl3945_is_associated(priv) || !priv->ibss_beacon || 1408 if (!iwl3945_is_associated(priv) || !priv->ibss_beacon ||
@@ -1459,9 +1418,16 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl3945_priv *priv,
1459 return priv->ibss_beacon->len; 1418 return priv->ibss_beacon->len;
1460} 1419}
1461 1420
1462static u8 iwl3945_rate_get_lowest_plcp(int rate_mask) 1421static u8 iwl3945_rate_get_lowest_plcp(struct iwl3945_priv *priv)
1463{ 1422{
1464 u8 i; 1423 u8 i;
1424 int rate_mask;
1425
1426 /* Set rate mask*/
1427 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
1428 rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
1429 else
1430 rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
1465 1431
1466 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID; 1432 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
1467 i = iwl3945_rates[i].next_ieee) { 1433 i = iwl3945_rates[i].next_ieee) {
@@ -1469,7 +1435,11 @@ static u8 iwl3945_rate_get_lowest_plcp(int rate_mask)
1469 return iwl3945_rates[i].plcp; 1435 return iwl3945_rates[i].plcp;
1470 } 1436 }
1471 1437
1472 return IWL_RATE_INVALID; 1438 /* No valid rate was found. Assign the lowest one */
1439 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
1440 return IWL_RATE_1M_PLCP;
1441 else
1442 return IWL_RATE_6M_PLCP;
1473} 1443}
1474 1444
1475static int iwl3945_send_beacon_cmd(struct iwl3945_priv *priv) 1445static int iwl3945_send_beacon_cmd(struct iwl3945_priv *priv)
@@ -1487,16 +1457,7 @@ static int iwl3945_send_beacon_cmd(struct iwl3945_priv *priv)
1487 return -ENOMEM; 1457 return -ENOMEM;
1488 } 1458 }
1489 1459
1490 if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) { 1460 rate = iwl3945_rate_get_lowest_plcp(priv);
1491 rate = iwl3945_rate_get_lowest_plcp(priv->active_rate_basic &
1492 0xFF0);
1493 if (rate == IWL_INVALID_RATE)
1494 rate = IWL_RATE_6M_PLCP;
1495 } else {
1496 rate = iwl3945_rate_get_lowest_plcp(priv->active_rate_basic & 0xF);
1497 if (rate == IWL_INVALID_RATE)
1498 rate = IWL_RATE_1M_PLCP;
1499 }
1500 1461
1501 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); 1462 frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
1502 1463
@@ -1544,10 +1505,8 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv)
1544{ 1505{
1545 u16 *e = (u16 *)&priv->eeprom; 1506 u16 *e = (u16 *)&priv->eeprom;
1546 u32 gp = iwl3945_read32(priv, CSR_EEPROM_GP); 1507 u32 gp = iwl3945_read32(priv, CSR_EEPROM_GP);
1547 u32 r;
1548 int sz = sizeof(priv->eeprom); 1508 int sz = sizeof(priv->eeprom);
1549 int rc; 1509 int ret;
1550 int i;
1551 u16 addr; 1510 u16 addr;
1552 1511
1553 /* The EEPROM structure has several padding buffers within it 1512 /* The EEPROM structure has several padding buffers within it
@@ -1562,29 +1521,28 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv)
1562 } 1521 }
1563 1522
1564 /* Make sure driver (instead of uCode) is allowed to read EEPROM */ 1523 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
1565 rc = iwl3945_eeprom_acquire_semaphore(priv); 1524 ret = iwl3945_eeprom_acquire_semaphore(priv);
1566 if (rc < 0) { 1525 if (ret < 0) {
1567 IWL_ERROR("Failed to acquire EEPROM semaphore.\n"); 1526 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
1568 return -ENOENT; 1527 return -ENOENT;
1569 } 1528 }
1570 1529
1571 /* eeprom is an array of 16bit values */ 1530 /* eeprom is an array of 16bit values */
1572 for (addr = 0; addr < sz; addr += sizeof(u16)) { 1531 for (addr = 0; addr < sz; addr += sizeof(u16)) {
1573 _iwl3945_write32(priv, CSR_EEPROM_REG, addr << 1); 1532 u32 r;
1574 _iwl3945_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1575
1576 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1577 i += IWL_EEPROM_ACCESS_DELAY) {
1578 r = _iwl3945_read_direct32(priv, CSR_EEPROM_REG);
1579 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1580 break;
1581 udelay(IWL_EEPROM_ACCESS_DELAY);
1582 }
1583 1533
1584 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { 1534 _iwl3945_write32(priv, CSR_EEPROM_REG,
1535 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
1536 _iwl3945_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1537 ret = iwl3945_poll_direct_bit(priv, CSR_EEPROM_REG,
1538 CSR_EEPROM_REG_READ_VALID_MSK,
1539 IWL_EEPROM_ACCESS_TIMEOUT);
1540 if (ret < 0) {
1585 IWL_ERROR("Time out reading EEPROM[%d]\n", addr); 1541 IWL_ERROR("Time out reading EEPROM[%d]\n", addr);
1586 return -ETIMEDOUT; 1542 return ret;
1587 } 1543 }
1544
1545 r = _iwl3945_read_direct32(priv, CSR_EEPROM_REG);
1588 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16)); 1546 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
1589 } 1547 }
1590 1548
@@ -1634,7 +1592,7 @@ static u16 iwl3945_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1634 */ 1592 */
1635static u16 iwl3945_fill_probe_req(struct iwl3945_priv *priv, 1593static u16 iwl3945_fill_probe_req(struct iwl3945_priv *priv,
1636 struct ieee80211_mgmt *frame, 1594 struct ieee80211_mgmt *frame,
1637 int left, int is_direct) 1595 int left)
1638{ 1596{
1639 int len = 0; 1597 int len = 0;
1640 u8 *pos = NULL; 1598 u8 *pos = NULL;
@@ -1664,20 +1622,6 @@ static u16 iwl3945_fill_probe_req(struct iwl3945_priv *priv,
1664 *pos++ = WLAN_EID_SSID; 1622 *pos++ = WLAN_EID_SSID;
1665 *pos++ = 0; 1623 *pos++ = 0;
1666 1624
1667 /* fill in our direct SSID IE... */
1668 if (is_direct) {
1669 /* ...next IE... */
1670 left -= 2 + priv->essid_len;
1671 if (left < 0)
1672 return 0;
1673 /* ... fill it in... */
1674 *pos++ = WLAN_EID_SSID;
1675 *pos++ = priv->essid_len;
1676 memcpy(pos, priv->essid, priv->essid_len);
1677 pos += priv->essid_len;
1678 len += 2 + priv->essid_len;
1679 }
1680
1681 /* fill in supported rate */ 1625 /* fill in supported rate */
1682 /* ...next IE... */ 1626 /* ...next IE... */
1683 left -= 2; 1627 left -= 2;
@@ -1746,17 +1690,21 @@ static void iwl3945_reset_qos(struct iwl3945_priv *priv)
1746 spin_lock_irqsave(&priv->lock, flags); 1690 spin_lock_irqsave(&priv->lock, flags);
1747 priv->qos_data.qos_active = 0; 1691 priv->qos_data.qos_active = 0;
1748 1692
1749 if (priv->iw_mode == NL80211_IFTYPE_ADHOC) { 1693 /* QoS always active in AP and ADHOC mode
1750 if (priv->qos_data.qos_enable) 1694 * In STA mode wait for association
1751 priv->qos_data.qos_active = 1; 1695 */
1752 if (!(priv->active_rate & 0xfff0)) { 1696 if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
1753 cw_min = 31; 1697 priv->iw_mode == NL80211_IFTYPE_AP)
1754 is_legacy = 1; 1698 priv->qos_data.qos_active = 1;
1755 } 1699 else
1756 } else if (priv->iw_mode == NL80211_IFTYPE_AP) { 1700 priv->qos_data.qos_active = 0;
1757 if (priv->qos_data.qos_enable) 1701
1758 priv->qos_data.qos_active = 1; 1702
1759 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { 1703 /* check for legacy mode */
1704 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
1705 (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
1706 (priv->iw_mode == NL80211_IFTYPE_STATION &&
1707 (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
1760 cw_min = 31; 1708 cw_min = 31;
1761 is_legacy = 1; 1709 is_legacy = 1;
1762 } 1710 }
@@ -1828,9 +1776,6 @@ static void iwl3945_activate_qos(struct iwl3945_priv *priv, u8 force)
1828 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1776 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1829 return; 1777 return;
1830 1778
1831 if (!priv->qos_data.qos_enable)
1832 return;
1833
1834 spin_lock_irqsave(&priv->lock, flags); 1779 spin_lock_irqsave(&priv->lock, flags);
1835 priv->qos_data.def_qos_parm.qos_flags = 0; 1780 priv->qos_data.def_qos_parm.qos_flags = 0;
1836 1781
@@ -1846,7 +1791,7 @@ static void iwl3945_activate_qos(struct iwl3945_priv *priv, u8 force)
1846 spin_unlock_irqrestore(&priv->lock, flags); 1791 spin_unlock_irqrestore(&priv->lock, flags);
1847 1792
1848 if (force || iwl3945_is_associated(priv)) { 1793 if (force || iwl3945_is_associated(priv)) {
1849 IWL_DEBUG_QOS("send QoS cmd with Qos active %d \n", 1794 IWL_DEBUG_QOS("send QoS cmd with QoS active %d \n",
1850 priv->qos_data.qos_active); 1795 priv->qos_data.qos_active);
1851 1796
1852 iwl3945_send_qos_params_command(priv, 1797 iwl3945_send_qos_params_command(priv,
@@ -1870,7 +1815,7 @@ static void iwl3945_activate_qos(struct iwl3945_priv *priv, u8 force)
1870 1815
1871 1816
1872/* default power management (not Tx power) table values */ 1817/* default power management (not Tx power) table values */
1873/* for tim 0-10 */ 1818/* for TIM 0-10 */
1874static struct iwl3945_power_vec_entry range_0[IWL_POWER_AC] = { 1819static struct iwl3945_power_vec_entry range_0[IWL_POWER_AC] = {
1875 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, 1820 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1876 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, 1821 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
@@ -1880,7 +1825,7 @@ static struct iwl3945_power_vec_entry range_0[IWL_POWER_AC] = {
1880 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1} 1825 {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1}
1881}; 1826};
1882 1827
1883/* for tim > 10 */ 1828/* for TIM > 10 */
1884static struct iwl3945_power_vec_entry range_1[IWL_POWER_AC] = { 1829static struct iwl3945_power_vec_entry range_1[IWL_POWER_AC] = {
1885 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, 1830 {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0},
1886 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), 1831 {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500),
@@ -2156,11 +2101,6 @@ static void iwl3945_setup_rxon_timing(struct iwl3945_priv *priv)
2156 2101
2157static int iwl3945_scan_initiate(struct iwl3945_priv *priv) 2102static int iwl3945_scan_initiate(struct iwl3945_priv *priv)
2158{ 2103{
2159 if (priv->iw_mode == NL80211_IFTYPE_AP) {
2160 IWL_ERROR("APs don't scan.\n");
2161 return 0;
2162 }
2163
2164 if (!iwl3945_is_ready_rf(priv)) { 2104 if (!iwl3945_is_ready_rf(priv)) {
2165 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n"); 2105 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2166 return -EIO; 2106 return -EIO;
@@ -2230,13 +2170,14 @@ static void iwl3945_set_flags_for_phymode(struct iwl3945_priv *priv,
2230/* 2170/*
2231 * initialize rxon structure with default values from eeprom 2171 * initialize rxon structure with default values from eeprom
2232 */ 2172 */
2233static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv) 2173static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv,
2174 int mode)
2234{ 2175{
2235 const struct iwl3945_channel_info *ch_info; 2176 const struct iwl3945_channel_info *ch_info;
2236 2177
2237 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); 2178 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2238 2179
2239 switch (priv->iw_mode) { 2180 switch (mode) {
2240 case NL80211_IFTYPE_AP: 2181 case NL80211_IFTYPE_AP:
2241 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; 2182 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
2242 break; 2183 break;
@@ -2259,7 +2200,7 @@ static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv)
2259 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 2200 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2260 break; 2201 break;
2261 default: 2202 default:
2262 IWL_ERROR("Unsupported interface type %d\n", priv->iw_mode); 2203 IWL_ERROR("Unsupported interface type %d\n", mode);
2263 break; 2204 break;
2264 } 2205 }
2265 2206
@@ -2282,8 +2223,7 @@ static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv)
2282 * in some case A channels are all non IBSS 2223 * in some case A channels are all non IBSS
2283 * in this case force B/G channel 2224 * in this case force B/G channel
2284 */ 2225 */
2285 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) && 2226 if ((mode == NL80211_IFTYPE_ADHOC) && !(is_channel_ibss(ch_info)))
2286 !(is_channel_ibss(ch_info)))
2287 ch_info = &priv->channel_info[0]; 2227 ch_info = &priv->channel_info[0];
2288 2228
2289 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); 2229 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
@@ -2316,14 +2256,12 @@ static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode)
2316 } 2256 }
2317 } 2257 }
2318 2258
2319 priv->iw_mode = mode; 2259 iwl3945_connection_init_rx_config(priv, mode);
2320
2321 iwl3945_connection_init_rx_config(priv);
2322 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); 2260 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2323 2261
2324 iwl3945_clear_stations_table(priv); 2262 iwl3945_clear_stations_table(priv);
2325 2263
2326 /* dont commit rxon if rf-kill is on*/ 2264 /* don't commit rxon if rf-kill is on*/
2327 if (!iwl3945_is_ready_rf(priv)) 2265 if (!iwl3945_is_ready_rf(priv))
2328 return -EAGAIN; 2266 return -EAGAIN;
2329 2267
@@ -2352,7 +2290,7 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl3945_priv *priv,
2352 case ALG_CCMP: 2290 case ALG_CCMP:
2353 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM; 2291 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2354 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen); 2292 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
2355 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n"); 2293 IWL_DEBUG_TX("tx_cmd with AES hwcrypto\n");
2356 break; 2294 break;
2357 2295
2358 case ALG_TKIP: 2296 case ALG_TKIP:
@@ -2397,6 +2335,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2397{ 2335{
2398 __le16 fc = hdr->frame_control; 2336 __le16 fc = hdr->frame_control;
2399 __le32 tx_flags = cmd->cmd.tx.tx_flags; 2337 __le32 tx_flags = cmd->cmd.tx.tx_flags;
2338 u8 rc_flags = info->control.rates[0].flags;
2400 2339
2401 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2340 cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2402 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 2341 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
@@ -2423,10 +2362,10 @@ static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2423 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 2362 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
2424 } 2363 }
2425 2364
2426 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { 2365 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
2427 tx_flags |= TX_CMD_FLG_RTS_MSK; 2366 tx_flags |= TX_CMD_FLG_RTS_MSK;
2428 tx_flags &= ~TX_CMD_FLG_CTS_MSK; 2367 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
2429 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { 2368 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
2430 tx_flags &= ~TX_CMD_FLG_RTS_MSK; 2369 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
2431 tx_flags |= TX_CMD_FLG_CTS_MSK; 2370 tx_flags |= TX_CMD_FLG_CTS_MSK;
2432 } 2371 }
@@ -2482,8 +2421,6 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2482 /* If this frame is going out to an IBSS network, find the station, 2421 /* If this frame is going out to an IBSS network, find the station,
2483 * or create a new station table entry */ 2422 * or create a new station table entry */
2484 case NL80211_IFTYPE_ADHOC: { 2423 case NL80211_IFTYPE_ADHOC: {
2485 DECLARE_MAC_BUF(mac);
2486
2487 /* Create new station table entry */ 2424 /* Create new station table entry */
2488 sta_id = iwl3945_hw_find_station(priv, hdr->addr1); 2425 sta_id = iwl3945_hw_find_station(priv, hdr->addr1);
2489 if (sta_id != IWL_INVALID_STATION) 2426 if (sta_id != IWL_INVALID_STATION)
@@ -2494,9 +2431,9 @@ static int iwl3945_get_sta_id(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2494 if (sta_id != IWL_INVALID_STATION) 2431 if (sta_id != IWL_INVALID_STATION)
2495 return sta_id; 2432 return sta_id;
2496 2433
2497 IWL_DEBUG_DROP("Station %s not in station map. " 2434 IWL_DEBUG_DROP("Station %pM not in station map. "
2498 "Defaulting to broadcast...\n", 2435 "Defaulting to broadcast...\n",
2499 print_mac(mac, hdr->addr1)); 2436 hdr->addr1);
2500 iwl3945_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); 2437 iwl3945_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2501 return priv->hw_setting.bcast_sta_id; 2438 return priv->hw_setting.bcast_sta_id;
2502 } 2439 }
@@ -2579,10 +2516,8 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv, struct sk_buff *skb)
2579 /* Find (or create) index into station table for destination station */ 2516 /* Find (or create) index into station table for destination station */
2580 sta_id = iwl3945_get_sta_id(priv, hdr); 2517 sta_id = iwl3945_get_sta_id(priv, hdr);
2581 if (sta_id == IWL_INVALID_STATION) { 2518 if (sta_id == IWL_INVALID_STATION) {
2582 DECLARE_MAC_BUF(mac); 2519 IWL_DEBUG_DROP("Dropping - INVALID STATION: %pM\n",
2583 2520 hdr->addr1);
2584 IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
2585 print_mac(mac, hdr->addr1));
2586 goto drop; 2521 goto drop;
2587 } 2522 }
2588 2523
@@ -4019,8 +3954,6 @@ static int iwl3945_tx_queue_update_write_ptr(struct iwl3945_priv *priv,
4019#ifdef CONFIG_IWL3945_DEBUG 3954#ifdef CONFIG_IWL3945_DEBUG
4020static void iwl3945_print_rx_config_cmd(struct iwl3945_rxon_cmd *rxon) 3955static void iwl3945_print_rx_config_cmd(struct iwl3945_rxon_cmd *rxon)
4021{ 3956{
4022 DECLARE_MAC_BUF(mac);
4023
4024 IWL_DEBUG_RADIO("RX CONFIG:\n"); 3957 IWL_DEBUG_RADIO("RX CONFIG:\n");
4025 iwl3945_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); 3958 iwl3945_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4026 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); 3959 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
@@ -4031,10 +3964,8 @@ static void iwl3945_print_rx_config_cmd(struct iwl3945_rxon_cmd *rxon)
4031 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n", 3964 IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n",
4032 rxon->ofdm_basic_rates); 3965 rxon->ofdm_basic_rates);
4033 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); 3966 IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
4034 IWL_DEBUG_RADIO("u8[6] node_addr: %s\n", 3967 IWL_DEBUG_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
4035 print_mac(mac, rxon->node_addr)); 3968 IWL_DEBUG_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
4036 IWL_DEBUG_RADIO("u8[6] bssid_addr: %s\n",
4037 print_mac(mac, rxon->bssid_addr));
4038 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); 3969 IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
4039} 3970}
4040#endif 3971#endif
@@ -4050,7 +3981,7 @@ static void iwl3945_enable_interrupts(struct iwl3945_priv *priv)
4050/* call this function to flush any scheduled tasklet */ 3981/* call this function to flush any scheduled tasklet */
4051static inline void iwl_synchronize_irq(struct iwl3945_priv *priv) 3982static inline void iwl_synchronize_irq(struct iwl3945_priv *priv)
4052{ 3983{
4053 /* wait to make sure we flush pedding tasklet*/ 3984 /* wait to make sure we flush pending tasklet*/
4054 synchronize_irq(priv->pci_dev->irq); 3985 synchronize_irq(priv->pci_dev->irq);
4055 tasklet_kill(&priv->irq_tasklet); 3986 tasklet_kill(&priv->irq_tasklet);
4056} 3987}
@@ -4373,35 +4304,6 @@ static void iwl3945_irq_tasklet(struct iwl3945_priv *priv)
4373 /* Safely ignore these bits for debug checks below */ 4304 /* Safely ignore these bits for debug checks below */
4374 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 4305 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
4375 4306
4376 /* HW RF KILL switch toggled (4965 only) */
4377 if (inta & CSR_INT_BIT_RF_KILL) {
4378 int hw_rf_kill = 0;
4379 if (!(iwl3945_read32(priv, CSR_GP_CNTRL) &
4380 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
4381 hw_rf_kill = 1;
4382
4383 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR,
4384 "RF_KILL bit toggled to %s.\n",
4385 hw_rf_kill ? "disable radio":"enable radio");
4386
4387 /* Queue restart only if RF_KILL switch was set to "kill"
4388 * when we loaded driver, and is now set to "enable".
4389 * After we're Alive, RF_KILL gets handled by
4390 * iwl3945_rx_card_state_notif() */
4391 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) {
4392 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4393 queue_work(priv->workqueue, &priv->restart);
4394 }
4395
4396 handled |= CSR_INT_BIT_RF_KILL;
4397 }
4398
4399 /* Chip got too hot and stopped itself (4965 only) */
4400 if (inta & CSR_INT_BIT_CT_KILL) {
4401 IWL_ERROR("Microcode CT kill error detected.\n");
4402 handled |= CSR_INT_BIT_CT_KILL;
4403 }
4404
4405 /* Error detected by uCode */ 4307 /* Error detected by uCode */
4406 if (inta & CSR_INT_BIT_SW_ERR) { 4308 if (inta & CSR_INT_BIT_SW_ERR) {
4407 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n", 4309 IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n",
@@ -4502,7 +4404,7 @@ static irqreturn_t iwl3945_isr(int irq, void *data)
4502 4404
4503 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 4405 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
4504 /* Hardware disappeared */ 4406 /* Hardware disappeared */
4505 IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta); 4407 IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta);
4506 goto unplugged; 4408 goto unplugged;
4507 } 4409 }
4508 4410
@@ -4805,7 +4707,7 @@ static void iwl3945_free_channel_map(struct iwl3945_priv *priv)
4805#define IWL_PASSIVE_DWELL_BASE (100) 4707#define IWL_PASSIVE_DWELL_BASE (100)
4806#define IWL_CHANNEL_TUNE_TIME 5 4708#define IWL_CHANNEL_TUNE_TIME 5
4807 4709
4808#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) 4710#define IWL_SCAN_PROBE_MASK(n) (BIT(n) | (BIT(n) - BIT(1)))
4809 4711
4810static inline u16 iwl3945_get_active_dwell_time(struct iwl3945_priv *priv, 4712static inline u16 iwl3945_get_active_dwell_time(struct iwl3945_priv *priv,
4811 enum ieee80211_band band, 4713 enum ieee80211_band band,
@@ -4876,17 +4778,33 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4876 continue; 4778 continue;
4877 } 4779 }
4878 4780
4781 scan_ch->active_dwell = cpu_to_le16(active_dwell);
4782 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
4783 /* If passive , set up for auto-switch
4784 * and use long active_dwell time.
4785 */
4879 if (!is_active || is_channel_passive(ch_info) || 4786 if (!is_active || is_channel_passive(ch_info) ||
4880 (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN)) 4787 (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
4881 scan_ch->type = 0; /* passive */ 4788 scan_ch->type = 0; /* passive */
4882 else 4789 if (IWL_UCODE_API(priv->ucode_ver) == 1)
4790 scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
4791 } else {
4883 scan_ch->type = 1; /* active */ 4792 scan_ch->type = 1; /* active */
4793 }
4884 4794
4885 if ((scan_ch->type & 1) && n_probes) 4795 /* Set direct probe bits. These may be used both for active
4886 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); 4796 * scan channels (probes gets sent right away),
4887 4797 * or for passive channels (probes get se sent only after
4888 scan_ch->active_dwell = cpu_to_le16(active_dwell); 4798 * hearing clear Rx packet).*/
4889 scan_ch->passive_dwell = cpu_to_le16(passive_dwell); 4799 if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
4800 if (n_probes)
4801 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
4802 } else {
4803 /* uCode v1 does not allow setting direct probe bits on
4804 * passive channel. */
4805 if ((scan_ch->type & 1) && n_probes)
4806 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
4807 }
4890 4808
4891 /* Set txpower levels to defaults */ 4809 /* Set txpower levels to defaults */
4892 scan_ch->tpc.dsp_atten = 110; 4810 scan_ch->tpc.dsp_atten = 110;
@@ -5387,25 +5305,41 @@ static void iwl3945_nic_start(struct iwl3945_priv *priv)
5387static int iwl3945_read_ucode(struct iwl3945_priv *priv) 5305static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5388{ 5306{
5389 struct iwl3945_ucode *ucode; 5307 struct iwl3945_ucode *ucode;
5390 int ret = 0; 5308 int ret = -EINVAL, index;
5391 const struct firmware *ucode_raw; 5309 const struct firmware *ucode_raw;
5392 /* firmware file name contains uCode/driver compatibility version */ 5310 /* firmware file name contains uCode/driver compatibility version */
5393 const char *name = priv->cfg->fw_name; 5311 const char *name_pre = priv->cfg->fw_name_pre;
5312 const unsigned int api_max = priv->cfg->ucode_api_max;
5313 const unsigned int api_min = priv->cfg->ucode_api_min;
5314 char buf[25];
5394 u8 *src; 5315 u8 *src;
5395 size_t len; 5316 size_t len;
5396 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size; 5317 u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
5397 5318
5398 /* Ask kernel firmware_class module to get the boot firmware off disk. 5319 /* Ask kernel firmware_class module to get the boot firmware off disk.
5399 * request_firmware() is synchronous, file is in memory on return. */ 5320 * request_firmware() is synchronous, file is in memory on return. */
5400 ret = request_firmware(&ucode_raw, name, &priv->pci_dev->dev); 5321 for (index = api_max; index >= api_min; index--) {
5401 if (ret < 0) { 5322 sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
5402 IWL_ERROR("%s firmware file req failed: Reason %d\n", 5323 ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
5403 name, ret); 5324 if (ret < 0) {
5404 goto error; 5325 IWL_ERROR("%s firmware file req failed: Reason %d\n",
5326 buf, ret);
5327 if (ret == -ENOENT)
5328 continue;
5329 else
5330 goto error;
5331 } else {
5332 if (index < api_max)
5333 IWL_ERROR("Loaded firmware %s, which is deprecated. Please use API v%u instead.\n",
5334 buf, api_max);
5335 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n",
5336 buf, ucode_raw->size);
5337 break;
5338 }
5405 } 5339 }
5406 5340
5407 IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n", 5341 if (ret < 0)
5408 name, ucode_raw->size); 5342 goto error;
5409 5343
5410 /* Make sure that we got at least our header! */ 5344 /* Make sure that we got at least our header! */
5411 if (ucode_raw->size < sizeof(*ucode)) { 5345 if (ucode_raw->size < sizeof(*ucode)) {
@@ -5417,20 +5351,46 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5417 /* Data from ucode file: header followed by uCode images */ 5351 /* Data from ucode file: header followed by uCode images */
5418 ucode = (void *)ucode_raw->data; 5352 ucode = (void *)ucode_raw->data;
5419 5353
5420 ver = le32_to_cpu(ucode->ver); 5354 priv->ucode_ver = le32_to_cpu(ucode->ver);
5355 api_ver = IWL_UCODE_API(priv->ucode_ver);
5421 inst_size = le32_to_cpu(ucode->inst_size); 5356 inst_size = le32_to_cpu(ucode->inst_size);
5422 data_size = le32_to_cpu(ucode->data_size); 5357 data_size = le32_to_cpu(ucode->data_size);
5423 init_size = le32_to_cpu(ucode->init_size); 5358 init_size = le32_to_cpu(ucode->init_size);
5424 init_data_size = le32_to_cpu(ucode->init_data_size); 5359 init_data_size = le32_to_cpu(ucode->init_data_size);
5425 boot_size = le32_to_cpu(ucode->boot_size); 5360 boot_size = le32_to_cpu(ucode->boot_size);
5426 5361
5427 IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver); 5362 /* api_ver should match the api version forming part of the
5363 * firmware filename ... but we don't check for that and only rely
5364 * on the API version read from firware header from here on forward */
5365
5366 if (api_ver < api_min || api_ver > api_max) {
5367 IWL_ERROR("Driver unable to support your firmware API. "
5368 "Driver supports v%u, firmware is v%u.\n",
5369 api_max, api_ver);
5370 priv->ucode_ver = 0;
5371 ret = -EINVAL;
5372 goto err_release;
5373 }
5374 if (api_ver != api_max)
5375 IWL_ERROR("Firmware has old API version. Expected %u, "
5376 "got %u. New firmware can be obtained "
5377 "from http://www.intellinuxwireless.org.\n",
5378 api_max, api_ver);
5379
5380 printk(KERN_INFO DRV_NAME " loaded firmware version %u.%u.%u.%u\n",
5381 IWL_UCODE_MAJOR(priv->ucode_ver),
5382 IWL_UCODE_MINOR(priv->ucode_ver),
5383 IWL_UCODE_API(priv->ucode_ver),
5384 IWL_UCODE_SERIAL(priv->ucode_ver));
5385 IWL_DEBUG_INFO("f/w package hdr ucode version raw = 0x%x\n",
5386 priv->ucode_ver);
5428 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", inst_size); 5387 IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
5429 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", data_size); 5388 IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", data_size);
5430 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", init_size); 5389 IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", init_size);
5431 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", init_data_size); 5390 IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", init_data_size);
5432 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", boot_size); 5391 IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", boot_size);
5433 5392
5393
5434 /* Verify size of file vs. image size info in file's header */ 5394 /* Verify size of file vs. image size info in file's header */
5435 if (ucode_raw->size < sizeof(*ucode) + 5395 if (ucode_raw->size < sizeof(*ucode) +
5436 inst_size + data_size + init_size + 5396 inst_size + data_size + init_size +
@@ -5607,7 +5567,7 @@ static int iwl3945_set_ucode_ptrs(struct iwl3945_priv *priv)
5607 iwl3945_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, 5567 iwl3945_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
5608 priv->ucode_data.len); 5568 priv->ucode_data.len);
5609 5569
5610 /* Inst bytecount must be last to set up, bit 31 signals uCode 5570 /* Inst byte count must be last to set up, bit 31 signals uCode
5611 * that all new ptr/size info is in place */ 5571 * that all new ptr/size info is in place */
5612 iwl3945_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, 5572 iwl3945_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
5613 priv->ucode_code.len | BSM_DRAM_INST_LOAD); 5573 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
@@ -5665,6 +5625,10 @@ static void iwl3945_init_alive_start(struct iwl3945_priv *priv)
5665} 5625}
5666 5626
5667 5627
5628/* temporary */
5629static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw,
5630 struct sk_buff *skb);
5631
5668/** 5632/**
5669 * iwl3945_alive_start - called after REPLY_ALIVE notification received 5633 * iwl3945_alive_start - called after REPLY_ALIVE notification received
5670 * from protocol/runtime uCode (initialization uCode's 5634 * from protocol/runtime uCode (initialization uCode's
@@ -5699,7 +5663,7 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5699 5663
5700 rc = iwl3945_grab_nic_access(priv); 5664 rc = iwl3945_grab_nic_access(priv);
5701 if (rc) { 5665 if (rc) {
5702 IWL_WARNING("Can not read rfkill status from adapter\n"); 5666 IWL_WARNING("Can not read RFKILL status from adapter\n");
5703 return; 5667 return;
5704 } 5668 }
5705 5669
@@ -5709,7 +5673,7 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5709 5673
5710 if (rfkill & 0x1) { 5674 if (rfkill & 0x1) {
5711 clear_bit(STATUS_RF_KILL_HW, &priv->status); 5675 clear_bit(STATUS_RF_KILL_HW, &priv->status);
5712 /* if rfkill is not on, then wait for thermal 5676 /* if RFKILL is not on, then wait for thermal
5713 * sensor in adapter to kick in */ 5677 * sensor in adapter to kick in */
5714 while (iwl3945_hw_get_temperature(priv) == 0) { 5678 while (iwl3945_hw_get_temperature(priv) == 0) {
5715 thermal_spin++; 5679 thermal_spin++;
@@ -5747,7 +5711,7 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5747 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 5711 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
5748 } else { 5712 } else {
5749 /* Initialize our rx_config data */ 5713 /* Initialize our rx_config data */
5750 iwl3945_connection_init_rx_config(priv); 5714 iwl3945_connection_init_rx_config(priv, priv->iw_mode);
5751 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); 5715 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
5752 } 5716 }
5753 5717
@@ -5768,6 +5732,14 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
5768 if (priv->error_recovering) 5732 if (priv->error_recovering)
5769 iwl3945_error_recovery(priv); 5733 iwl3945_error_recovery(priv);
5770 5734
5735 /* reassociate for ADHOC mode */
5736 if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
5737 struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
5738 priv->vif);
5739 if (beacon)
5740 iwl3945_mac_beacon_update(priv->hw, beacon);
5741 }
5742
5771 return; 5743 return;
5772 5744
5773 restart: 5745 restart:
@@ -5902,7 +5874,7 @@ static int __iwl3945_up(struct iwl3945_priv *priv)
5902 } 5874 }
5903 5875
5904 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { 5876 if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
5905 IWL_ERROR("ucode not available for device bringup\n"); 5877 IWL_ERROR("ucode not available for device bring up\n");
5906 return -EIO; 5878 return -EIO;
5907 } 5879 }
5908 5880
@@ -6046,24 +6018,6 @@ static void iwl3945_bg_rf_kill(struct work_struct *work)
6046 iwl3945_rfkill_set_hw_state(priv); 6018 iwl3945_rfkill_set_hw_state(priv);
6047} 6019}
6048 6020
6049static void iwl3945_bg_set_monitor(struct work_struct *work)
6050{
6051 struct iwl3945_priv *priv = container_of(work,
6052 struct iwl3945_priv, set_monitor);
6053
6054 IWL_DEBUG(IWL_DL_STATE, "setting monitor mode\n");
6055
6056 mutex_lock(&priv->mutex);
6057
6058 if (!iwl3945_is_ready(priv))
6059 IWL_DEBUG(IWL_DL_STATE, "leave - not ready\n");
6060 else
6061 if (iwl3945_set_mode(priv, NL80211_IFTYPE_MONITOR) != 0)
6062 IWL_ERROR("iwl3945_set_mode() failed\n");
6063
6064 mutex_unlock(&priv->mutex);
6065}
6066
6067#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 6021#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
6068 6022
6069static void iwl3945_bg_scan_check(struct work_struct *data) 6023static void iwl3945_bg_scan_check(struct work_struct *data)
@@ -6101,6 +6055,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6101 struct ieee80211_conf *conf = NULL; 6055 struct ieee80211_conf *conf = NULL;
6102 u8 n_probes = 2; 6056 u8 n_probes = 2;
6103 enum ieee80211_band band; 6057 enum ieee80211_band band;
6058 DECLARE_SSID_BUF(ssid);
6104 6059
6105 conf = ieee80211_get_hw_conf(priv->hw); 6060 conf = ieee80211_get_hw_conf(priv->hw);
6106 6061
@@ -6111,7 +6066,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6111 goto done; 6066 goto done;
6112 } 6067 }
6113 6068
6114 /* Make sure the scan wasn't cancelled before this queued work 6069 /* Make sure the scan wasn't canceled before this queued work
6115 * was given the chance to run... */ 6070 * was given the chance to run... */
6116 if (!test_bit(STATUS_SCANNING, &priv->status)) 6071 if (!test_bit(STATUS_SCANNING, &priv->status))
6117 goto done; 6072 goto done;
@@ -6201,21 +6156,13 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6201 if (priv->one_direct_scan) { 6156 if (priv->one_direct_scan) {
6202 IWL_DEBUG_SCAN 6157 IWL_DEBUG_SCAN
6203 ("Kicking off one direct scan for '%s'\n", 6158 ("Kicking off one direct scan for '%s'\n",
6204 iwl3945_escape_essid(priv->direct_ssid, 6159 print_ssid(ssid, priv->direct_ssid,
6205 priv->direct_ssid_len)); 6160 priv->direct_ssid_len));
6206 scan->direct_scan[0].id = WLAN_EID_SSID; 6161 scan->direct_scan[0].id = WLAN_EID_SSID;
6207 scan->direct_scan[0].len = priv->direct_ssid_len; 6162 scan->direct_scan[0].len = priv->direct_ssid_len;
6208 memcpy(scan->direct_scan[0].ssid, 6163 memcpy(scan->direct_scan[0].ssid,
6209 priv->direct_ssid, priv->direct_ssid_len); 6164 priv->direct_ssid, priv->direct_ssid_len);
6210 n_probes++; 6165 n_probes++;
6211 } else if (!iwl3945_is_associated(priv) && priv->essid_len) {
6212 IWL_DEBUG_SCAN
6213 ("Kicking off one direct scan for '%s' when not associated\n",
6214 iwl3945_escape_essid(priv->essid, priv->essid_len));
6215 scan->direct_scan[0].id = WLAN_EID_SSID;
6216 scan->direct_scan[0].len = priv->essid_len;
6217 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
6218 n_probes++;
6219 } else 6166 } else
6220 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n"); 6167 IWL_DEBUG_SCAN("Kicking off one indirect scan.\n");
6221 6168
@@ -6223,7 +6170,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6223 * that based on the direct_mask added to each channel entry */ 6170 * that based on the direct_mask added to each channel entry */
6224 scan->tx_cmd.len = cpu_to_le16( 6171 scan->tx_cmd.len = cpu_to_le16(
6225 iwl3945_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, 6172 iwl3945_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
6226 IWL_MAX_SCAN_SIZE - sizeof(*scan), 0)); 6173 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
6227 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 6174 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
6228 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id; 6175 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id;
6229 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 6176 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
@@ -6333,7 +6280,6 @@ static void iwl3945_post_associate(struct iwl3945_priv *priv)
6333{ 6280{
6334 int rc = 0; 6281 int rc = 0;
6335 struct ieee80211_conf *conf = NULL; 6282 struct ieee80211_conf *conf = NULL;
6336 DECLARE_MAC_BUF(mac);
6337 6283
6338 if (priv->iw_mode == NL80211_IFTYPE_AP) { 6284 if (priv->iw_mode == NL80211_IFTYPE_AP) {
6339 IWL_ERROR("%s Should not be called in AP mode\n", __func__); 6285 IWL_ERROR("%s Should not be called in AP mode\n", __func__);
@@ -6341,9 +6287,8 @@ static void iwl3945_post_associate(struct iwl3945_priv *priv)
6341 } 6287 }
6342 6288
6343 6289
6344 IWL_DEBUG_ASSOC("Associated as %d to: %s\n", 6290 IWL_DEBUG_ASSOC("Associated as %d to: %pM\n",
6345 priv->assoc_id, 6291 priv->assoc_id, priv->active_rxon.bssid_addr);
6346 print_mac(mac, priv->active_rxon.bssid_addr));
6347 6292
6348 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 6293 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6349 return; 6294 return;
@@ -6398,10 +6343,7 @@ static void iwl3945_post_associate(struct iwl3945_priv *priv)
6398 6343
6399 case NL80211_IFTYPE_ADHOC: 6344 case NL80211_IFTYPE_ADHOC:
6400 6345
6401 /* clear out the station table */ 6346 priv->assoc_id = 1;
6402 iwl3945_clear_stations_table(priv);
6403
6404 iwl3945_add_station(priv, iwl3945_broadcast_addr, 0, 0);
6405 iwl3945_add_station(priv, priv->bssid, 0, 0); 6347 iwl3945_add_station(priv, priv->bssid, 0, 0);
6406 iwl3945_sync_sta(priv, IWL_STA_ID, 6348 iwl3945_sync_sta(priv, IWL_STA_ID,
6407 (priv->band == IEEE80211_BAND_5GHZ) ? 6349 (priv->band == IEEE80211_BAND_5GHZ) ?
@@ -6439,7 +6381,7 @@ static void iwl3945_bg_abort_scan(struct work_struct *work)
6439 mutex_unlock(&priv->mutex); 6381 mutex_unlock(&priv->mutex);
6440} 6382}
6441 6383
6442static int iwl3945_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf); 6384static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed);
6443 6385
6444static void iwl3945_bg_scan_completed(struct work_struct *work) 6386static void iwl3945_bg_scan_completed(struct work_struct *work)
6445{ 6387{
@@ -6452,7 +6394,7 @@ static void iwl3945_bg_scan_completed(struct work_struct *work)
6452 return; 6394 return;
6453 6395
6454 if (test_bit(STATUS_CONF_PENDING, &priv->status)) 6396 if (test_bit(STATUS_CONF_PENDING, &priv->status))
6455 iwl3945_mac_config(priv->hw, ieee80211_get_hw_conf(priv->hw)); 6397 iwl3945_mac_config(priv->hw, 0);
6456 6398
6457 ieee80211_scan_completed(priv->hw); 6399 ieee80211_scan_completed(priv->hw);
6458 6400
@@ -6604,7 +6546,6 @@ static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
6604{ 6546{
6605 struct iwl3945_priv *priv = hw->priv; 6547 struct iwl3945_priv *priv = hw->priv;
6606 unsigned long flags; 6548 unsigned long flags;
6607 DECLARE_MAC_BUF(mac);
6608 6549
6609 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type); 6550 IWL_DEBUG_MAC80211("enter: type %d\n", conf->type);
6610 6551
@@ -6615,13 +6556,14 @@ static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
6615 6556
6616 spin_lock_irqsave(&priv->lock, flags); 6557 spin_lock_irqsave(&priv->lock, flags);
6617 priv->vif = conf->vif; 6558 priv->vif = conf->vif;
6559 priv->iw_mode = conf->type;
6618 6560
6619 spin_unlock_irqrestore(&priv->lock, flags); 6561 spin_unlock_irqrestore(&priv->lock, flags);
6620 6562
6621 mutex_lock(&priv->mutex); 6563 mutex_lock(&priv->mutex);
6622 6564
6623 if (conf->mac_addr) { 6565 if (conf->mac_addr) {
6624 IWL_DEBUG_MAC80211("Set: %s\n", print_mac(mac, conf->mac_addr)); 6566 IWL_DEBUG_MAC80211("Set: %pM\n", conf->mac_addr);
6625 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 6567 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
6626 } 6568 }
6627 6569
@@ -6641,10 +6583,11 @@ static int iwl3945_mac_add_interface(struct ieee80211_hw *hw,
6641 * be set inappropriately and the driver currently sets the hardware up to 6583 * be set inappropriately and the driver currently sets the hardware up to
6642 * use it whenever needed. 6584 * use it whenever needed.
6643 */ 6585 */
6644static int iwl3945_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 6586static int iwl3945_mac_config(struct ieee80211_hw *hw, u32 changed)
6645{ 6587{
6646 struct iwl3945_priv *priv = hw->priv; 6588 struct iwl3945_priv *priv = hw->priv;
6647 const struct iwl3945_channel_info *ch_info; 6589 const struct iwl3945_channel_info *ch_info;
6590 struct ieee80211_conf *conf = &hw->conf;
6648 unsigned long flags; 6591 unsigned long flags;
6649 int ret = 0; 6592 int ret = 0;
6650 6593
@@ -6782,16 +6725,11 @@ static void iwl3945_config_ap(struct iwl3945_priv *priv)
6782 * clear sta table, add BCAST sta... */ 6725 * clear sta table, add BCAST sta... */
6783} 6726}
6784 6727
6785/* temporary */
6786static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
6787
6788static int iwl3945_mac_config_interface(struct ieee80211_hw *hw, 6728static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6789 struct ieee80211_vif *vif, 6729 struct ieee80211_vif *vif,
6790 struct ieee80211_if_conf *conf) 6730 struct ieee80211_if_conf *conf)
6791{ 6731{
6792 struct iwl3945_priv *priv = hw->priv; 6732 struct iwl3945_priv *priv = hw->priv;
6793 DECLARE_MAC_BUF(mac);
6794 unsigned long flags;
6795 int rc; 6733 int rc;
6796 6734
6797 if (conf == NULL) 6735 if (conf == NULL)
@@ -6808,28 +6746,20 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6808 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 6746 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
6809 if (!beacon) 6747 if (!beacon)
6810 return -ENOMEM; 6748 return -ENOMEM;
6749 mutex_lock(&priv->mutex);
6811 rc = iwl3945_mac_beacon_update(hw, beacon); 6750 rc = iwl3945_mac_beacon_update(hw, beacon);
6751 mutex_unlock(&priv->mutex);
6812 if (rc) 6752 if (rc)
6813 return rc; 6753 return rc;
6814 } 6754 }
6815 6755
6816 /* XXX: this MUST use conf->mac_addr */
6817
6818 if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
6819 (!conf->ssid_len)) {
6820 IWL_DEBUG_MAC80211
6821 ("Leaving in AP mode because HostAPD is not ready.\n");
6822 return 0;
6823 }
6824
6825 if (!iwl3945_is_alive(priv)) 6756 if (!iwl3945_is_alive(priv))
6826 return -EAGAIN; 6757 return -EAGAIN;
6827 6758
6828 mutex_lock(&priv->mutex); 6759 mutex_lock(&priv->mutex);
6829 6760
6830 if (conf->bssid) 6761 if (conf->bssid)
6831 IWL_DEBUG_MAC80211("bssid: %s\n", 6762 IWL_DEBUG_MAC80211("bssid: %pM\n", conf->bssid);
6832 print_mac(mac, conf->bssid));
6833 6763
6834/* 6764/*
6835 * very dubious code was here; the probe filtering flag is never set: 6765 * very dubious code was here; the probe filtering flag is never set:
@@ -6842,8 +6772,8 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6842 if (!conf->bssid) { 6772 if (!conf->bssid) {
6843 conf->bssid = priv->mac_addr; 6773 conf->bssid = priv->mac_addr;
6844 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); 6774 memcpy(priv->bssid, priv->mac_addr, ETH_ALEN);
6845 IWL_DEBUG_MAC80211("bssid was set to: %s\n", 6775 IWL_DEBUG_MAC80211("bssid was set to: %pM\n",
6846 print_mac(mac, conf->bssid)); 6776 conf->bssid);
6847 } 6777 }
6848 if (priv->ibss_beacon) 6778 if (priv->ibss_beacon)
6849 dev_kfree_skb(priv->ibss_beacon); 6779 dev_kfree_skb(priv->ibss_beacon);
@@ -6889,15 +6819,6 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
6889 } 6819 }
6890 6820
6891 done: 6821 done:
6892 spin_lock_irqsave(&priv->lock, flags);
6893 if (!conf->ssid_len)
6894 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
6895 else
6896 memcpy(priv->essid, conf->ssid, conf->ssid_len);
6897
6898 priv->essid_len = conf->ssid_len;
6899 spin_unlock_irqrestore(&priv->lock, flags);
6900
6901 IWL_DEBUG_MAC80211("leave\n"); 6822 IWL_DEBUG_MAC80211("leave\n");
6902 mutex_unlock(&priv->mutex); 6823 mutex_unlock(&priv->mutex);
6903 6824
@@ -6910,16 +6831,43 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
6910 int mc_count, struct dev_addr_list *mc_list) 6831 int mc_count, struct dev_addr_list *mc_list)
6911{ 6832{
6912 struct iwl3945_priv *priv = hw->priv; 6833 struct iwl3945_priv *priv = hw->priv;
6834 __le32 *filter_flags = &priv->staging_rxon.filter_flags;
6913 6835
6914 if (changed_flags & (*total_flags) & FIF_OTHER_BSS) { 6836 IWL_DEBUG_MAC80211("Enter: changed: 0x%x, total: 0x%x\n",
6915 IWL_DEBUG_MAC80211("Enter: type %d (0x%x, 0x%x)\n", 6837 changed_flags, *total_flags);
6916 NL80211_IFTYPE_MONITOR, 6838
6917 changed_flags, *total_flags); 6839 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
6918 /* queue work 'cuz mac80211 is holding a lock which 6840 if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
6919 * prevents us from issuing (synchronous) f/w cmds */ 6841 *filter_flags |= RXON_FILTER_PROMISC_MSK;
6920 queue_work(priv->workqueue, &priv->set_monitor); 6842 else
6843 *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
6921 } 6844 }
6922 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | 6845 if (changed_flags & FIF_ALLMULTI) {
6846 if (*total_flags & FIF_ALLMULTI)
6847 *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
6848 else
6849 *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
6850 }
6851 if (changed_flags & FIF_CONTROL) {
6852 if (*total_flags & FIF_CONTROL)
6853 *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
6854 else
6855 *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
6856 }
6857 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
6858 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
6859 *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
6860 else
6861 *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
6862 }
6863
6864 /* We avoid iwl_commit_rxon here to commit the new filter flags
6865 * since mac80211 will call ieee80211_hw_config immediately.
6866 * (mc_list is not supported at this time). Otherwise, we need to
6867 * queue a background iwl_commit_rxon work.
6868 */
6869
6870 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
6923 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 6871 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
6924} 6872}
6925 6873
@@ -6940,8 +6888,6 @@ static void iwl3945_mac_remove_interface(struct ieee80211_hw *hw,
6940 if (priv->vif == conf->vif) { 6888 if (priv->vif == conf->vif) {
6941 priv->vif = NULL; 6889 priv->vif = NULL;
6942 memset(priv->bssid, 0, ETH_ALEN); 6890 memset(priv->bssid, 0, ETH_ALEN);
6943 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
6944 priv->essid_len = 0;
6945 } 6891 }
6946 mutex_unlock(&priv->mutex); 6892 mutex_unlock(&priv->mutex);
6947 6893
@@ -7010,6 +6956,7 @@ static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
7010 int rc = 0; 6956 int rc = 0;
7011 unsigned long flags; 6957 unsigned long flags;
7012 struct iwl3945_priv *priv = hw->priv; 6958 struct iwl3945_priv *priv = hw->priv;
6959 DECLARE_SSID_BUF(ssid_buf);
7013 6960
7014 IWL_DEBUG_MAC80211("enter\n"); 6961 IWL_DEBUG_MAC80211("enter\n");
7015 6962
@@ -7022,12 +6969,6 @@ static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
7022 goto out_unlock; 6969 goto out_unlock;
7023 } 6970 }
7024 6971
7025 if (priv->iw_mode == NL80211_IFTYPE_AP) { /* APs don't scan */
7026 rc = -EIO;
7027 IWL_ERROR("ERROR: APs don't scan\n");
7028 goto out_unlock;
7029 }
7030
7031 /* we don't schedule scan within next_scan_jiffies period */ 6972 /* we don't schedule scan within next_scan_jiffies period */
7032 if (priv->next_scan_jiffies && 6973 if (priv->next_scan_jiffies &&
7033 time_after(priv->next_scan_jiffies, jiffies)) { 6974 time_after(priv->next_scan_jiffies, jiffies)) {
@@ -7043,7 +6984,7 @@ static int iwl3945_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
7043 } 6984 }
7044 if (len) { 6985 if (len) {
7045 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ", 6986 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
7046 iwl3945_escape_essid(ssid, len), (int)len); 6987 print_ssid(ssid_buf, ssid, len), (int)len);
7047 6988
7048 priv->one_direct_scan = 1; 6989 priv->one_direct_scan = 1;
7049 priv->direct_ssid_len = (u8) 6990 priv->direct_ssid_len = (u8)
@@ -7084,10 +7025,8 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
7084 7025
7085 sta_id = iwl3945_hw_find_station(priv, addr); 7026 sta_id = iwl3945_hw_find_station(priv, addr);
7086 if (sta_id == IWL_INVALID_STATION) { 7027 if (sta_id == IWL_INVALID_STATION) {
7087 DECLARE_MAC_BUF(mac); 7028 IWL_DEBUG_MAC80211("leave - %pM not in station map.\n",
7088 7029 addr);
7089 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
7090 print_mac(mac, addr));
7091 return -EINVAL; 7030 return -EINVAL;
7092 } 7031 }
7093 7032
@@ -7143,11 +7082,6 @@ static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
7143 return 0; 7082 return 0;
7144 } 7083 }
7145 7084
7146 if (!priv->qos_data.qos_enable) {
7147 priv->qos_data.qos_active = 0;
7148 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
7149 return 0;
7150 }
7151 q = AC_NUM - 1 - queue; 7085 q = AC_NUM - 1 - queue;
7152 7086
7153 spin_lock_irqsave(&priv->lock, flags); 7087 spin_lock_irqsave(&priv->lock, flags);
@@ -7219,14 +7153,6 @@ static int iwl3945_mac_get_stats(struct ieee80211_hw *hw,
7219 return 0; 7153 return 0;
7220} 7154}
7221 7155
7222static u64 iwl3945_mac_get_tsf(struct ieee80211_hw *hw)
7223{
7224 IWL_DEBUG_MAC80211("enter\n");
7225 IWL_DEBUG_MAC80211("leave\n");
7226
7227 return 0;
7228}
7229
7230static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw) 7156static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7231{ 7157{
7232 struct iwl3945_priv *priv = hw->priv; 7158 struct iwl3945_priv *priv = hw->priv;
@@ -7292,18 +7218,15 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7292 struct iwl3945_priv *priv = hw->priv; 7218 struct iwl3945_priv *priv = hw->priv;
7293 unsigned long flags; 7219 unsigned long flags;
7294 7220
7295 mutex_lock(&priv->mutex);
7296 IWL_DEBUG_MAC80211("enter\n"); 7221 IWL_DEBUG_MAC80211("enter\n");
7297 7222
7298 if (!iwl3945_is_ready_rf(priv)) { 7223 if (!iwl3945_is_ready_rf(priv)) {
7299 IWL_DEBUG_MAC80211("leave - RF not ready\n"); 7224 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7300 mutex_unlock(&priv->mutex);
7301 return -EIO; 7225 return -EIO;
7302 } 7226 }
7303 7227
7304 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) { 7228 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
7305 IWL_DEBUG_MAC80211("leave - not IBSS\n"); 7229 IWL_DEBUG_MAC80211("leave - not IBSS\n");
7306 mutex_unlock(&priv->mutex);
7307 return -EIO; 7230 return -EIO;
7308 } 7231 }
7309 7232
@@ -7323,7 +7246,6 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7323 7246
7324 iwl3945_post_associate(priv); 7247 iwl3945_post_associate(priv);
7325 7248
7326 mutex_unlock(&priv->mutex);
7327 7249
7328 return 0; 7250 return 0;
7329} 7251}
@@ -7792,7 +7714,7 @@ static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
7792 7714
7793/***************************************************************************** 7715/*****************************************************************************
7794 * 7716 *
7795 * driver setup and teardown 7717 * driver setup and tear down
7796 * 7718 *
7797 *****************************************************************************/ 7719 *****************************************************************************/
7798 7720
@@ -7810,7 +7732,6 @@ static void iwl3945_setup_deferred_work(struct iwl3945_priv *priv)
7810 INIT_WORK(&priv->abort_scan, iwl3945_bg_abort_scan); 7732 INIT_WORK(&priv->abort_scan, iwl3945_bg_abort_scan);
7811 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill); 7733 INIT_WORK(&priv->rf_kill, iwl3945_bg_rf_kill);
7812 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 7734 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
7813 INIT_WORK(&priv->set_monitor, iwl3945_bg_set_monitor);
7814 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 7735 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
7815 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 7736 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
7816 INIT_DELAYED_WORK(&priv->scan_check, iwl3945_bg_scan_check); 7737 INIT_DELAYED_WORK(&priv->scan_check, iwl3945_bg_scan_check);
@@ -7869,7 +7790,6 @@ static struct ieee80211_ops iwl3945_hw_ops = {
7869 .get_stats = iwl3945_mac_get_stats, 7790 .get_stats = iwl3945_mac_get_stats,
7870 .get_tx_stats = iwl3945_mac_get_tx_stats, 7791 .get_tx_stats = iwl3945_mac_get_tx_stats,
7871 .conf_tx = iwl3945_mac_conf_tx, 7792 .conf_tx = iwl3945_mac_conf_tx,
7872 .get_tsf = iwl3945_mac_get_tsf,
7873 .reset_tsf = iwl3945_mac_reset_tsf, 7793 .reset_tsf = iwl3945_mac_reset_tsf,
7874 .bss_info_changed = iwl3945_bss_info_changed, 7794 .bss_info_changed = iwl3945_bss_info_changed,
7875 .hw_scan = iwl3945_mac_hw_scan 7795 .hw_scan = iwl3945_mac_hw_scan
@@ -7882,7 +7802,10 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7882 struct ieee80211_hw *hw; 7802 struct ieee80211_hw *hw;
7883 struct iwl_3945_cfg *cfg = (struct iwl_3945_cfg *)(ent->driver_data); 7803 struct iwl_3945_cfg *cfg = (struct iwl_3945_cfg *)(ent->driver_data);
7884 unsigned long flags; 7804 unsigned long flags;
7885 DECLARE_MAC_BUF(mac); 7805
7806 /***********************
7807 * 1. Allocating HW data
7808 * ********************/
7886 7809
7887 /* Disabling hardware scan means that mac80211 will perform scans 7810 /* Disabling hardware scan means that mac80211 will perform scans
7888 * "the hard way", rather than using device's scan. */ 7811 * "the hard way", rather than using device's scan. */
@@ -7907,48 +7830,41 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7907 err = -ENOMEM; 7830 err = -ENOMEM;
7908 goto out; 7831 goto out;
7909 } 7832 }
7910 SET_IEEE80211_DEV(hw, &pdev->dev);
7911 7833
7912 hw->rate_control_algorithm = "iwl-3945-rs"; 7834 SET_IEEE80211_DEV(hw, &pdev->dev);
7913 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
7914 7835
7915 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
7916 priv = hw->priv; 7836 priv = hw->priv;
7917 priv->hw = hw; 7837 priv->hw = hw;
7918
7919 priv->pci_dev = pdev; 7838 priv->pci_dev = pdev;
7920 priv->cfg = cfg; 7839 priv->cfg = cfg;
7921 7840
7841 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
7842 hw->rate_control_algorithm = "iwl-3945-rs";
7843 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
7844
7922 /* Select antenna (may be helpful if only one antenna is connected) */ 7845 /* Select antenna (may be helpful if only one antenna is connected) */
7923 priv->antenna = (enum iwl3945_antenna)iwl3945_param_antenna; 7846 priv->antenna = (enum iwl3945_antenna)iwl3945_param_antenna;
7924#ifdef CONFIG_IWL3945_DEBUG 7847#ifdef CONFIG_IWL3945_DEBUG
7925 iwl3945_debug_level = iwl3945_param_debug; 7848 iwl3945_debug_level = iwl3945_param_debug;
7926 atomic_set(&priv->restrict_refcnt, 0); 7849 atomic_set(&priv->restrict_refcnt, 0);
7927#endif 7850#endif
7928 priv->retry_rate = 1;
7929
7930 priv->ibss_beacon = NULL;
7931 7851
7932 /* Tell mac80211 our characteristics */ 7852 /* Tell mac80211 our characteristics */
7933 hw->flags = IEEE80211_HW_SIGNAL_DBM | 7853 hw->flags = IEEE80211_HW_SIGNAL_DBM |
7934 IEEE80211_HW_NOISE_DBM; 7854 IEEE80211_HW_NOISE_DBM;
7935 7855
7936 hw->wiphy->interface_modes = 7856 hw->wiphy->interface_modes =
7937 BIT(NL80211_IFTYPE_AP) |
7938 BIT(NL80211_IFTYPE_STATION) | 7857 BIT(NL80211_IFTYPE_STATION) |
7939 BIT(NL80211_IFTYPE_ADHOC); 7858 BIT(NL80211_IFTYPE_ADHOC);
7940 7859
7860 hw->wiphy->fw_handles_regulatory = true;
7861
7941 /* 4 EDCA QOS priorities */ 7862 /* 4 EDCA QOS priorities */
7942 hw->queues = 4; 7863 hw->queues = 4;
7943 7864
7944 spin_lock_init(&priv->lock); 7865 /***************************
7945 spin_lock_init(&priv->power_data.lock); 7866 * 2. Initializing PCI bus
7946 spin_lock_init(&priv->sta_lock); 7867 * *************************/
7947 spin_lock_init(&priv->hcmd_lock);
7948
7949 INIT_LIST_HEAD(&priv->free_frames);
7950
7951 mutex_init(&priv->mutex);
7952 if (pci_enable_device(pdev)) { 7868 if (pci_enable_device(pdev)) {
7953 err = -ENODEV; 7869 err = -ENODEV;
7954 goto out_ieee80211_free_hw; 7870 goto out_ieee80211_free_hw;
@@ -7956,14 +7872,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7956 7872
7957 pci_set_master(pdev); 7873 pci_set_master(pdev);
7958 7874
7959 /* Clear the driver's (not device's) station table */
7960 iwl3945_clear_stations_table(priv);
7961
7962 priv->data_retry_limit = -1;
7963 priv->ieee_channels = NULL;
7964 priv->ieee_rates = NULL;
7965 priv->band = IEEE80211_BAND_2GHZ;
7966
7967 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 7875 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
7968 if (!err) 7876 if (!err)
7969 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 7877 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
@@ -7977,10 +7885,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7977 if (err) 7885 if (err)
7978 goto out_pci_disable_device; 7886 goto out_pci_disable_device;
7979 7887
7980 /* We disable the RETRY_TIMEOUT register (0x41) to keep 7888 /***********************
7981 * PCI Tx retries from interfering with C3 CPU state */ 7889 * 3. Read REV Register
7982 pci_write_config_byte(pdev, 0x41, 0x00); 7890 * ********************/
7983
7984 priv->hw_base = pci_iomap(pdev, 0, 0); 7891 priv->hw_base = pci_iomap(pdev, 0, 0);
7985 if (!priv->hw_base) { 7892 if (!priv->hw_base) {
7986 err = -ENODEV; 7893 err = -ENODEV;
@@ -7991,97 +7898,144 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
7991 (unsigned long long) pci_resource_len(pdev, 0)); 7898 (unsigned long long) pci_resource_len(pdev, 0));
7992 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); 7899 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
7993 7900
7994 /* Initialize module parameter values here */ 7901 /* We disable the RETRY_TIMEOUT register (0x41) to keep
7902 * PCI Tx retries from interfering with C3 CPU state */
7903 pci_write_config_byte(pdev, 0x41, 0x00);
7995 7904
7996 /* Disable radio (SW RF KILL) via parameter when loading driver */ 7905 /* nic init */
7997 if (iwl3945_param_disable) { 7906 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS,
7998 set_bit(STATUS_RF_KILL_SW, &priv->status); 7907 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
7999 IWL_DEBUG_INFO("Radio disabled.\n");
8000 }
8001 7908
8002 priv->iw_mode = NL80211_IFTYPE_STATION; 7909 iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7910 err = iwl3945_poll_direct_bit(priv, CSR_GP_CNTRL,
7911 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
7912 if (err < 0) {
7913 IWL_DEBUG_INFO("Failed to init the card\n");
7914 goto out_remove_sysfs;
7915 }
8003 7916
8004 printk(KERN_INFO DRV_NAME 7917 /***********************
8005 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name); 7918 * 4. Read EEPROM
7919 * ********************/
7920 /* Read the EEPROM */
7921 err = iwl3945_eeprom_init(priv);
7922 if (err) {
7923 IWL_ERROR("Unable to init EEPROM\n");
7924 goto out_remove_sysfs;
7925 }
7926 /* MAC Address location in EEPROM same for 3945/4965 */
7927 get_eeprom_mac(priv, priv->mac_addr);
7928 IWL_DEBUG_INFO("MAC address: %pM\n", priv->mac_addr);
7929 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
8006 7930
7931 /***********************
7932 * 5. Setup HW Constants
7933 * ********************/
8007 /* Device-specific setup */ 7934 /* Device-specific setup */
8008 if (iwl3945_hw_set_hw_setting(priv)) { 7935 if (iwl3945_hw_set_hw_setting(priv)) {
8009 IWL_ERROR("failed to set hw settings\n"); 7936 IWL_ERROR("failed to set hw settings\n");
8010 goto out_iounmap; 7937 goto out_iounmap;
8011 } 7938 }
8012 7939
8013 if (iwl3945_param_qos_enable) 7940 /***********************
8014 priv->qos_data.qos_enable = 1; 7941 * 6. Setup priv
7942 * ********************/
7943 priv->retry_rate = 1;
7944 priv->ibss_beacon = NULL;
7945
7946 spin_lock_init(&priv->lock);
7947 spin_lock_init(&priv->power_data.lock);
7948 spin_lock_init(&priv->sta_lock);
7949 spin_lock_init(&priv->hcmd_lock);
7950
7951 INIT_LIST_HEAD(&priv->free_frames);
7952 mutex_init(&priv->mutex);
7953
7954 /* Clear the driver's (not device's) station table */
7955 iwl3945_clear_stations_table(priv);
7956
7957 priv->data_retry_limit = -1;
7958 priv->ieee_channels = NULL;
7959 priv->ieee_rates = NULL;
7960 priv->band = IEEE80211_BAND_2GHZ;
7961
7962 priv->iw_mode = NL80211_IFTYPE_STATION;
8015 7963
8016 iwl3945_reset_qos(priv); 7964 iwl3945_reset_qos(priv);
8017 7965
8018 priv->qos_data.qos_active = 0; 7966 priv->qos_data.qos_active = 0;
8019 priv->qos_data.qos_cap.val = 0; 7967 priv->qos_data.qos_cap.val = 0;
8020 7968
8021 iwl3945_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
8022 iwl3945_setup_deferred_work(priv);
8023 iwl3945_setup_rx_handlers(priv);
8024 7969
8025 priv->rates_mask = IWL_RATES_MASK; 7970 priv->rates_mask = IWL_RATES_MASK;
8026 /* If power management is turned on, default to AC mode */ 7971 /* If power management is turned on, default to AC mode */
8027 priv->power_mode = IWL_POWER_AC; 7972 priv->power_mode = IWL_POWER_AC;
8028 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER; 7973 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
8029 7974
8030 spin_lock_irqsave(&priv->lock, flags); 7975 err = iwl3945_init_channel_map(priv);
8031 iwl3945_disable_interrupts(priv);
8032 spin_unlock_irqrestore(&priv->lock, flags);
8033
8034 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
8035 if (err) { 7976 if (err) {
8036 IWL_ERROR("failed to create sysfs device attributes\n"); 7977 IWL_ERROR("initializing regulatory failed: %d\n", err);
8037 goto out_release_irq; 7978 goto out_release_irq;
8038 } 7979 }
8039 7980
8040 /* nic init */ 7981 err = iwl3945_init_geos(priv);
8041 iwl3945_set_bit(priv, CSR_GIO_CHICKEN_BITS,
8042 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
8043
8044 iwl3945_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
8045 err = iwl3945_poll_bit(priv, CSR_GP_CNTRL,
8046 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
8047 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
8048 if (err < 0) {
8049 IWL_DEBUG_INFO("Failed to init the card\n");
8050 goto out_remove_sysfs;
8051 }
8052 /* Read the EEPROM */
8053 err = iwl3945_eeprom_init(priv);
8054 if (err) { 7982 if (err) {
8055 IWL_ERROR("Unable to init EEPROM\n"); 7983 IWL_ERROR("initializing geos failed: %d\n", err);
8056 goto out_remove_sysfs; 7984 goto out_free_channel_map;
8057 } 7985 }
8058 /* MAC Address location in EEPROM same for 3945/4965 */
8059 get_eeprom_mac(priv, priv->mac_addr);
8060 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr));
8061 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
8062 7986
8063 err = iwl3945_init_channel_map(priv); 7987 printk(KERN_INFO DRV_NAME
8064 if (err) { 7988 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name);
8065 IWL_ERROR("initializing regulatory failed: %d\n", err); 7989
8066 goto out_remove_sysfs; 7990 /***********************************
7991 * 7. Initialize Module Parameters
7992 * **********************************/
7993
7994 /* Initialize module parameter values here */
7995 /* Disable radio (SW RF KILL) via parameter when loading driver */
7996 if (iwl3945_param_disable) {
7997 set_bit(STATUS_RF_KILL_SW, &priv->status);
7998 IWL_DEBUG_INFO("Radio disabled.\n");
8067 } 7999 }
8068 8000
8069 err = iwl3945_init_geos(priv); 8001
8002 /***********************
8003 * 8. Setup Services
8004 * ********************/
8005
8006 spin_lock_irqsave(&priv->lock, flags);
8007 iwl3945_disable_interrupts(priv);
8008 spin_unlock_irqrestore(&priv->lock, flags);
8009
8010 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
8070 if (err) { 8011 if (err) {
8071 IWL_ERROR("initializing geos failed: %d\n", err); 8012 IWL_ERROR("failed to create sysfs device attributes\n");
8072 goto out_free_channel_map; 8013 goto out_free_geos;
8073 } 8014 }
8074 8015
8016 iwl3945_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
8017 iwl3945_setup_deferred_work(priv);
8018 iwl3945_setup_rx_handlers(priv);
8019
8020 /***********************
8021 * 9. Conclude
8022 * ********************/
8023 pci_save_state(pdev);
8024 pci_disable_device(pdev);
8025
8026 /*********************************
8027 * 10. Setup and Register mac80211
8028 * *******************************/
8029
8075 err = ieee80211_register_hw(priv->hw); 8030 err = ieee80211_register_hw(priv->hw);
8076 if (err) { 8031 if (err) {
8077 IWL_ERROR("Failed to register network device (error %d)\n", err); 8032 IWL_ERROR("Failed to register network device (error %d)\n", err);
8078 goto out_free_geos; 8033 goto out_remove_sysfs;
8079 } 8034 }
8080 8035
8081 priv->hw->conf.beacon_int = 100; 8036 priv->hw->conf.beacon_int = 100;
8082 priv->mac80211_registered = 1; 8037 priv->mac80211_registered = 1;
8083 pci_save_state(pdev); 8038
8084 pci_disable_device(pdev);
8085 8039
8086 err = iwl3945_rfkill_init(priv); 8040 err = iwl3945_rfkill_init(priv);
8087 if (err) 8041 if (err)
@@ -8090,12 +8044,13 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8090 8044
8091 return 0; 8045 return 0;
8092 8046
8047 out_remove_sysfs:
8048 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
8093 out_free_geos: 8049 out_free_geos:
8094 iwl3945_free_geos(priv); 8050 iwl3945_free_geos(priv);
8095 out_free_channel_map: 8051 out_free_channel_map:
8096 iwl3945_free_channel_map(priv); 8052 iwl3945_free_channel_map(priv);
8097 out_remove_sysfs: 8053
8098 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
8099 8054
8100 out_release_irq: 8055 out_release_irq:
8101 destroy_workqueue(priv->workqueue); 8056 destroy_workqueue(priv->workqueue);
@@ -8222,7 +8177,7 @@ static int iwl3945_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
8222 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 8177 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
8223 return 0; 8178 return 0;
8224 8179
8225 IWL_DEBUG_RF_KILL("we recieved soft RFKILL set to state %d\n", state); 8180 IWL_DEBUG_RF_KILL("we received soft RFKILL set to state %d\n", state);
8226 mutex_lock(&priv->mutex); 8181 mutex_lock(&priv->mutex);
8227 8182
8228 switch (state) { 8183 switch (state) {
@@ -8237,7 +8192,7 @@ static int iwl3945_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
8237 iwl3945_radio_kill_sw(priv, 1); 8192 iwl3945_radio_kill_sw(priv, 1);
8238 break; 8193 break;
8239 default: 8194 default:
8240 IWL_WARNING("we recieved unexpected RFKILL state %d\n", state); 8195 IWL_WARNING("we received unexpected RFKILL state %d\n", state);
8241 break; 8196 break;
8242 } 8197 }
8243out_unlock: 8198out_unlock:
@@ -8379,7 +8334,7 @@ static void __exit iwl3945_exit(void)
8379 iwl3945_rate_control_unregister(); 8334 iwl3945_rate_control_unregister();
8380} 8335}
8381 8336
8382MODULE_FIRMWARE("iwlwifi-3945" IWL3945_UCODE_API ".ucode"); 8337MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
8383 8338
8384module_param_named(antenna, iwl3945_param_antenna, int, 0444); 8339module_param_named(antenna, iwl3945_param_antenna, int, 0444);
8385MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 8340MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
@@ -8388,7 +8343,7 @@ MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
8388module_param_named(hwcrypto, iwl3945_param_hwcrypto, int, 0444); 8343module_param_named(hwcrypto, iwl3945_param_hwcrypto, int, 0444);
8389MODULE_PARM_DESC(hwcrypto, 8344MODULE_PARM_DESC(hwcrypto,
8390 "using hardware crypto engine (default 0 [software])\n"); 8345 "using hardware crypto engine (default 0 [software])\n");
8391module_param_named(debug, iwl3945_param_debug, int, 0444); 8346module_param_named(debug, iwl3945_param_debug, uint, 0444);
8392MODULE_PARM_DESC(debug, "debug output mask"); 8347MODULE_PARM_DESC(debug, "debug output mask");
8393module_param_named(disable_hw_scan, iwl3945_param_disable_hw_scan, int, 0444); 8348module_param_named(disable_hw_scan, iwl3945_param_disable_hw_scan, int, 0444);
8394MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); 8349MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
@@ -8396,9 +8351,5 @@ MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
8396module_param_named(queues_num, iwl3945_param_queues_num, int, 0444); 8351module_param_named(queues_num, iwl3945_param_queues_num, int, 0444);
8397MODULE_PARM_DESC(queues_num, "number of hw queues."); 8352MODULE_PARM_DESC(queues_num, "number of hw queues.");
8398 8353
8399/* QoS */
8400module_param_named(qos_enable, iwl3945_param_qos_enable, int, 0444);
8401MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
8402
8403module_exit(iwl3945_exit); 8354module_exit(iwl3945_exit);
8404module_init(iwl3945_init); 8355module_init(iwl3945_init);
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 92be60415d04..a0e440cd8967 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -1,6 +1,10 @@
1/* Copyright (C) 2006, Red Hat, Inc. */ 1/* Copyright (C) 2006, Red Hat, Inc. */
2 2
3#include <linux/types.h>
3#include <linux/etherdevice.h> 4#include <linux/etherdevice.h>
5#include <linux/ieee80211.h>
6#include <linux/if_arp.h>
7#include <net/lib80211.h>
4 8
5#include "assoc.h" 9#include "assoc.h"
6#include "decl.h" 10#include "decl.h"
@@ -151,18 +155,18 @@ static int lbs_adhoc_join(struct lbs_private *priv,
151 struct cmd_ds_802_11_ad_hoc_join cmd; 155 struct cmd_ds_802_11_ad_hoc_join cmd;
152 struct bss_descriptor *bss = &assoc_req->bss; 156 struct bss_descriptor *bss = &assoc_req->bss;
153 u8 preamble = RADIO_PREAMBLE_LONG; 157 u8 preamble = RADIO_PREAMBLE_LONG;
154 DECLARE_MAC_BUF(mac); 158 DECLARE_SSID_BUF(ssid);
155 u16 ratesize = 0; 159 u16 ratesize = 0;
156 int ret = 0; 160 int ret = 0;
157 161
158 lbs_deb_enter(LBS_DEB_ASSOC); 162 lbs_deb_enter(LBS_DEB_ASSOC);
159 163
160 lbs_deb_join("current SSID '%s', ssid length %u\n", 164 lbs_deb_join("current SSID '%s', ssid length %u\n",
161 escape_essid(priv->curbssparams.ssid, 165 print_ssid(ssid, priv->curbssparams.ssid,
162 priv->curbssparams.ssid_len), 166 priv->curbssparams.ssid_len),
163 priv->curbssparams.ssid_len); 167 priv->curbssparams.ssid_len);
164 lbs_deb_join("requested ssid '%s', ssid length %u\n", 168 lbs_deb_join("requested ssid '%s', ssid length %u\n",
165 escape_essid(bss->ssid, bss->ssid_len), 169 print_ssid(ssid, bss->ssid, bss->ssid_len),
166 bss->ssid_len); 170 bss->ssid_len);
167 171
168 /* check if the requested SSID is already joined */ 172 /* check if the requested SSID is already joined */
@@ -226,8 +230,8 @@ static int lbs_adhoc_join(struct lbs_private *priv,
226 bss->capability, CAPINFO_MASK); 230 bss->capability, CAPINFO_MASK);
227 231
228 /* information on BSSID descriptor passed to FW */ 232 /* information on BSSID descriptor passed to FW */
229 lbs_deb_join("ADHOC_J_CMD: BSSID = %s, SSID = '%s'\n", 233 lbs_deb_join("ADHOC_J_CMD: BSSID = %pM, SSID = '%s'\n",
230 print_mac(mac, cmd.bss.bssid), cmd.bss.ssid); 234 cmd.bss.bssid, cmd.bss.ssid);
231 235
232 /* Only v8 and below support setting these */ 236 /* Only v8 and below support setting these */
233 if (priv->fwrelease < 0x09000000) { 237 if (priv->fwrelease < 0x09000000) {
@@ -307,6 +311,7 @@ static int lbs_adhoc_start(struct lbs_private *priv,
307 size_t ratesize = 0; 311 size_t ratesize = 0;
308 u16 tmpcap = 0; 312 u16 tmpcap = 0;
309 int ret = 0; 313 int ret = 0;
314 DECLARE_SSID_BUF(ssid);
310 315
311 lbs_deb_enter(LBS_DEB_ASSOC); 316 lbs_deb_enter(LBS_DEB_ASSOC);
312 317
@@ -326,7 +331,7 @@ static int lbs_adhoc_start(struct lbs_private *priv,
326 memcpy(cmd.ssid, assoc_req->ssid, assoc_req->ssid_len); 331 memcpy(cmd.ssid, assoc_req->ssid, assoc_req->ssid_len);
327 332
328 lbs_deb_join("ADHOC_START: SSID '%s', ssid length %u\n", 333 lbs_deb_join("ADHOC_START: SSID '%s', ssid length %u\n",
329 escape_essid(assoc_req->ssid, assoc_req->ssid_len), 334 print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len),
330 assoc_req->ssid_len); 335 assoc_req->ssid_len);
331 336
332 cmd.bsstype = CMD_BSS_TYPE_IBSS; 337 cmd.bsstype = CMD_BSS_TYPE_IBSS;
@@ -338,12 +343,12 @@ static int lbs_adhoc_start(struct lbs_private *priv,
338 WARN_ON(!assoc_req->channel); 343 WARN_ON(!assoc_req->channel);
339 344
340 /* set Physical parameter set */ 345 /* set Physical parameter set */
341 cmd.phyparamset.dsparamset.elementid = MFIE_TYPE_DS_SET; 346 cmd.phyparamset.dsparamset.elementid = WLAN_EID_DS_PARAMS;
342 cmd.phyparamset.dsparamset.len = 1; 347 cmd.phyparamset.dsparamset.len = 1;
343 cmd.phyparamset.dsparamset.currentchan = assoc_req->channel; 348 cmd.phyparamset.dsparamset.currentchan = assoc_req->channel;
344 349
345 /* set IBSS parameter set */ 350 /* set IBSS parameter set */
346 cmd.ssparamset.ibssparamset.elementid = MFIE_TYPE_IBSS_SET; 351 cmd.ssparamset.ibssparamset.elementid = WLAN_EID_IBSS_PARAMS;
347 cmd.ssparamset.ibssparamset.len = 2; 352 cmd.ssparamset.ibssparamset.len = 2;
348 cmd.ssparamset.ibssparamset.atimwindow = 0; 353 cmd.ssparamset.ibssparamset.atimwindow = 0;
349 354
@@ -427,8 +432,8 @@ static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
427{ 432{
428 if (!secinfo->wep_enabled && !secinfo->WPAenabled 433 if (!secinfo->wep_enabled && !secinfo->WPAenabled
429 && !secinfo->WPA2enabled 434 && !secinfo->WPA2enabled
430 && match_bss->wpa_ie[0] != MFIE_TYPE_GENERIC 435 && match_bss->wpa_ie[0] != WLAN_EID_GENERIC
431 && match_bss->rsn_ie[0] != MFIE_TYPE_RSN 436 && match_bss->rsn_ie[0] != WLAN_EID_RSN
432 && !(match_bss->capability & WLAN_CAPABILITY_PRIVACY)) 437 && !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
433 return 1; 438 return 1;
434 else 439 else
@@ -450,7 +455,7 @@ static inline int match_bss_wpa(struct lbs_802_11_security *secinfo,
450 struct bss_descriptor *match_bss) 455 struct bss_descriptor *match_bss)
451{ 456{
452 if (!secinfo->wep_enabled && secinfo->WPAenabled 457 if (!secinfo->wep_enabled && secinfo->WPAenabled
453 && (match_bss->wpa_ie[0] == MFIE_TYPE_GENERIC) 458 && (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
454 /* privacy bit may NOT be set in some APs like LinkSys WRT54G 459 /* privacy bit may NOT be set in some APs like LinkSys WRT54G
455 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */ 460 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
456 ) 461 )
@@ -463,7 +468,7 @@ static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo,
463 struct bss_descriptor *match_bss) 468 struct bss_descriptor *match_bss)
464{ 469{
465 if (!secinfo->wep_enabled && secinfo->WPA2enabled && 470 if (!secinfo->wep_enabled && secinfo->WPA2enabled &&
466 (match_bss->rsn_ie[0] == MFIE_TYPE_RSN) 471 (match_bss->rsn_ie[0] == WLAN_EID_RSN)
467 /* privacy bit may NOT be set in some APs like LinkSys WRT54G 472 /* privacy bit may NOT be set in some APs like LinkSys WRT54G
468 (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */ 473 (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
469 ) 474 )
@@ -477,8 +482,8 @@ static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo,
477{ 482{
478 if (!secinfo->wep_enabled && !secinfo->WPAenabled 483 if (!secinfo->wep_enabled && !secinfo->WPAenabled
479 && !secinfo->WPA2enabled 484 && !secinfo->WPA2enabled
480 && (match_bss->wpa_ie[0] != MFIE_TYPE_GENERIC) 485 && (match_bss->wpa_ie[0] != WLAN_EID_GENERIC)
481 && (match_bss->rsn_ie[0] != MFIE_TYPE_RSN) 486 && (match_bss->rsn_ie[0] != WLAN_EID_RSN)
482 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) 487 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
483 return 1; 488 return 1;
484 else 489 else
@@ -694,6 +699,7 @@ static int assoc_helper_essid(struct lbs_private *priv,
694 int ret = 0; 699 int ret = 0;
695 struct bss_descriptor * bss; 700 struct bss_descriptor * bss;
696 int channel = -1; 701 int channel = -1;
702 DECLARE_SSID_BUF(ssid);
697 703
698 lbs_deb_enter(LBS_DEB_ASSOC); 704 lbs_deb_enter(LBS_DEB_ASSOC);
699 705
@@ -705,7 +711,7 @@ static int assoc_helper_essid(struct lbs_private *priv,
705 channel = assoc_req->channel; 711 channel = assoc_req->channel;
706 712
707 lbs_deb_assoc("SSID '%s' requested\n", 713 lbs_deb_assoc("SSID '%s' requested\n",
708 escape_essid(assoc_req->ssid, assoc_req->ssid_len)); 714 print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len));
709 if (assoc_req->mode == IW_MODE_INFRA) { 715 if (assoc_req->mode == IW_MODE_INFRA) {
710 lbs_send_specific_ssid_scan(priv, assoc_req->ssid, 716 lbs_send_specific_ssid_scan(priv, assoc_req->ssid,
711 assoc_req->ssid_len); 717 assoc_req->ssid_len);
@@ -752,17 +758,15 @@ static int assoc_helper_bssid(struct lbs_private *priv,
752{ 758{
753 int ret = 0; 759 int ret = 0;
754 struct bss_descriptor * bss; 760 struct bss_descriptor * bss;
755 DECLARE_MAC_BUF(mac);
756 761
757 lbs_deb_enter_args(LBS_DEB_ASSOC, "BSSID %s", 762 lbs_deb_enter_args(LBS_DEB_ASSOC, "BSSID %pM", assoc_req->bssid);
758 print_mac(mac, assoc_req->bssid));
759 763
760 /* Search for index position in list for requested MAC */ 764 /* Search for index position in list for requested MAC */
761 bss = lbs_find_bssid_in_list(priv, assoc_req->bssid, 765 bss = lbs_find_bssid_in_list(priv, assoc_req->bssid,
762 assoc_req->mode); 766 assoc_req->mode);
763 if (bss == NULL) { 767 if (bss == NULL) {
764 lbs_deb_assoc("ASSOC: WAP: BSSID %s not found, " 768 lbs_deb_assoc("ASSOC: WAP: BSSID %pM not found, "
765 "cannot associate.\n", print_mac(mac, assoc_req->bssid)); 769 "cannot associate.\n", assoc_req->bssid);
766 goto out; 770 goto out;
767 } 771 }
768 772
@@ -1208,7 +1212,7 @@ void lbs_association_worker(struct work_struct *work)
1208 struct assoc_request * assoc_req = NULL; 1212 struct assoc_request * assoc_req = NULL;
1209 int ret = 0; 1213 int ret = 0;
1210 int find_any_ssid = 0; 1214 int find_any_ssid = 0;
1211 DECLARE_MAC_BUF(mac); 1215 DECLARE_SSID_BUF(ssid);
1212 1216
1213 lbs_deb_enter(LBS_DEB_ASSOC); 1217 lbs_deb_enter(LBS_DEB_ASSOC);
1214 1218
@@ -1228,13 +1232,13 @@ void lbs_association_worker(struct work_struct *work)
1228 " chann: %d\n" 1232 " chann: %d\n"
1229 " band: %d\n" 1233 " band: %d\n"
1230 " mode: %d\n" 1234 " mode: %d\n"
1231 " BSSID: %s\n" 1235 " BSSID: %pM\n"
1232 " secinfo: %s%s%s\n" 1236 " secinfo: %s%s%s\n"
1233 " auth_mode: %d\n", 1237 " auth_mode: %d\n",
1234 assoc_req->flags, 1238 assoc_req->flags,
1235 escape_essid(assoc_req->ssid, assoc_req->ssid_len), 1239 print_ssid(ssid, assoc_req->ssid, assoc_req->ssid_len),
1236 assoc_req->channel, assoc_req->band, assoc_req->mode, 1240 assoc_req->channel, assoc_req->band, assoc_req->mode,
1237 print_mac(mac, assoc_req->bssid), 1241 assoc_req->bssid,
1238 assoc_req->secinfo.WPAenabled ? " WPA" : "", 1242 assoc_req->secinfo.WPAenabled ? " WPA" : "",
1239 assoc_req->secinfo.WPA2enabled ? " WPA2" : "", 1243 assoc_req->secinfo.WPA2enabled ? " WPA2" : "",
1240 assoc_req->secinfo.wep_enabled ? " WEP" : "", 1244 assoc_req->secinfo.wep_enabled ? " WEP" : "",
@@ -1357,8 +1361,8 @@ void lbs_association_worker(struct work_struct *work)
1357 } 1361 }
1358 1362
1359 if (success) { 1363 if (success) {
1360 lbs_deb_assoc("associated to %s\n", 1364 lbs_deb_assoc("associated to %pM\n",
1361 print_mac(mac, priv->curbssparams.bssid)); 1365 priv->curbssparams.bssid);
1362 lbs_prepare_and_send_command(priv, 1366 lbs_prepare_and_send_command(priv,
1363 CMD_802_11_RSSI, 1367 CMD_802_11_RSSI,
1364 0, CMD_OPTION_WAITFORRSP, 0, NULL); 1368 0, CMD_OPTION_WAITFORRSP, 0, NULL);
@@ -1478,7 +1482,6 @@ int lbs_cmd_80211_authenticate(struct lbs_private *priv,
1478 struct cmd_ds_802_11_authenticate *pauthenticate = &cmd->params.auth; 1482 struct cmd_ds_802_11_authenticate *pauthenticate = &cmd->params.auth;
1479 int ret = -1; 1483 int ret = -1;
1480 u8 *bssid = pdata_buf; 1484 u8 *bssid = pdata_buf;
1481 DECLARE_MAC_BUF(mac);
1482 1485
1483 lbs_deb_enter(LBS_DEB_JOIN); 1486 lbs_deb_enter(LBS_DEB_JOIN);
1484 1487
@@ -1505,8 +1508,8 @@ int lbs_cmd_80211_authenticate(struct lbs_private *priv,
1505 1508
1506 memcpy(pauthenticate->macaddr, bssid, ETH_ALEN); 1509 memcpy(pauthenticate->macaddr, bssid, ETH_ALEN);
1507 1510
1508 lbs_deb_join("AUTH_CMD: BSSID %s, auth 0x%x\n", 1511 lbs_deb_join("AUTH_CMD: BSSID %pM, auth 0x%x\n",
1509 print_mac(mac, bssid), pauthenticate->authtype); 1512 bssid, pauthenticate->authtype);
1510 ret = 0; 1513 ret = 0;
1511 1514
1512out: 1515out:
@@ -1770,7 +1773,7 @@ static int lbs_adhoc_post(struct lbs_private *priv, struct cmd_header *resp)
1770 struct cmd_ds_802_11_ad_hoc_result *adhoc_resp; 1773 struct cmd_ds_802_11_ad_hoc_result *adhoc_resp;
1771 union iwreq_data wrqu; 1774 union iwreq_data wrqu;
1772 struct bss_descriptor *bss; 1775 struct bss_descriptor *bss;
1773 DECLARE_MAC_BUF(mac); 1776 DECLARE_SSID_BUF(ssid);
1774 1777
1775 lbs_deb_enter(LBS_DEB_JOIN); 1778 lbs_deb_enter(LBS_DEB_JOIN);
1776 1779
@@ -1819,9 +1822,9 @@ static int lbs_adhoc_post(struct lbs_private *priv, struct cmd_header *resp)
1819 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 1822 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1820 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); 1823 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
1821 1824
1822 lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %s, channel %d\n", 1825 lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
1823 escape_essid(bss->ssid, bss->ssid_len), 1826 print_ssid(ssid, bss->ssid, bss->ssid_len),
1824 print_mac(mac, priv->curbssparams.bssid), 1827 priv->curbssparams.bssid,
1825 priv->curbssparams.channel); 1828 priv->curbssparams.channel);
1826 1829
1827done: 1830done:
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 8265c7d25edc..639dd02d3d31 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -4,7 +4,7 @@
4 */ 4 */
5 5
6#include <net/iw_handler.h> 6#include <net/iw_handler.h>
7#include <net/ieee80211.h> 7#include <net/lib80211.h>
8#include <linux/kfifo.h> 8#include <linux/kfifo.h>
9#include "host.h" 9#include "host.h"
10#include "hostcmd.h" 10#include "hostcmd.h"
@@ -87,7 +87,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
87 struct cmd_ds_get_hw_spec cmd; 87 struct cmd_ds_get_hw_spec cmd;
88 int ret = -1; 88 int ret = -1;
89 u32 i; 89 u32 i;
90 DECLARE_MAC_BUF(mac);
91 90
92 lbs_deb_enter(LBS_DEB_CMD); 91 lbs_deb_enter(LBS_DEB_CMD);
93 92
@@ -110,8 +109,8 @@ int lbs_update_hw_spec(struct lbs_private *priv)
110 * CF card firmware 5.0.16p0: cap 0x00000303 109 * CF card firmware 5.0.16p0: cap 0x00000303
111 * USB dongle firmware 5.110.17p2: cap 0x00000303 110 * USB dongle firmware 5.110.17p2: cap 0x00000303
112 */ 111 */
113 lbs_pr_info("%s, fw %u.%u.%up%u, cap 0x%08x\n", 112 lbs_pr_info("%pM, fw %u.%u.%up%u, cap 0x%08x\n",
114 print_mac(mac, cmd.permanentaddr), 113 cmd.permanentaddr,
115 priv->fwrelease >> 24 & 0xff, 114 priv->fwrelease >> 24 & 0xff,
116 priv->fwrelease >> 16 & 0xff, 115 priv->fwrelease >> 16 & 0xff,
117 priv->fwrelease >> 8 & 0xff, 116 priv->fwrelease >> 8 & 0xff,
@@ -160,7 +159,8 @@ out:
160 return ret; 159 return ret;
161} 160}
162 161
163int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria) 162int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
163 struct wol_config *p_wol_config)
164{ 164{
165 struct cmd_ds_host_sleep cmd_config; 165 struct cmd_ds_host_sleep cmd_config;
166 int ret; 166 int ret;
@@ -170,10 +170,21 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria)
170 cmd_config.gpio = priv->wol_gpio; 170 cmd_config.gpio = priv->wol_gpio;
171 cmd_config.gap = priv->wol_gap; 171 cmd_config.gap = priv->wol_gap;
172 172
173 if (p_wol_config != NULL)
174 memcpy((uint8_t *)&cmd_config.wol_conf, (uint8_t *)p_wol_config,
175 sizeof(struct wol_config));
176 else
177 cmd_config.wol_conf.action = CMD_ACT_ACTION_NONE;
178
173 ret = lbs_cmd_with_response(priv, CMD_802_11_HOST_SLEEP_CFG, &cmd_config); 179 ret = lbs_cmd_with_response(priv, CMD_802_11_HOST_SLEEP_CFG, &cmd_config);
174 if (!ret) { 180 if (!ret) {
175 lbs_deb_cmd("Set WOL criteria to %x\n", criteria); 181 if (criteria) {
176 priv->wol_criteria = criteria; 182 lbs_deb_cmd("Set WOL criteria to %x\n", criteria);
183 priv->wol_criteria = criteria;
184 } else
185 memcpy((uint8_t *) p_wol_config,
186 (uint8_t *)&cmd_config.wol_conf,
187 sizeof(struct wol_config));
177 } else { 188 } else {
178 lbs_pr_info("HOST_SLEEP_CFG failed %d\n", ret); 189 lbs_pr_info("HOST_SLEEP_CFG failed %d\n", ret);
179 } 190 }
@@ -1063,6 +1074,7 @@ int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1063{ 1074{
1064 struct cmd_ds_mesh_config cmd; 1075 struct cmd_ds_mesh_config cmd;
1065 struct mrvl_meshie *ie; 1076 struct mrvl_meshie *ie;
1077 DECLARE_SSID_BUF(ssid);
1066 1078
1067 memset(&cmd, 0, sizeof(cmd)); 1079 memset(&cmd, 0, sizeof(cmd));
1068 cmd.channel = cpu_to_le16(chan); 1080 cmd.channel = cpu_to_le16(chan);
@@ -1070,7 +1082,7 @@ int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1070 1082
1071 switch (action) { 1083 switch (action) {
1072 case CMD_ACT_MESH_CONFIG_START: 1084 case CMD_ACT_MESH_CONFIG_START:
1073 ie->hdr.id = MFIE_TYPE_GENERIC; 1085 ie->id = WLAN_EID_GENERIC;
1074 ie->val.oui[0] = 0x00; 1086 ie->val.oui[0] = 0x00;
1075 ie->val.oui[1] = 0x50; 1087 ie->val.oui[1] = 0x50;
1076 ie->val.oui[2] = 0x43; 1088 ie->val.oui[2] = 0x43;
@@ -1082,7 +1094,7 @@ int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1082 ie->val.mesh_capability = MARVELL_MESH_CAPABILITY; 1094 ie->val.mesh_capability = MARVELL_MESH_CAPABILITY;
1083 ie->val.mesh_id_len = priv->mesh_ssid_len; 1095 ie->val.mesh_id_len = priv->mesh_ssid_len;
1084 memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len); 1096 memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len);
1085 ie->hdr.len = sizeof(struct mrvl_meshie_val) - 1097 ie->len = sizeof(struct mrvl_meshie_val) -
1086 IW_ESSID_MAX_SIZE + priv->mesh_ssid_len; 1098 IW_ESSID_MAX_SIZE + priv->mesh_ssid_len;
1087 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val)); 1099 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val));
1088 break; 1100 break;
@@ -1093,7 +1105,7 @@ int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1093 } 1105 }
1094 lbs_deb_cmd("mesh config action %d type %x channel %d SSID %s\n", 1106 lbs_deb_cmd("mesh config action %d type %x channel %d SSID %s\n",
1095 action, priv->mesh_tlv, chan, 1107 action, priv->mesh_tlv, chan,
1096 escape_essid(priv->mesh_ssid, priv->mesh_ssid_len)); 1108 print_ssid(ssid, priv->mesh_ssid, priv->mesh_ssid_len));
1097 1109
1098 return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv); 1110 return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
1099} 1111}
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 36be4c9703e0..392e578ca095 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -56,7 +56,8 @@ int lbs_mesh_config_send(struct lbs_private *priv,
56 uint16_t action, uint16_t type); 56 uint16_t action, uint16_t type);
57int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan); 57int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
58 58
59int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria); 59int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
60 struct wol_config *p_wol_config);
60int lbs_suspend(struct lbs_private *priv); 61int lbs_suspend(struct lbs_private *priv);
61void lbs_resume(struct lbs_private *priv); 62void lbs_resume(struct lbs_private *priv);
62 63
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 0aa0ce3b2c42..ec4efd7ff3c8 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -5,6 +5,7 @@
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/string.h> 6#include <linux/string.h>
7#include <net/iw_handler.h> 7#include <net/iw_handler.h>
8#include <net/lib80211.h>
8 9
9#include "dev.h" 10#include "dev.h"
10#include "decl.h" 11#include "decl.h"
@@ -65,7 +66,7 @@ static ssize_t lbs_getscantable(struct file *file, char __user *userbuf,
65 int numscansdone = 0, res; 66 int numscansdone = 0, res;
66 unsigned long addr = get_zeroed_page(GFP_KERNEL); 67 unsigned long addr = get_zeroed_page(GFP_KERNEL);
67 char *buf = (char *)addr; 68 char *buf = (char *)addr;
68 DECLARE_MAC_BUF(mac); 69 DECLARE_SSID_BUF(ssid);
69 struct bss_descriptor * iter_bss; 70 struct bss_descriptor * iter_bss;
70 71
71 pos += snprintf(buf+pos, len-pos, 72 pos += snprintf(buf+pos, len-pos,
@@ -77,17 +78,17 @@ static ssize_t lbs_getscantable(struct file *file, char __user *userbuf,
77 u16 privacy = (iter_bss->capability & WLAN_CAPABILITY_PRIVACY); 78 u16 privacy = (iter_bss->capability & WLAN_CAPABILITY_PRIVACY);
78 u16 spectrum_mgmt = (iter_bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT); 79 u16 spectrum_mgmt = (iter_bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT);
79 80
80 pos += snprintf(buf+pos, len-pos, 81 pos += snprintf(buf+pos, len-pos, "%02u| %03d | %04d | %pM |",
81 "%02u| %03d | %04d | %s |",
82 numscansdone, iter_bss->channel, iter_bss->rssi, 82 numscansdone, iter_bss->channel, iter_bss->rssi,
83 print_mac(mac, iter_bss->bssid)); 83 iter_bss->bssid);
84 pos += snprintf(buf+pos, len-pos, " %04x-", iter_bss->capability); 84 pos += snprintf(buf+pos, len-pos, " %04x-", iter_bss->capability);
85 pos += snprintf(buf+pos, len-pos, "%c%c%c |", 85 pos += snprintf(buf+pos, len-pos, "%c%c%c |",
86 ibss ? 'A' : 'I', privacy ? 'P' : ' ', 86 ibss ? 'A' : 'I', privacy ? 'P' : ' ',
87 spectrum_mgmt ? 'S' : ' '); 87 spectrum_mgmt ? 'S' : ' ');
88 pos += snprintf(buf+pos, len-pos, " %04d |", SCAN_RSSI(iter_bss->rssi)); 88 pos += snprintf(buf+pos, len-pos, " %04d |", SCAN_RSSI(iter_bss->rssi));
89 pos += snprintf(buf+pos, len-pos, " %s\n", 89 pos += snprintf(buf+pos, len-pos, " %s\n",
90 escape_essid(iter_bss->ssid, iter_bss->ssid_len)); 90 print_ssid(ssid, iter_bss->ssid,
91 iter_bss->ssid_len));
91 92
92 numscansdone++; 93 numscansdone++;
93 } 94 }
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 1a8888cceadc..0b84bdca0726 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -74,8 +74,4 @@ void lbs_host_to_card_done(struct lbs_private *priv);
74 74
75int lbs_update_channel(struct lbs_private *priv); 75int lbs_update_channel(struct lbs_private *priv);
76 76
77#ifndef CONFIG_IEEE80211
78const char *escape_essid(const char *essid, u8 essid_len);
79#endif
80
81#endif 77#endif
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 076a636e8f62..c364e4c01d1b 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -79,7 +79,7 @@ do { if ((lbs_debug & (grp)) == (grp)) \
79#define lbs_deb_tx(fmt, args...) LBS_DEB_LL(LBS_DEB_TX, " tx", fmt, ##args) 79#define lbs_deb_tx(fmt, args...) LBS_DEB_LL(LBS_DEB_TX, " tx", fmt, ##args)
80#define lbs_deb_fw(fmt, args...) LBS_DEB_LL(LBS_DEB_FW, " fw", fmt, ##args) 80#define lbs_deb_fw(fmt, args...) LBS_DEB_LL(LBS_DEB_FW, " fw", fmt, ##args)
81#define lbs_deb_usb(fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usb", fmt, ##args) 81#define lbs_deb_usb(fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usb", fmt, ##args)
82#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usbd", "%s:" fmt, (dev)->bus_id, ##args) 82#define lbs_deb_usbd(dev, fmt, args...) LBS_DEB_LL(LBS_DEB_USB, " usbd", "%s:" fmt, dev_name(dev), ##args)
83#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, " cs", fmt, ##args) 83#define lbs_deb_cs(fmt, args...) LBS_DEB_LL(LBS_DEB_CS, " cs", fmt, ##args)
84#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args) 84#define lbs_deb_thread(fmt, args...) LBS_DEB_LL(LBS_DEB_THREAD, " thread", fmt, ##args)
85#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args) 85#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args)
@@ -149,6 +149,18 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
149#define EHS_WAKE_ON_MAC_EVENT 0x0004 149#define EHS_WAKE_ON_MAC_EVENT 0x0004
150#define EHS_WAKE_ON_MULTICAST_DATA 0x0008 150#define EHS_WAKE_ON_MULTICAST_DATA 0x0008
151#define EHS_REMOVE_WAKEUP 0xFFFFFFFF 151#define EHS_REMOVE_WAKEUP 0xFFFFFFFF
152/* Wake rules for Host_Sleep_CFG command */
153#define WOL_RULE_NET_TYPE_INFRA_OR_IBSS 0x00
154#define WOL_RULE_NET_TYPE_MESH 0x10
155#define WOL_RULE_ADDR_TYPE_BCAST 0x01
156#define WOL_RULE_ADDR_TYPE_MCAST 0x08
157#define WOL_RULE_ADDR_TYPE_UCAST 0x02
158#define WOL_RULE_OP_AND 0x01
159#define WOL_RULE_OP_OR 0x02
160#define WOL_RULE_OP_INVALID 0xFF
161#define WOL_RESULT_VALID_CMD 0
162#define WOL_RESULT_NOSPC_ERR 1
163#define WOL_RESULT_EEXIST_ERR 2
152 164
153/** Misc constants */ 165/** Misc constants */
154/* This section defines 802.11 specific contants */ 166/* This section defines 802.11 specific contants */
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index f6f3753da303..dd682c4cfde8 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -10,7 +10,6 @@
10#include <linux/wireless.h> 10#include <linux/wireless.h>
11#include <linux/ethtool.h> 11#include <linux/ethtool.h>
12#include <linux/debugfs.h> 12#include <linux/debugfs.h>
13#include <net/ieee80211.h>
14 13
15#include "defs.h" 14#include "defs.h"
16#include "hostcmd.h" 15#include "hostcmd.h"
@@ -278,6 +277,12 @@ struct lbs_private {
278 struct enc_key wpa_mcast_key; 277 struct enc_key wpa_mcast_key;
279 struct enc_key wpa_unicast_key; 278 struct enc_key wpa_unicast_key;
280 279
280/*
281 * In theory, the IE is limited to the IE length, 255,
282 * but in practice 64 bytes are enough.
283 */
284#define MAX_WPA_IE_LEN 64
285
281 /** WPA Information Elements*/ 286 /** WPA Information Elements*/
282 u8 wpa_ie[MAX_WPA_IE_LEN]; 287 u8 wpa_ie[MAX_WPA_IE_LEN];
283 u8 wpa_ie_len; 288 u8 wpa_ie_len;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 688d60de55cb..61d2f50470c8 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -23,7 +23,7 @@ static const char * mesh_stat_strings[]= {
23static void lbs_ethtool_get_drvinfo(struct net_device *dev, 23static void lbs_ethtool_get_drvinfo(struct net_device *dev,
24 struct ethtool_drvinfo *info) 24 struct ethtool_drvinfo *info)
25{ 25{
26 struct lbs_private *priv = (struct lbs_private *) dev->priv; 26 struct lbs_private *priv = netdev_priv(dev);
27 27
28 snprintf(info->fw_version, 32, "%u.%u.%u.p%u", 28 snprintf(info->fw_version, 32, "%u.%u.%u.p%u",
29 priv->fwrelease >> 24 & 0xff, 29 priv->fwrelease >> 24 & 0xff,
@@ -47,7 +47,7 @@ static int lbs_ethtool_get_eeprom_len(struct net_device *dev)
47static int lbs_ethtool_get_eeprom(struct net_device *dev, 47static int lbs_ethtool_get_eeprom(struct net_device *dev,
48 struct ethtool_eeprom *eeprom, u8 * bytes) 48 struct ethtool_eeprom *eeprom, u8 * bytes)
49{ 49{
50 struct lbs_private *priv = (struct lbs_private *) dev->priv; 50 struct lbs_private *priv = netdev_priv(dev);
51 struct cmd_ds_802_11_eeprom_access cmd; 51 struct cmd_ds_802_11_eeprom_access cmd;
52 int ret; 52 int ret;
53 53
@@ -76,7 +76,7 @@ out:
76static void lbs_ethtool_get_stats(struct net_device *dev, 76static void lbs_ethtool_get_stats(struct net_device *dev,
77 struct ethtool_stats *stats, uint64_t *data) 77 struct ethtool_stats *stats, uint64_t *data)
78{ 78{
79 struct lbs_private *priv = dev->priv; 79 struct lbs_private *priv = netdev_priv(dev);
80 struct cmd_ds_mesh_access mesh_access; 80 struct cmd_ds_mesh_access mesh_access;
81 int ret; 81 int ret;
82 82
@@ -113,7 +113,7 @@ static void lbs_ethtool_get_stats(struct net_device *dev,
113 113
114static int lbs_ethtool_get_sset_count(struct net_device *dev, int sset) 114static int lbs_ethtool_get_sset_count(struct net_device *dev, int sset)
115{ 115{
116 struct lbs_private *priv = dev->priv; 116 struct lbs_private *priv = netdev_priv(dev);
117 117
118 if (sset == ETH_SS_STATS && dev == priv->mesh_dev) 118 if (sset == ETH_SS_STATS && dev == priv->mesh_dev)
119 return MESH_STATS_NUM; 119 return MESH_STATS_NUM;
@@ -143,7 +143,7 @@ static void lbs_ethtool_get_strings(struct net_device *dev,
143static void lbs_ethtool_get_wol(struct net_device *dev, 143static void lbs_ethtool_get_wol(struct net_device *dev,
144 struct ethtool_wolinfo *wol) 144 struct ethtool_wolinfo *wol)
145{ 145{
146 struct lbs_private *priv = dev->priv; 146 struct lbs_private *priv = netdev_priv(dev);
147 147
148 if (priv->wol_criteria == 0xffffffff) { 148 if (priv->wol_criteria == 0xffffffff) {
149 /* Interface driver didn't configure wake */ 149 /* Interface driver didn't configure wake */
@@ -166,7 +166,7 @@ static void lbs_ethtool_get_wol(struct net_device *dev,
166static int lbs_ethtool_set_wol(struct net_device *dev, 166static int lbs_ethtool_set_wol(struct net_device *dev,
167 struct ethtool_wolinfo *wol) 167 struct ethtool_wolinfo *wol)
168{ 168{
169 struct lbs_private *priv = dev->priv; 169 struct lbs_private *priv = netdev_priv(dev);
170 uint32_t criteria = 0; 170 uint32_t criteria = 0;
171 171
172 if (priv->wol_criteria == 0xffffffff && wol->wolopts) 172 if (priv->wol_criteria == 0xffffffff && wol->wolopts)
@@ -180,7 +180,7 @@ static int lbs_ethtool_set_wol(struct net_device *dev,
180 if (wol->wolopts & WAKE_BCAST) criteria |= EHS_WAKE_ON_BROADCAST_DATA; 180 if (wol->wolopts & WAKE_BCAST) criteria |= EHS_WAKE_ON_BROADCAST_DATA;
181 if (wol->wolopts & WAKE_PHY) criteria |= EHS_WAKE_ON_MAC_EVENT; 181 if (wol->wolopts & WAKE_PHY) criteria |= EHS_WAKE_ON_MAC_EVENT;
182 182
183 return lbs_host_sleep_cfg(priv, criteria); 183 return lbs_host_sleep_cfg(priv, criteria, (struct wol_config *)NULL);
184} 184}
185 185
186struct ethtool_ops lbs_ethtool_ops = { 186struct ethtool_ops lbs_ethtool_ops = {
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 5004d7679c02..277ff1975bde 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -220,6 +220,14 @@ enum cmd_fwt_access_opts {
220 CMD_ACT_FWT_ACCESS_TIME, 220 CMD_ACT_FWT_ACCESS_TIME,
221}; 221};
222 222
223/* Define action or option for CMD_802_11_HOST_SLEEP_CFG */
224enum cmd_wol_cfg_opts {
225 CMD_ACT_ACTION_NONE = 0,
226 CMD_ACT_SET_WOL_RULE,
227 CMD_ACT_GET_WOL_RULE,
228 CMD_ACT_RESET_WOL_RULE,
229};
230
223/* Define action or option for CMD_MESH_ACCESS */ 231/* Define action or option for CMD_MESH_ACCESS */
224enum cmd_mesh_access_opts { 232enum cmd_mesh_access_opts {
225 CMD_ACT_MESH_GET_TTL = 1, 233 CMD_ACT_MESH_GET_TTL = 1,
@@ -237,6 +245,7 @@ enum cmd_mesh_access_opts {
237 CMD_ACT_MESH_GET_ROUTE_EXP, 245 CMD_ACT_MESH_GET_ROUTE_EXP,
238 CMD_ACT_MESH_SET_AUTOSTART_ENABLED, 246 CMD_ACT_MESH_SET_AUTOSTART_ENABLED,
239 CMD_ACT_MESH_GET_AUTOSTART_ENABLED, 247 CMD_ACT_MESH_GET_AUTOSTART_ENABLED,
248 CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT = 17,
240}; 249};
241 250
242/* Define actions and types for CMD_MESH_CONFIG */ 251/* Define actions and types for CMD_MESH_CONFIG */
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index d9f9a12a739e..e173b1b46c23 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -580,13 +580,37 @@ struct MrvlIEtype_keyParamSet {
580 u8 key[32]; 580 u8 key[32];
581}; 581};
582 582
583#define MAX_WOL_RULES 16
584
585struct host_wol_rule {
586 uint8_t rule_no;
587 uint8_t rule_ops;
588 __le16 sig_offset;
589 __le16 sig_length;
590 __le16 reserve;
591 __be32 sig_mask;
592 __be32 signature;
593};
594
595struct wol_config {
596 uint8_t action;
597 uint8_t pattern;
598 uint8_t no_rules_in_cmd;
599 uint8_t result;
600 struct host_wol_rule rule[MAX_WOL_RULES];
601};
602
603
583struct cmd_ds_host_sleep { 604struct cmd_ds_host_sleep {
584 struct cmd_header hdr; 605 struct cmd_header hdr;
585 __le32 criteria; 606 __le32 criteria;
586 uint8_t gpio; 607 uint8_t gpio;
587 uint8_t gap; 608 uint16_t gap;
609 struct wol_config wol_conf;
588} __attribute__ ((packed)); 610} __attribute__ ((packed));
589 611
612
613
590struct cmd_ds_802_11_key_material { 614struct cmd_ds_802_11_key_material {
591 struct cmd_header hdr; 615 struct cmd_header hdr;
592 616
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index b54e2ea8346b..4519d7314f47 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -26,6 +26,7 @@
26 * if_sdio_card_to_host() to pad the data. 26 * if_sdio_card_to_host() to pad the data.
27 */ 27 */
28 28
29#include <linux/kernel.h>
29#include <linux/moduleparam.h> 30#include <linux/moduleparam.h>
30#include <linux/firmware.h> 31#include <linux/firmware.h>
31#include <linux/netdevice.h> 32#include <linux/netdevice.h>
@@ -581,7 +582,7 @@ static int if_sdio_prog_real(struct if_sdio_card *card)
581 chunk_size, (chunk_size + 31) / 32 * 32); 582 chunk_size, (chunk_size + 31) / 32 * 32);
582*/ 583*/
583 ret = sdio_writesb(card->func, card->ioport, 584 ret = sdio_writesb(card->func, card->ioport,
584 chunk_buffer, (chunk_size + 31) / 32 * 32); 585 chunk_buffer, roundup(chunk_size, 32));
585 if (ret) 586 if (ret)
586 goto release; 587 goto release;
587 588
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index cafbccb74143..2fc637ad85c7 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -59,7 +59,7 @@ static int if_usb_reset_device(struct if_usb_card *cardp);
59static ssize_t if_usb_firmware_set(struct device *dev, 59static ssize_t if_usb_firmware_set(struct device *dev,
60 struct device_attribute *attr, const char *buf, size_t count) 60 struct device_attribute *attr, const char *buf, size_t count)
61{ 61{
62 struct lbs_private *priv = to_net_dev(dev)->priv; 62 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
63 struct if_usb_card *cardp = priv->card; 63 struct if_usb_card *cardp = priv->card;
64 char fwname[FIRMWARE_NAME_MAX]; 64 char fwname[FIRMWARE_NAME_MAX];
65 int ret; 65 int ret;
@@ -86,7 +86,7 @@ static DEVICE_ATTR(lbs_flash_fw, 0200, NULL, if_usb_firmware_set);
86static ssize_t if_usb_boot2_set(struct device *dev, 86static ssize_t if_usb_boot2_set(struct device *dev,
87 struct device_attribute *attr, const char *buf, size_t count) 87 struct device_attribute *attr, const char *buf, size_t count)
88{ 88{
89 struct lbs_private *priv = to_net_dev(dev)->priv; 89 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
90 struct if_usb_card *cardp = priv->card; 90 struct if_usb_card *cardp = priv->card;
91 char fwname[FIRMWARE_NAME_MAX]; 91 char fwname[FIRMWARE_NAME_MAX];
92 int ret; 92 int ret;
@@ -178,7 +178,8 @@ static void if_usb_setup_firmware(struct lbs_private *priv)
178 178
179 priv->wol_gpio = 2; /* Wake via GPIO2... */ 179 priv->wol_gpio = 2; /* Wake via GPIO2... */
180 priv->wol_gap = 20; /* ... after 20ms */ 180 priv->wol_gap = 20; /* ... after 20ms */
181 lbs_host_sleep_cfg(priv, EHS_WAKE_ON_UNICAST_DATA); 181 lbs_host_sleep_cfg(priv, EHS_WAKE_ON_UNICAST_DATA,
182 (struct wol_config *) NULL);
182 183
183 wake_method.hdr.size = cpu_to_le16(sizeof(wake_method)); 184 wake_method.hdr.size = cpu_to_le16(sizeof(wake_method));
184 wake_method.action = cpu_to_le16(CMD_ACT_GET); 185 wake_method.action = cpu_to_le16(CMD_ACT_GET);
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 73dc8c72402a..3dba83679444 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -12,9 +12,8 @@
12#include <linux/kthread.h> 12#include <linux/kthread.h>
13#include <linux/kfifo.h> 13#include <linux/kfifo.h>
14#include <linux/stddef.h> 14#include <linux/stddef.h>
15 15#include <linux/ieee80211.h>
16#include <net/iw_handler.h> 16#include <net/iw_handler.h>
17#include <net/ieee80211.h>
18 17
19#include "host.h" 18#include "host.h"
20#include "decl.h" 19#include "decl.h"
@@ -223,7 +222,7 @@ u8 lbs_data_rate_to_fw_index(u32 rate)
223static ssize_t lbs_anycast_get(struct device *dev, 222static ssize_t lbs_anycast_get(struct device *dev,
224 struct device_attribute *attr, char * buf) 223 struct device_attribute *attr, char * buf)
225{ 224{
226 struct lbs_private *priv = to_net_dev(dev)->priv; 225 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
227 struct cmd_ds_mesh_access mesh_access; 226 struct cmd_ds_mesh_access mesh_access;
228 int ret; 227 int ret;
229 228
@@ -242,7 +241,7 @@ static ssize_t lbs_anycast_get(struct device *dev,
242static ssize_t lbs_anycast_set(struct device *dev, 241static ssize_t lbs_anycast_set(struct device *dev,
243 struct device_attribute *attr, const char * buf, size_t count) 242 struct device_attribute *attr, const char * buf, size_t count)
244{ 243{
245 struct lbs_private *priv = to_net_dev(dev)->priv; 244 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
246 struct cmd_ds_mesh_access mesh_access; 245 struct cmd_ds_mesh_access mesh_access;
247 uint32_t datum; 246 uint32_t datum;
248 int ret; 247 int ret;
@@ -258,6 +257,58 @@ static ssize_t lbs_anycast_set(struct device *dev,
258 return strlen(buf); 257 return strlen(buf);
259} 258}
260 259
260/**
261 * @brief Get function for sysfs attribute prb_rsp_limit
262 */
263static ssize_t lbs_prb_rsp_limit_get(struct device *dev,
264 struct device_attribute *attr, char *buf)
265{
266 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
267 struct cmd_ds_mesh_access mesh_access;
268 int ret;
269 u32 retry_limit;
270
271 memset(&mesh_access, 0, sizeof(mesh_access));
272 mesh_access.data[0] = cpu_to_le32(CMD_ACT_GET);
273
274 ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT,
275 &mesh_access);
276 if (ret)
277 return ret;
278
279 retry_limit = le32_to_cpu(mesh_access.data[1]);
280 return snprintf(buf, 10, "%d\n", retry_limit);
281}
282
283/**
284 * @brief Set function for sysfs attribute prb_rsp_limit
285 */
286static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
287 struct device_attribute *attr, const char *buf, size_t count)
288{
289 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
290 struct cmd_ds_mesh_access mesh_access;
291 int ret;
292 unsigned long retry_limit;
293
294 memset(&mesh_access, 0, sizeof(mesh_access));
295 mesh_access.data[0] = cpu_to_le32(CMD_ACT_SET);
296
297 if (!strict_strtoul(buf, 10, &retry_limit))
298 return -ENOTSUPP;
299 if (retry_limit > 15)
300 return -ENOTSUPP;
301
302 mesh_access.data[1] = cpu_to_le32(retry_limit);
303
304 ret = lbs_mesh_access(priv, CMD_ACT_MESH_SET_GET_PRB_RSP_LIMIT,
305 &mesh_access);
306 if (ret)
307 return ret;
308
309 return strlen(buf);
310}
311
261static int lbs_add_rtap(struct lbs_private *priv); 312static int lbs_add_rtap(struct lbs_private *priv);
262static void lbs_remove_rtap(struct lbs_private *priv); 313static void lbs_remove_rtap(struct lbs_private *priv);
263static int lbs_add_mesh(struct lbs_private *priv); 314static int lbs_add_mesh(struct lbs_private *priv);
@@ -270,7 +321,7 @@ static void lbs_remove_mesh(struct lbs_private *priv);
270static ssize_t lbs_rtap_get(struct device *dev, 321static ssize_t lbs_rtap_get(struct device *dev,
271 struct device_attribute *attr, char * buf) 322 struct device_attribute *attr, char * buf)
272{ 323{
273 struct lbs_private *priv = to_net_dev(dev)->priv; 324 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
274 return snprintf(buf, 5, "0x%X\n", priv->monitormode); 325 return snprintf(buf, 5, "0x%X\n", priv->monitormode);
275} 326}
276 327
@@ -281,7 +332,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
281 struct device_attribute *attr, const char * buf, size_t count) 332 struct device_attribute *attr, const char * buf, size_t count)
282{ 333{
283 int monitor_mode; 334 int monitor_mode;
284 struct lbs_private *priv = to_net_dev(dev)->priv; 335 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
285 336
286 sscanf(buf, "%x", &monitor_mode); 337 sscanf(buf, "%x", &monitor_mode);
287 if (monitor_mode) { 338 if (monitor_mode) {
@@ -332,7 +383,7 @@ static DEVICE_ATTR(lbs_rtap, 0644, lbs_rtap_get, lbs_rtap_set );
332static ssize_t lbs_mesh_get(struct device *dev, 383static ssize_t lbs_mesh_get(struct device *dev,
333 struct device_attribute *attr, char * buf) 384 struct device_attribute *attr, char * buf)
334{ 385{
335 struct lbs_private *priv = to_net_dev(dev)->priv; 386 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
336 return snprintf(buf, 5, "0x%X\n", !!priv->mesh_dev); 387 return snprintf(buf, 5, "0x%X\n", !!priv->mesh_dev);
337} 388}
338 389
@@ -342,7 +393,7 @@ static ssize_t lbs_mesh_get(struct device *dev,
342static ssize_t lbs_mesh_set(struct device *dev, 393static ssize_t lbs_mesh_set(struct device *dev,
343 struct device_attribute *attr, const char * buf, size_t count) 394 struct device_attribute *attr, const char * buf, size_t count)
344{ 395{
345 struct lbs_private *priv = to_net_dev(dev)->priv; 396 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
346 int enable; 397 int enable;
347 int ret, action = CMD_ACT_MESH_CONFIG_STOP; 398 int ret, action = CMD_ACT_MESH_CONFIG_STOP;
348 399
@@ -376,8 +427,16 @@ static DEVICE_ATTR(lbs_mesh, 0644, lbs_mesh_get, lbs_mesh_set);
376 */ 427 */
377static DEVICE_ATTR(anycast_mask, 0644, lbs_anycast_get, lbs_anycast_set); 428static DEVICE_ATTR(anycast_mask, 0644, lbs_anycast_get, lbs_anycast_set);
378 429
430/**
431 * prb_rsp_limit attribute to be exported per mshX interface
432 * through sysfs (/sys/class/net/mshX/prb_rsp_limit)
433 */
434static DEVICE_ATTR(prb_rsp_limit, 0644, lbs_prb_rsp_limit_get,
435 lbs_prb_rsp_limit_set);
436
379static struct attribute *lbs_mesh_sysfs_entries[] = { 437static struct attribute *lbs_mesh_sysfs_entries[] = {
380 &dev_attr_anycast_mask.attr, 438 &dev_attr_anycast_mask.attr,
439 &dev_attr_prb_rsp_limit.attr,
381 NULL, 440 NULL,
382}; 441};
383 442
@@ -393,7 +452,7 @@ static struct attribute_group lbs_mesh_attr_group = {
393 */ 452 */
394static int lbs_dev_open(struct net_device *dev) 453static int lbs_dev_open(struct net_device *dev)
395{ 454{
396 struct lbs_private *priv = (struct lbs_private *) dev->priv ; 455 struct lbs_private *priv = netdev_priv(dev) ;
397 int ret = 0; 456 int ret = 0;
398 457
399 lbs_deb_enter(LBS_DEB_NET); 458 lbs_deb_enter(LBS_DEB_NET);
@@ -435,7 +494,7 @@ static int lbs_dev_open(struct net_device *dev)
435 */ 494 */
436static int lbs_mesh_stop(struct net_device *dev) 495static int lbs_mesh_stop(struct net_device *dev)
437{ 496{
438 struct lbs_private *priv = (struct lbs_private *) (dev->priv); 497 struct lbs_private *priv = dev->ml_priv;
439 498
440 lbs_deb_enter(LBS_DEB_MESH); 499 lbs_deb_enter(LBS_DEB_MESH);
441 spin_lock_irq(&priv->driver_lock); 500 spin_lock_irq(&priv->driver_lock);
@@ -462,7 +521,7 @@ static int lbs_mesh_stop(struct net_device *dev)
462 */ 521 */
463static int lbs_eth_stop(struct net_device *dev) 522static int lbs_eth_stop(struct net_device *dev)
464{ 523{
465 struct lbs_private *priv = (struct lbs_private *) dev->priv; 524 struct lbs_private *priv = netdev_priv(dev);
466 525
467 lbs_deb_enter(LBS_DEB_NET); 526 lbs_deb_enter(LBS_DEB_NET);
468 527
@@ -479,7 +538,7 @@ static int lbs_eth_stop(struct net_device *dev)
479 538
480static void lbs_tx_timeout(struct net_device *dev) 539static void lbs_tx_timeout(struct net_device *dev)
481{ 540{
482 struct lbs_private *priv = (struct lbs_private *) dev->priv; 541 struct lbs_private *priv = netdev_priv(dev);
483 542
484 lbs_deb_enter(LBS_DEB_TX); 543 lbs_deb_enter(LBS_DEB_TX);
485 544
@@ -531,7 +590,7 @@ EXPORT_SYMBOL_GPL(lbs_host_to_card_done);
531 */ 590 */
532static struct net_device_stats *lbs_get_stats(struct net_device *dev) 591static struct net_device_stats *lbs_get_stats(struct net_device *dev)
533{ 592{
534 struct lbs_private *priv = (struct lbs_private *) dev->priv; 593 struct lbs_private *priv = netdev_priv(dev);
535 594
536 lbs_deb_enter(LBS_DEB_NET); 595 lbs_deb_enter(LBS_DEB_NET);
537 return &priv->stats; 596 return &priv->stats;
@@ -540,7 +599,7 @@ static struct net_device_stats *lbs_get_stats(struct net_device *dev)
540static int lbs_set_mac_address(struct net_device *dev, void *addr) 599static int lbs_set_mac_address(struct net_device *dev, void *addr)
541{ 600{
542 int ret = 0; 601 int ret = 0;
543 struct lbs_private *priv = (struct lbs_private *) dev->priv; 602 struct lbs_private *priv = netdev_priv(dev);
544 struct sockaddr *phwaddr = addr; 603 struct sockaddr *phwaddr = addr;
545 struct cmd_ds_802_11_mac_address cmd; 604 struct cmd_ds_802_11_mac_address cmd;
546 605
@@ -588,7 +647,6 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
588{ 647{
589 int i = nr_addrs; 648 int i = nr_addrs;
590 struct dev_mc_list *mc_list; 649 struct dev_mc_list *mc_list;
591 DECLARE_MAC_BUF(mac);
592 650
593 if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST)) 651 if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST))
594 return nr_addrs; 652 return nr_addrs;
@@ -596,16 +654,16 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
596 netif_addr_lock_bh(dev); 654 netif_addr_lock_bh(dev);
597 for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) { 655 for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
598 if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) { 656 if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) {
599 lbs_deb_net("mcast address %s:%s skipped\n", dev->name, 657 lbs_deb_net("mcast address %s:%pM skipped\n", dev->name,
600 print_mac(mac, mc_list->dmi_addr)); 658 mc_list->dmi_addr);
601 continue; 659 continue;
602 } 660 }
603 661
604 if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE) 662 if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE)
605 break; 663 break;
606 memcpy(&cmd->maclist[6*i], mc_list->dmi_addr, ETH_ALEN); 664 memcpy(&cmd->maclist[6*i], mc_list->dmi_addr, ETH_ALEN);
607 lbs_deb_net("mcast address %s:%s added to filter\n", dev->name, 665 lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name,
608 print_mac(mac, mc_list->dmi_addr)); 666 mc_list->dmi_addr);
609 i++; 667 i++;
610 } 668 }
611 netif_addr_unlock_bh(dev); 669 netif_addr_unlock_bh(dev);
@@ -674,7 +732,7 @@ static void lbs_set_mcast_worker(struct work_struct *work)
674 732
675static void lbs_set_multicast_list(struct net_device *dev) 733static void lbs_set_multicast_list(struct net_device *dev)
676{ 734{
677 struct lbs_private *priv = dev->priv; 735 struct lbs_private *priv = netdev_priv(dev);
678 736
679 schedule_work(&priv->mcast_work); 737 schedule_work(&priv->mcast_work);
680} 738}
@@ -690,7 +748,7 @@ static void lbs_set_multicast_list(struct net_device *dev)
690static int lbs_thread(void *data) 748static int lbs_thread(void *data)
691{ 749{
692 struct net_device *dev = data; 750 struct net_device *dev = data;
693 struct lbs_private *priv = dev->priv; 751 struct lbs_private *priv = netdev_priv(dev);
694 wait_queue_t wait; 752 wait_queue_t wait;
695 753
696 lbs_deb_enter(LBS_DEB_THREAD); 754 lbs_deb_enter(LBS_DEB_THREAD);
@@ -1125,7 +1183,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1125 lbs_pr_err("init ethX device failed\n"); 1183 lbs_pr_err("init ethX device failed\n");
1126 goto done; 1184 goto done;
1127 } 1185 }
1128 priv = dev->priv; 1186 priv = netdev_priv(dev);
1129 1187
1130 if (lbs_init_adapter(priv)) { 1188 if (lbs_init_adapter(priv)) {
1131 lbs_pr_err("failed to initialize adapter structure.\n"); 1189 lbs_pr_err("failed to initialize adapter structure.\n");
@@ -1378,7 +1436,7 @@ static int lbs_add_mesh(struct lbs_private *priv)
1378 ret = -ENOMEM; 1436 ret = -ENOMEM;
1379 goto done; 1437 goto done;
1380 } 1438 }
1381 mesh_dev->priv = priv; 1439 mesh_dev->ml_priv = priv;
1382 priv->mesh_dev = mesh_dev; 1440 priv->mesh_dev = mesh_dev;
1383 1441
1384 mesh_dev->open = lbs_dev_open; 1442 mesh_dev->open = lbs_dev_open;
@@ -1591,7 +1649,7 @@ static int lbs_rtap_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1591 1649
1592static struct net_device_stats *lbs_rtap_get_stats(struct net_device *dev) 1650static struct net_device_stats *lbs_rtap_get_stats(struct net_device *dev)
1593{ 1651{
1594 struct lbs_private *priv = dev->priv; 1652 struct lbs_private *priv = dev->ml_priv;
1595 lbs_deb_enter(LBS_DEB_NET); 1653 lbs_deb_enter(LBS_DEB_NET);
1596 return &priv->stats; 1654 return &priv->stats;
1597} 1655}
@@ -1632,7 +1690,7 @@ static int lbs_add_rtap(struct lbs_private *priv)
1632 rtap_dev->stop = lbs_rtap_stop; 1690 rtap_dev->stop = lbs_rtap_stop;
1633 rtap_dev->get_stats = lbs_rtap_get_stats; 1691 rtap_dev->get_stats = lbs_rtap_get_stats;
1634 rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit; 1692 rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit;
1635 rtap_dev->priv = priv; 1693 rtap_dev->ml_priv = priv;
1636 SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent); 1694 SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent);
1637 1695
1638 ret = register_netdev(rtap_dev); 1696 ret = register_netdev(rtap_dev);
@@ -1647,33 +1705,6 @@ out:
1647 return ret; 1705 return ret;
1648} 1706}
1649 1707
1650#ifndef CONFIG_IEEE80211
1651const char *escape_essid(const char *essid, u8 essid_len)
1652{
1653 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
1654 const char *s = essid;
1655 char *d = escaped;
1656
1657 if (ieee80211_is_empty_essid(essid, essid_len)) {
1658 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
1659 return escaped;
1660 }
1661
1662 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
1663 while (essid_len--) {
1664 if (*s == '\0') {
1665 *d++ = '\\';
1666 *d++ = '0';
1667 s++;
1668 } else {
1669 *d++ = *s++;
1670 }
1671 }
1672 *d = '\0';
1673 return escaped;
1674}
1675#endif
1676
1677module_init(lbs_init_module); 1708module_init(lbs_init_module);
1678module_exit(lbs_exit_module); 1709module_exit(lbs_exit_module);
1679 1710
diff --git a/drivers/net/wireless/libertas/persistcfg.c b/drivers/net/wireless/libertas/persistcfg.c
index 3309a9c3cfef..d42b7a5a1b3f 100644
--- a/drivers/net/wireless/libertas/persistcfg.c
+++ b/drivers/net/wireless/libertas/persistcfg.c
@@ -18,7 +18,7 @@
18static int mesh_get_default_parameters(struct device *dev, 18static int mesh_get_default_parameters(struct device *dev,
19 struct mrvl_mesh_defaults *defs) 19 struct mrvl_mesh_defaults *defs)
20{ 20{
21 struct lbs_private *priv = to_net_dev(dev)->priv; 21 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
22 struct cmd_ds_mesh_config cmd; 22 struct cmd_ds_mesh_config cmd;
23 int ret; 23 int ret;
24 24
@@ -57,7 +57,7 @@ static ssize_t bootflag_get(struct device *dev,
57static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr, 57static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
58 const char *buf, size_t count) 58 const char *buf, size_t count)
59{ 59{
60 struct lbs_private *priv = to_net_dev(dev)->priv; 60 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
61 struct cmd_ds_mesh_config cmd; 61 struct cmd_ds_mesh_config cmd;
62 uint32_t datum; 62 uint32_t datum;
63 int ret; 63 int ret;
@@ -100,7 +100,7 @@ static ssize_t boottime_get(struct device *dev,
100static ssize_t boottime_set(struct device *dev, 100static ssize_t boottime_set(struct device *dev,
101 struct device_attribute *attr, const char *buf, size_t count) 101 struct device_attribute *attr, const char *buf, size_t count)
102{ 102{
103 struct lbs_private *priv = to_net_dev(dev)->priv; 103 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
104 struct cmd_ds_mesh_config cmd; 104 struct cmd_ds_mesh_config cmd;
105 uint32_t datum; 105 uint32_t datum;
106 int ret; 106 int ret;
@@ -152,7 +152,7 @@ static ssize_t channel_get(struct device *dev,
152static ssize_t channel_set(struct device *dev, struct device_attribute *attr, 152static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
153 const char *buf, size_t count) 153 const char *buf, size_t count)
154{ 154{
155 struct lbs_private *priv = to_net_dev(dev)->priv; 155 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
156 struct cmd_ds_mesh_config cmd; 156 struct cmd_ds_mesh_config cmd;
157 uint32_t datum; 157 uint32_t datum;
158 int ret; 158 int ret;
@@ -210,7 +210,7 @@ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
210 struct cmd_ds_mesh_config cmd; 210 struct cmd_ds_mesh_config cmd;
211 struct mrvl_mesh_defaults defs; 211 struct mrvl_mesh_defaults defs;
212 struct mrvl_meshie *ie; 212 struct mrvl_meshie *ie;
213 struct lbs_private *priv = to_net_dev(dev)->priv; 213 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
214 int len; 214 int len;
215 int ret; 215 int ret;
216 216
@@ -233,7 +233,7 @@ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
233 /* SSID len */ 233 /* SSID len */
234 ie->val.mesh_id_len = len; 234 ie->val.mesh_id_len = len;
235 /* IE len */ 235 /* IE len */
236 ie->hdr.len = sizeof(struct mrvl_meshie_val) - IW_ESSID_MAX_SIZE + len; 236 ie->len = sizeof(struct mrvl_meshie_val) - IW_ESSID_MAX_SIZE + len;
237 237
238 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, 238 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
239 CMD_TYPE_MESH_SET_MESH_IE); 239 CMD_TYPE_MESH_SET_MESH_IE);
@@ -269,7 +269,7 @@ static ssize_t protocol_id_set(struct device *dev,
269 struct cmd_ds_mesh_config cmd; 269 struct cmd_ds_mesh_config cmd;
270 struct mrvl_mesh_defaults defs; 270 struct mrvl_mesh_defaults defs;
271 struct mrvl_meshie *ie; 271 struct mrvl_meshie *ie;
272 struct lbs_private *priv = to_net_dev(dev)->priv; 272 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
273 uint32_t datum; 273 uint32_t datum;
274 int ret; 274 int ret;
275 275
@@ -323,7 +323,7 @@ static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
323 struct cmd_ds_mesh_config cmd; 323 struct cmd_ds_mesh_config cmd;
324 struct mrvl_mesh_defaults defs; 324 struct mrvl_mesh_defaults defs;
325 struct mrvl_meshie *ie; 325 struct mrvl_meshie *ie;
326 struct lbs_private *priv = to_net_dev(dev)->priv; 326 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
327 uint32_t datum; 327 uint32_t datum;
328 int ret; 328 int ret;
329 329
@@ -377,7 +377,7 @@ static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
377 struct cmd_ds_mesh_config cmd; 377 struct cmd_ds_mesh_config cmd;
378 struct mrvl_mesh_defaults defs; 378 struct mrvl_mesh_defaults defs;
379 struct mrvl_meshie *ie; 379 struct mrvl_meshie *ie;
380 struct lbs_private *priv = to_net_dev(dev)->priv; 380 struct lbs_private *priv = netdev_priv(to_net_dev(dev));
381 uint32_t datum; 381 uint32_t datum;
382 int ret; 382 int ret;
383 383
diff --git a/drivers/net/wireless/libertas/radiotap.h b/drivers/net/wireless/libertas/radiotap.h
index 5d118f40cfbc..f8eb9097ff0a 100644
--- a/drivers/net/wireless/libertas/radiotap.h
+++ b/drivers/net/wireless/libertas/radiotap.h
@@ -6,9 +6,6 @@ struct tx_radiotap_hdr {
6 u8 txpower; 6 u8 txpower;
7 u8 rts_retries; 7 u8 rts_retries;
8 u8 data_retries; 8 u8 data_retries;
9#if 0
10 u8 pad[IEEE80211_RADIOTAP_HDRLEN - 12];
11#endif
12} __attribute__ ((packed)); 9} __attribute__ ((packed));
13 10
14#define TX_RADIOTAP_PRESENT ( \ 11#define TX_RADIOTAP_PRESENT ( \
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 22c4c6110521..57f6c12cda20 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -4,8 +4,11 @@
4 * IOCTL handlers as well as command preperation and response routines 4 * IOCTL handlers as well as command preperation and response routines
5 * for sending scan commands to the firmware. 5 * for sending scan commands to the firmware.
6 */ 6 */
7#include <linux/types.h>
7#include <linux/etherdevice.h> 8#include <linux/etherdevice.h>
9#include <linux/if_arp.h>
8#include <asm/unaligned.h> 10#include <asm/unaligned.h>
11#include <net/lib80211.h>
9 12
10#include "host.h" 13#include "host.h"
11#include "decl.h" 14#include "decl.h"
@@ -52,6 +55,8 @@
52//! Scan time specified in the channel TLV for each channel for active scans 55//! Scan time specified in the channel TLV for each channel for active scans
53#define MRVDRV_ACTIVE_SCAN_CHAN_TIME 100 56#define MRVDRV_ACTIVE_SCAN_CHAN_TIME 100
54 57
58#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
59
55static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy, 60static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
56 struct cmd_header *resp); 61 struct cmd_header *resp);
57 62
@@ -359,7 +364,7 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan)
359#ifdef CONFIG_LIBERTAS_DEBUG 364#ifdef CONFIG_LIBERTAS_DEBUG
360 struct bss_descriptor *iter; 365 struct bss_descriptor *iter;
361 int i = 0; 366 int i = 0;
362 DECLARE_MAC_BUF(mac); 367 DECLARE_SSID_BUF(ssid);
363#endif 368#endif
364 369
365 lbs_deb_enter_args(LBS_DEB_SCAN, "full_scan %d", full_scan); 370 lbs_deb_enter_args(LBS_DEB_SCAN, "full_scan %d", full_scan);
@@ -451,9 +456,9 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan)
451 mutex_lock(&priv->lock); 456 mutex_lock(&priv->lock);
452 lbs_deb_scan("scan table:\n"); 457 lbs_deb_scan("scan table:\n");
453 list_for_each_entry(iter, &priv->network_list, list) 458 list_for_each_entry(iter, &priv->network_list, list)
454 lbs_deb_scan("%02d: BSSID %s, RSSI %d, SSID '%s'\n", 459 lbs_deb_scan("%02d: BSSID %pM, RSSI %d, SSID '%s'\n",
455 i++, print_mac(mac, iter->bssid), iter->rssi, 460 i++, iter->bssid, iter->rssi,
456 escape_essid(iter->ssid, iter->ssid_len)); 461 print_ssid(ssid, iter->ssid, iter->ssid_len));
457 mutex_unlock(&priv->lock); 462 mutex_unlock(&priv->lock);
458#endif 463#endif
459 464
@@ -512,7 +517,7 @@ static int lbs_process_bss(struct bss_descriptor *bss,
512 struct ieeetypes_dsparamset *pDS; 517 struct ieeetypes_dsparamset *pDS;
513 struct ieeetypes_cfparamset *pCF; 518 struct ieeetypes_cfparamset *pCF;
514 struct ieeetypes_ibssparamset *pibss; 519 struct ieeetypes_ibssparamset *pibss;
515 DECLARE_MAC_BUF(mac); 520 DECLARE_SSID_BUF(ssid);
516 struct ieeetypes_countryinfoset *pcountryinfo; 521 struct ieeetypes_countryinfoset *pcountryinfo;
517 uint8_t *pos, *end, *p; 522 uint8_t *pos, *end, *p;
518 uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0; 523 uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
@@ -544,7 +549,7 @@ static int lbs_process_bss(struct bss_descriptor *bss,
544 *bytesleft -= beaconsize; 549 *bytesleft -= beaconsize;
545 550
546 memcpy(bss->bssid, pos, ETH_ALEN); 551 memcpy(bss->bssid, pos, ETH_ALEN);
547 lbs_deb_scan("process_bss: BSSID %s\n", print_mac(mac, bss->bssid)); 552 lbs_deb_scan("process_bss: BSSID %pM\n", bss->bssid);
548 pos += ETH_ALEN; 553 pos += ETH_ALEN;
549 554
550 if ((end - pos) < 12) { 555 if ((end - pos) < 12) {
@@ -588,38 +593,36 @@ static int lbs_process_bss(struct bss_descriptor *bss,
588 593
589 /* process variable IE */ 594 /* process variable IE */
590 while (pos <= end - 2) { 595 while (pos <= end - 2) {
591 struct ieee80211_info_element * elem = (void *)pos; 596 if (pos + pos[1] > end) {
592
593 if (pos + elem->len > end) {
594 lbs_deb_scan("process_bss: error in processing IE, " 597 lbs_deb_scan("process_bss: error in processing IE, "
595 "bytes left < IE length\n"); 598 "bytes left < IE length\n");
596 break; 599 break;
597 } 600 }
598 601
599 switch (elem->id) { 602 switch (pos[0]) {
600 case MFIE_TYPE_SSID: 603 case WLAN_EID_SSID:
601 bss->ssid_len = min_t(int, 32, elem->len); 604 bss->ssid_len = min_t(int, IEEE80211_MAX_SSID_LEN, pos[1]);
602 memcpy(bss->ssid, elem->data, bss->ssid_len); 605 memcpy(bss->ssid, pos + 2, bss->ssid_len);
603 lbs_deb_scan("got SSID IE: '%s', len %u\n", 606 lbs_deb_scan("got SSID IE: '%s', len %u\n",
604 escape_essid(bss->ssid, bss->ssid_len), 607 print_ssid(ssid, bss->ssid, bss->ssid_len),
605 bss->ssid_len); 608 bss->ssid_len);
606 break; 609 break;
607 610
608 case MFIE_TYPE_RATES: 611 case WLAN_EID_SUPP_RATES:
609 n_basic_rates = min_t(uint8_t, MAX_RATES, elem->len); 612 n_basic_rates = min_t(uint8_t, MAX_RATES, pos[1]);
610 memcpy(bss->rates, elem->data, n_basic_rates); 613 memcpy(bss->rates, pos + 2, n_basic_rates);
611 got_basic_rates = 1; 614 got_basic_rates = 1;
612 lbs_deb_scan("got RATES IE\n"); 615 lbs_deb_scan("got RATES IE\n");
613 break; 616 break;
614 617
615 case MFIE_TYPE_FH_SET: 618 case WLAN_EID_FH_PARAMS:
616 pFH = (struct ieeetypes_fhparamset *) pos; 619 pFH = (struct ieeetypes_fhparamset *) pos;
617 memmove(&bss->phyparamset.fhparamset, pFH, 620 memmove(&bss->phyparamset.fhparamset, pFH,
618 sizeof(struct ieeetypes_fhparamset)); 621 sizeof(struct ieeetypes_fhparamset));
619 lbs_deb_scan("got FH IE\n"); 622 lbs_deb_scan("got FH IE\n");
620 break; 623 break;
621 624
622 case MFIE_TYPE_DS_SET: 625 case WLAN_EID_DS_PARAMS:
623 pDS = (struct ieeetypes_dsparamset *) pos; 626 pDS = (struct ieeetypes_dsparamset *) pos;
624 bss->channel = pDS->currentchan; 627 bss->channel = pDS->currentchan;
625 memcpy(&bss->phyparamset.dsparamset, pDS, 628 memcpy(&bss->phyparamset.dsparamset, pDS,
@@ -627,14 +630,14 @@ static int lbs_process_bss(struct bss_descriptor *bss,
627 lbs_deb_scan("got DS IE, channel %d\n", bss->channel); 630 lbs_deb_scan("got DS IE, channel %d\n", bss->channel);
628 break; 631 break;
629 632
630 case MFIE_TYPE_CF_SET: 633 case WLAN_EID_CF_PARAMS:
631 pCF = (struct ieeetypes_cfparamset *) pos; 634 pCF = (struct ieeetypes_cfparamset *) pos;
632 memcpy(&bss->ssparamset.cfparamset, pCF, 635 memcpy(&bss->ssparamset.cfparamset, pCF,
633 sizeof(struct ieeetypes_cfparamset)); 636 sizeof(struct ieeetypes_cfparamset));
634 lbs_deb_scan("got CF IE\n"); 637 lbs_deb_scan("got CF IE\n");
635 break; 638 break;
636 639
637 case MFIE_TYPE_IBSS_SET: 640 case WLAN_EID_IBSS_PARAMS:
638 pibss = (struct ieeetypes_ibssparamset *) pos; 641 pibss = (struct ieeetypes_ibssparamset *) pos;
639 bss->atimwindow = le16_to_cpu(pibss->atimwindow); 642 bss->atimwindow = le16_to_cpu(pibss->atimwindow);
640 memmove(&bss->ssparamset.ibssparamset, pibss, 643 memmove(&bss->ssparamset.ibssparamset, pibss,
@@ -642,7 +645,7 @@ static int lbs_process_bss(struct bss_descriptor *bss,
642 lbs_deb_scan("got IBSS IE\n"); 645 lbs_deb_scan("got IBSS IE\n");
643 break; 646 break;
644 647
645 case MFIE_TYPE_COUNTRY: 648 case WLAN_EID_COUNTRY:
646 pcountryinfo = (struct ieeetypes_countryinfoset *) pos; 649 pcountryinfo = (struct ieeetypes_countryinfoset *) pos;
647 lbs_deb_scan("got COUNTRY IE\n"); 650 lbs_deb_scan("got COUNTRY IE\n");
648 if (pcountryinfo->len < sizeof(pcountryinfo->countrycode) 651 if (pcountryinfo->len < sizeof(pcountryinfo->countrycode)
@@ -659,7 +662,7 @@ static int lbs_process_bss(struct bss_descriptor *bss,
659 (int) (pcountryinfo->len + 2)); 662 (int) (pcountryinfo->len + 2));
660 break; 663 break;
661 664
662 case MFIE_TYPE_RATES_EX: 665 case WLAN_EID_EXT_SUPP_RATES:
663 /* only process extended supported rate if data rate is 666 /* only process extended supported rate if data rate is
664 * already found. Data rate IE should come before 667 * already found. Data rate IE should come before
665 * extended supported rate IE 668 * extended supported rate IE
@@ -670,50 +673,51 @@ static int lbs_process_bss(struct bss_descriptor *bss,
670 break; 673 break;
671 } 674 }
672 675
673 n_ex_rates = elem->len; 676 n_ex_rates = pos[1];
674 if (n_basic_rates + n_ex_rates > MAX_RATES) 677 if (n_basic_rates + n_ex_rates > MAX_RATES)
675 n_ex_rates = MAX_RATES - n_basic_rates; 678 n_ex_rates = MAX_RATES - n_basic_rates;
676 679
677 p = bss->rates + n_basic_rates; 680 p = bss->rates + n_basic_rates;
678 memcpy(p, elem->data, n_ex_rates); 681 memcpy(p, pos + 2, n_ex_rates);
679 break; 682 break;
680 683
681 case MFIE_TYPE_GENERIC: 684 case WLAN_EID_GENERIC:
682 if (elem->len >= 4 && 685 if (pos[1] >= 4 &&
683 elem->data[0] == 0x00 && elem->data[1] == 0x50 && 686 pos[2] == 0x00 && pos[3] == 0x50 &&
684 elem->data[2] == 0xf2 && elem->data[3] == 0x01) { 687 pos[4] == 0xf2 && pos[5] == 0x01) {
685 bss->wpa_ie_len = min(elem->len + 2, MAX_WPA_IE_LEN); 688 bss->wpa_ie_len = min(pos[1] + 2, MAX_WPA_IE_LEN);
686 memcpy(bss->wpa_ie, elem, bss->wpa_ie_len); 689 memcpy(bss->wpa_ie, pos, bss->wpa_ie_len);
687 lbs_deb_scan("got WPA IE\n"); 690 lbs_deb_scan("got WPA IE\n");
688 lbs_deb_hex(LBS_DEB_SCAN, "WPA IE", bss->wpa_ie, elem->len); 691 lbs_deb_hex(LBS_DEB_SCAN, "WPA IE", bss->wpa_ie,
689 } else if (elem->len >= MARVELL_MESH_IE_LENGTH && 692 bss->wpa_ie_len);
690 elem->data[0] == 0x00 && elem->data[1] == 0x50 && 693 } else if (pos[1] >= MARVELL_MESH_IE_LENGTH &&
691 elem->data[2] == 0x43 && elem->data[3] == 0x04) { 694 pos[2] == 0x00 && pos[3] == 0x50 &&
695 pos[4] == 0x43 && pos[5] == 0x04) {
692 lbs_deb_scan("got mesh IE\n"); 696 lbs_deb_scan("got mesh IE\n");
693 bss->mesh = 1; 697 bss->mesh = 1;
694 } else { 698 } else {
695 lbs_deb_scan("got generic IE: %02x:%02x:%02x:%02x, len %d\n", 699 lbs_deb_scan("got generic IE: %02x:%02x:%02x:%02x, len %d\n",
696 elem->data[0], elem->data[1], 700 pos[2], pos[3],
697 elem->data[2], elem->data[3], 701 pos[4], pos[5],
698 elem->len); 702 pos[1]);
699 } 703 }
700 break; 704 break;
701 705
702 case MFIE_TYPE_RSN: 706 case WLAN_EID_RSN:
703 lbs_deb_scan("got RSN IE\n"); 707 lbs_deb_scan("got RSN IE\n");
704 bss->rsn_ie_len = min(elem->len + 2, MAX_WPA_IE_LEN); 708 bss->rsn_ie_len = min(pos[1] + 2, MAX_WPA_IE_LEN);
705 memcpy(bss->rsn_ie, elem, bss->rsn_ie_len); 709 memcpy(bss->rsn_ie, pos, bss->rsn_ie_len);
706 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE", 710 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE",
707 bss->rsn_ie, elem->len); 711 bss->rsn_ie, bss->rsn_ie_len);
708 break; 712 break;
709 713
710 default: 714 default:
711 lbs_deb_scan("got IE 0x%04x, len %d\n", 715 lbs_deb_scan("got IE 0x%04x, len %d\n",
712 elem->id, elem->len); 716 pos[0], pos[1]);
713 break; 717 break;
714 } 718 }
715 719
716 pos += elem->len + 2; 720 pos += pos[1] + 2;
717 } 721 }
718 722
719 /* Timestamp */ 723 /* Timestamp */
@@ -741,10 +745,11 @@ done:
741int lbs_send_specific_ssid_scan(struct lbs_private *priv, uint8_t *ssid, 745int lbs_send_specific_ssid_scan(struct lbs_private *priv, uint8_t *ssid,
742 uint8_t ssid_len) 746 uint8_t ssid_len)
743{ 747{
748 DECLARE_SSID_BUF(ssid_buf);
744 int ret = 0; 749 int ret = 0;
745 750
746 lbs_deb_enter_args(LBS_DEB_SCAN, "SSID '%s'\n", 751 lbs_deb_enter_args(LBS_DEB_SCAN, "SSID '%s'\n",
747 escape_essid(ssid, ssid_len)); 752 print_ssid(ssid_buf, ssid, ssid_len));
748 753
749 if (!ssid_len) 754 if (!ssid_len)
750 goto out; 755 goto out;
@@ -939,7 +944,8 @@ out:
939int lbs_set_scan(struct net_device *dev, struct iw_request_info *info, 944int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
940 union iwreq_data *wrqu, char *extra) 945 union iwreq_data *wrqu, char *extra)
941{ 946{
942 struct lbs_private *priv = dev->priv; 947 DECLARE_SSID_BUF(ssid);
948 struct lbs_private *priv = netdev_priv(dev);
943 int ret = 0; 949 int ret = 0;
944 950
945 lbs_deb_enter(LBS_DEB_WEXT); 951 lbs_deb_enter(LBS_DEB_WEXT);
@@ -968,7 +974,7 @@ int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
968 priv->scan_ssid_len = req->essid_len; 974 priv->scan_ssid_len = req->essid_len;
969 memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len); 975 memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
970 lbs_deb_wext("set_scan, essid '%s'\n", 976 lbs_deb_wext("set_scan, essid '%s'\n",
971 escape_essid(priv->scan_ssid, priv->scan_ssid_len)); 977 print_ssid(ssid, priv->scan_ssid, priv->scan_ssid_len));
972 } else { 978 } else {
973 priv->scan_ssid_len = 0; 979 priv->scan_ssid_len = 0;
974 } 980 }
@@ -1002,7 +1008,7 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
1002 struct iw_point *dwrq, char *extra) 1008 struct iw_point *dwrq, char *extra)
1003{ 1009{
1004#define SCAN_ITEM_SIZE 128 1010#define SCAN_ITEM_SIZE 128
1005 struct lbs_private *priv = dev->priv; 1011 struct lbs_private *priv = netdev_priv(dev);
1006 int err = 0; 1012 int err = 0;
1007 char *ev = extra; 1013 char *ev = extra;
1008 char *stop = ev + dwrq->length; 1014 char *stop = ev + dwrq->length;
@@ -1151,7 +1157,6 @@ static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
1151 struct bss_descriptor new; 1157 struct bss_descriptor new;
1152 struct bss_descriptor *found = NULL; 1158 struct bss_descriptor *found = NULL;
1153 struct bss_descriptor *oldest = NULL; 1159 struct bss_descriptor *oldest = NULL;
1154 DECLARE_MAC_BUF(mac);
1155 1160
1156 /* Process the data fields and IEs returned for this BSS */ 1161 /* Process the data fields and IEs returned for this BSS */
1157 memset(&new, 0, sizeof (struct bss_descriptor)); 1162 memset(&new, 0, sizeof (struct bss_descriptor));
@@ -1190,7 +1195,7 @@ static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
1190 continue; 1195 continue;
1191 } 1196 }
1192 1197
1193 lbs_deb_scan("SCAN_RESP: BSSID %s\n", print_mac(mac, new.bssid)); 1198 lbs_deb_scan("SCAN_RESP: BSSID %pM\n", new.bssid);
1194 1199
1195 /* Copy the locally created newbssentry to the scan table */ 1200 /* Copy the locally created newbssentry to the scan table */
1196 memcpy(found, &new, offsetof(struct bss_descriptor, list)); 1201 memcpy(found, &new, offsetof(struct bss_descriptor, list));
diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h
index 9e07b0464a8e..fab7d5d097fc 100644
--- a/drivers/net/wireless/libertas/scan.h
+++ b/drivers/net/wireless/libertas/scan.h
@@ -7,6 +7,10 @@
7#ifndef _LBS_SCAN_H 7#ifndef _LBS_SCAN_H
8#define _LBS_SCAN_H 8#define _LBS_SCAN_H
9 9
10#include <net/iw_handler.h>
11
12#define MAX_NETWORK_COUNT 128
13
10/** 14/**
11 * @brief Maximum number of channels that can be sent in a setuserscan ioctl 15 * @brief Maximum number of channels that can be sent in a setuserscan ioctl
12 */ 16 */
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index a4972fed2941..dac462641170 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -60,7 +60,7 @@ static u32 convert_radiotap_rate_to_mv(u8 rate)
60int lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 60int lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
61{ 61{
62 unsigned long flags; 62 unsigned long flags;
63 struct lbs_private *priv = dev->priv; 63 struct lbs_private *priv = netdev_priv(dev);
64 struct txpd *txpd; 64 struct txpd *txpd;
65 char *p802x_hdr; 65 char *p802x_hdr;
66 uint16_t pkt_len; 66 uint16_t pkt_len;
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index e0c2599da92f..fb7a2d1a2525 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -7,7 +7,6 @@
7#include <linux/if_ether.h> 7#include <linux/if_ether.h>
8#include <asm/byteorder.h> 8#include <asm/byteorder.h>
9#include <linux/wireless.h> 9#include <linux/wireless.h>
10#include <net/ieee80211.h>
11 10
12struct ieeetypes_cfparamset { 11struct ieeetypes_cfparamset {
13 u8 elementid; 12 u8 elementid;
@@ -258,7 +257,7 @@ struct mrvlietypes_ledbhv {
258 * Note that the len member of the ieee80211_info_element varies depending on 257 * Note that the len member of the ieee80211_info_element varies depending on
259 * the mesh_id_len */ 258 * the mesh_id_len */
260struct mrvl_meshie_val { 259struct mrvl_meshie_val {
261 uint8_t oui[P80211_OUI_LEN]; 260 uint8_t oui[3];
262 uint8_t type; 261 uint8_t type;
263 uint8_t subtype; 262 uint8_t subtype;
264 uint8_t version; 263 uint8_t version;
@@ -270,7 +269,7 @@ struct mrvl_meshie_val {
270} __attribute__ ((packed)); 269} __attribute__ ((packed));
271 270
272struct mrvl_meshie { 271struct mrvl_meshie {
273 struct ieee80211_info_element hdr; 272 u8 id, len;
274 struct mrvl_meshie_val val; 273 struct mrvl_meshie_val val;
275} __attribute__ ((packed)); 274} __attribute__ ((packed));
276 275
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 82c3e5a50ea6..c6102e08179e 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -8,7 +8,7 @@
8#include <linux/wireless.h> 8#include <linux/wireless.h>
9#include <linux/bitops.h> 9#include <linux/bitops.h>
10 10
11#include <net/ieee80211.h> 11#include <net/lib80211.h>
12#include <net/iw_handler.h> 12#include <net/iw_handler.h>
13 13
14#include "host.h" 14#include "host.h"
@@ -163,7 +163,7 @@ static int lbs_get_name(struct net_device *dev, struct iw_request_info *info,
163static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info, 163static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info,
164 struct iw_freq *fwrq, char *extra) 164 struct iw_freq *fwrq, char *extra)
165{ 165{
166 struct lbs_private *priv = dev->priv; 166 struct lbs_private *priv = netdev_priv(dev);
167 struct chan_freq_power *cfp; 167 struct chan_freq_power *cfp;
168 168
169 lbs_deb_enter(LBS_DEB_WEXT); 169 lbs_deb_enter(LBS_DEB_WEXT);
@@ -189,7 +189,7 @@ static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info,
189static int lbs_get_wap(struct net_device *dev, struct iw_request_info *info, 189static int lbs_get_wap(struct net_device *dev, struct iw_request_info *info,
190 struct sockaddr *awrq, char *extra) 190 struct sockaddr *awrq, char *extra)
191{ 191{
192 struct lbs_private *priv = dev->priv; 192 struct lbs_private *priv = netdev_priv(dev);
193 193
194 lbs_deb_enter(LBS_DEB_WEXT); 194 lbs_deb_enter(LBS_DEB_WEXT);
195 195
@@ -207,7 +207,7 @@ static int lbs_get_wap(struct net_device *dev, struct iw_request_info *info,
207static int lbs_set_nick(struct net_device *dev, struct iw_request_info *info, 207static int lbs_set_nick(struct net_device *dev, struct iw_request_info *info,
208 struct iw_point *dwrq, char *extra) 208 struct iw_point *dwrq, char *extra)
209{ 209{
210 struct lbs_private *priv = dev->priv; 210 struct lbs_private *priv = netdev_priv(dev);
211 211
212 lbs_deb_enter(LBS_DEB_WEXT); 212 lbs_deb_enter(LBS_DEB_WEXT);
213 213
@@ -231,7 +231,7 @@ static int lbs_set_nick(struct net_device *dev, struct iw_request_info *info,
231static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info, 231static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
232 struct iw_point *dwrq, char *extra) 232 struct iw_point *dwrq, char *extra)
233{ 233{
234 struct lbs_private *priv = dev->priv; 234 struct lbs_private *priv = netdev_priv(dev);
235 235
236 lbs_deb_enter(LBS_DEB_WEXT); 236 lbs_deb_enter(LBS_DEB_WEXT);
237 237
@@ -248,7 +248,7 @@ static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
248static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info, 248static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
249 struct iw_point *dwrq, char *extra) 249 struct iw_point *dwrq, char *extra)
250{ 250{
251 struct lbs_private *priv = dev->priv; 251 struct lbs_private *priv = netdev_priv(dev);
252 252
253 lbs_deb_enter(LBS_DEB_WEXT); 253 lbs_deb_enter(LBS_DEB_WEXT);
254 254
@@ -273,7 +273,7 @@ static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
273 struct iw_param *vwrq, char *extra) 273 struct iw_param *vwrq, char *extra)
274{ 274{
275 int ret = 0; 275 int ret = 0;
276 struct lbs_private *priv = dev->priv; 276 struct lbs_private *priv = netdev_priv(dev);
277 u32 val = vwrq->value; 277 u32 val = vwrq->value;
278 278
279 lbs_deb_enter(LBS_DEB_WEXT); 279 lbs_deb_enter(LBS_DEB_WEXT);
@@ -293,7 +293,7 @@ static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
293static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info, 293static int lbs_get_rts(struct net_device *dev, struct iw_request_info *info,
294 struct iw_param *vwrq, char *extra) 294 struct iw_param *vwrq, char *extra)
295{ 295{
296 struct lbs_private *priv = dev->priv; 296 struct lbs_private *priv = netdev_priv(dev);
297 int ret = 0; 297 int ret = 0;
298 u16 val = 0; 298 u16 val = 0;
299 299
@@ -315,7 +315,7 @@ out:
315static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info, 315static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info,
316 struct iw_param *vwrq, char *extra) 316 struct iw_param *vwrq, char *extra)
317{ 317{
318 struct lbs_private *priv = dev->priv; 318 struct lbs_private *priv = netdev_priv(dev);
319 int ret = 0; 319 int ret = 0;
320 u32 val = vwrq->value; 320 u32 val = vwrq->value;
321 321
@@ -336,7 +336,7 @@ static int lbs_set_frag(struct net_device *dev, struct iw_request_info *info,
336static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info, 336static int lbs_get_frag(struct net_device *dev, struct iw_request_info *info,
337 struct iw_param *vwrq, char *extra) 337 struct iw_param *vwrq, char *extra)
338{ 338{
339 struct lbs_private *priv = dev->priv; 339 struct lbs_private *priv = netdev_priv(dev);
340 int ret = 0; 340 int ret = 0;
341 u16 val = 0; 341 u16 val = 0;
342 342
@@ -359,7 +359,7 @@ out:
359static int lbs_get_mode(struct net_device *dev, 359static int lbs_get_mode(struct net_device *dev,
360 struct iw_request_info *info, u32 * uwrq, char *extra) 360 struct iw_request_info *info, u32 * uwrq, char *extra)
361{ 361{
362 struct lbs_private *priv = dev->priv; 362 struct lbs_private *priv = netdev_priv(dev);
363 363
364 lbs_deb_enter(LBS_DEB_WEXT); 364 lbs_deb_enter(LBS_DEB_WEXT);
365 365
@@ -385,7 +385,7 @@ static int lbs_get_txpow(struct net_device *dev,
385 struct iw_request_info *info, 385 struct iw_request_info *info,
386 struct iw_param *vwrq, char *extra) 386 struct iw_param *vwrq, char *extra)
387{ 387{
388 struct lbs_private *priv = dev->priv; 388 struct lbs_private *priv = netdev_priv(dev);
389 s16 curlevel = 0; 389 s16 curlevel = 0;
390 int ret = 0; 390 int ret = 0;
391 391
@@ -418,7 +418,7 @@ out:
418static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info, 418static int lbs_set_retry(struct net_device *dev, struct iw_request_info *info,
419 struct iw_param *vwrq, char *extra) 419 struct iw_param *vwrq, char *extra)
420{ 420{
421 struct lbs_private *priv = dev->priv; 421 struct lbs_private *priv = netdev_priv(dev);
422 int ret = 0; 422 int ret = 0;
423 u16 slimit = 0, llimit = 0; 423 u16 slimit = 0, llimit = 0;
424 424
@@ -466,7 +466,7 @@ out:
466static int lbs_get_retry(struct net_device *dev, struct iw_request_info *info, 466static int lbs_get_retry(struct net_device *dev, struct iw_request_info *info,
467 struct iw_param *vwrq, char *extra) 467 struct iw_param *vwrq, char *extra)
468{ 468{
469 struct lbs_private *priv = dev->priv; 469 struct lbs_private *priv = netdev_priv(dev);
470 int ret = 0; 470 int ret = 0;
471 u16 val = 0; 471 u16 val = 0;
472 472
@@ -542,7 +542,7 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
542 struct iw_point *dwrq, char *extra) 542 struct iw_point *dwrq, char *extra)
543{ 543{
544 int i, j; 544 int i, j;
545 struct lbs_private *priv = dev->priv; 545 struct lbs_private *priv = netdev_priv(dev);
546 struct iw_range *range = (struct iw_range *)extra; 546 struct iw_range *range = (struct iw_range *)extra;
547 struct chan_freq_power *cfp; 547 struct chan_freq_power *cfp;
548 u8 rates[MAX_RATES + 1]; 548 u8 rates[MAX_RATES + 1];
@@ -708,7 +708,7 @@ out:
708static int lbs_set_power(struct net_device *dev, struct iw_request_info *info, 708static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
709 struct iw_param *vwrq, char *extra) 709 struct iw_param *vwrq, char *extra)
710{ 710{
711 struct lbs_private *priv = dev->priv; 711 struct lbs_private *priv = netdev_priv(dev);
712 712
713 lbs_deb_enter(LBS_DEB_WEXT); 713 lbs_deb_enter(LBS_DEB_WEXT);
714 714
@@ -758,7 +758,7 @@ static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
758static int lbs_get_power(struct net_device *dev, struct iw_request_info *info, 758static int lbs_get_power(struct net_device *dev, struct iw_request_info *info,
759 struct iw_param *vwrq, char *extra) 759 struct iw_param *vwrq, char *extra)
760{ 760{
761 struct lbs_private *priv = dev->priv; 761 struct lbs_private *priv = netdev_priv(dev);
762 762
763 lbs_deb_enter(LBS_DEB_WEXT); 763 lbs_deb_enter(LBS_DEB_WEXT);
764 764
@@ -781,7 +781,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
781 EXCELLENT = 95, 781 EXCELLENT = 95,
782 PERFECT = 100 782 PERFECT = 100
783 }; 783 };
784 struct lbs_private *priv = dev->priv; 784 struct lbs_private *priv = netdev_priv(dev);
785 u32 rssi_qual; 785 u32 rssi_qual;
786 u32 tx_qual; 786 u32 tx_qual;
787 u32 quality = 0; 787 u32 quality = 0;
@@ -886,7 +886,7 @@ static int lbs_set_freq(struct net_device *dev, struct iw_request_info *info,
886 struct iw_freq *fwrq, char *extra) 886 struct iw_freq *fwrq, char *extra)
887{ 887{
888 int ret = -EINVAL; 888 int ret = -EINVAL;
889 struct lbs_private *priv = dev->priv; 889 struct lbs_private *priv = netdev_priv(dev);
890 struct chan_freq_power *cfp; 890 struct chan_freq_power *cfp;
891 struct assoc_request * assoc_req; 891 struct assoc_request * assoc_req;
892 892
@@ -943,7 +943,7 @@ static int lbs_mesh_set_freq(struct net_device *dev,
943 struct iw_request_info *info, 943 struct iw_request_info *info,
944 struct iw_freq *fwrq, char *extra) 944 struct iw_freq *fwrq, char *extra)
945{ 945{
946 struct lbs_private *priv = dev->priv; 946 struct lbs_private *priv = netdev_priv(dev);
947 struct chan_freq_power *cfp; 947 struct chan_freq_power *cfp;
948 int ret = -EINVAL; 948 int ret = -EINVAL;
949 949
@@ -994,7 +994,7 @@ out:
994static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info, 994static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
995 struct iw_param *vwrq, char *extra) 995 struct iw_param *vwrq, char *extra)
996{ 996{
997 struct lbs_private *priv = dev->priv; 997 struct lbs_private *priv = netdev_priv(dev);
998 u8 new_rate = 0; 998 u8 new_rate = 0;
999 int ret = -EINVAL; 999 int ret = -EINVAL;
1000 u8 rates[MAX_RATES + 1]; 1000 u8 rates[MAX_RATES + 1];
@@ -1054,7 +1054,7 @@ out:
1054static int lbs_get_rate(struct net_device *dev, struct iw_request_info *info, 1054static int lbs_get_rate(struct net_device *dev, struct iw_request_info *info,
1055 struct iw_param *vwrq, char *extra) 1055 struct iw_param *vwrq, char *extra)
1056{ 1056{
1057 struct lbs_private *priv = dev->priv; 1057 struct lbs_private *priv = netdev_priv(dev);
1058 1058
1059 lbs_deb_enter(LBS_DEB_WEXT); 1059 lbs_deb_enter(LBS_DEB_WEXT);
1060 1060
@@ -1079,7 +1079,7 @@ static int lbs_set_mode(struct net_device *dev,
1079 struct iw_request_info *info, u32 * uwrq, char *extra) 1079 struct iw_request_info *info, u32 * uwrq, char *extra)
1080{ 1080{
1081 int ret = 0; 1081 int ret = 0;
1082 struct lbs_private *priv = dev->priv; 1082 struct lbs_private *priv = netdev_priv(dev);
1083 struct assoc_request * assoc_req; 1083 struct assoc_request * assoc_req;
1084 1084
1085 lbs_deb_enter(LBS_DEB_WEXT); 1085 lbs_deb_enter(LBS_DEB_WEXT);
@@ -1124,7 +1124,7 @@ static int lbs_get_encode(struct net_device *dev,
1124 struct iw_request_info *info, 1124 struct iw_request_info *info,
1125 struct iw_point *dwrq, u8 * extra) 1125 struct iw_point *dwrq, u8 * extra)
1126{ 1126{
1127 struct lbs_private *priv = dev->priv; 1127 struct lbs_private *priv = netdev_priv(dev);
1128 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1; 1128 int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
1129 1129
1130 lbs_deb_enter(LBS_DEB_WEXT); 1130 lbs_deb_enter(LBS_DEB_WEXT);
@@ -1319,7 +1319,7 @@ static int lbs_set_encode(struct net_device *dev,
1319 struct iw_point *dwrq, char *extra) 1319 struct iw_point *dwrq, char *extra)
1320{ 1320{
1321 int ret = 0; 1321 int ret = 0;
1322 struct lbs_private *priv = dev->priv; 1322 struct lbs_private *priv = netdev_priv(dev);
1323 struct assoc_request * assoc_req; 1323 struct assoc_request * assoc_req;
1324 u16 is_default = 0, index = 0, set_tx_key = 0; 1324 u16 is_default = 0, index = 0, set_tx_key = 0;
1325 1325
@@ -1395,7 +1395,7 @@ static int lbs_get_encodeext(struct net_device *dev,
1395 char *extra) 1395 char *extra)
1396{ 1396{
1397 int ret = -EINVAL; 1397 int ret = -EINVAL;
1398 struct lbs_private *priv = dev->priv; 1398 struct lbs_private *priv = netdev_priv(dev);
1399 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1399 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1400 int index, max_key_len; 1400 int index, max_key_len;
1401 1401
@@ -1501,7 +1501,7 @@ static int lbs_set_encodeext(struct net_device *dev,
1501 char *extra) 1501 char *extra)
1502{ 1502{
1503 int ret = 0; 1503 int ret = 0;
1504 struct lbs_private *priv = dev->priv; 1504 struct lbs_private *priv = netdev_priv(dev);
1505 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1505 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1506 int alg = ext->alg; 1506 int alg = ext->alg;
1507 struct assoc_request * assoc_req; 1507 struct assoc_request * assoc_req;
@@ -1639,7 +1639,7 @@ static int lbs_set_genie(struct net_device *dev,
1639 struct iw_point *dwrq, 1639 struct iw_point *dwrq,
1640 char *extra) 1640 char *extra)
1641{ 1641{
1642 struct lbs_private *priv = dev->priv; 1642 struct lbs_private *priv = netdev_priv(dev);
1643 int ret = 0; 1643 int ret = 0;
1644 struct assoc_request * assoc_req; 1644 struct assoc_request * assoc_req;
1645 1645
@@ -1685,7 +1685,7 @@ static int lbs_get_genie(struct net_device *dev,
1685 char *extra) 1685 char *extra)
1686{ 1686{
1687 int ret = 0; 1687 int ret = 0;
1688 struct lbs_private *priv = dev->priv; 1688 struct lbs_private *priv = netdev_priv(dev);
1689 1689
1690 lbs_deb_enter(LBS_DEB_WEXT); 1690 lbs_deb_enter(LBS_DEB_WEXT);
1691 1691
@@ -1713,7 +1713,7 @@ static int lbs_set_auth(struct net_device *dev,
1713 struct iw_param *dwrq, 1713 struct iw_param *dwrq,
1714 char *extra) 1714 char *extra)
1715{ 1715{
1716 struct lbs_private *priv = dev->priv; 1716 struct lbs_private *priv = netdev_priv(dev);
1717 struct assoc_request * assoc_req; 1717 struct assoc_request * assoc_req;
1718 int ret = 0; 1718 int ret = 0;
1719 int updated = 0; 1719 int updated = 0;
@@ -1816,7 +1816,7 @@ static int lbs_get_auth(struct net_device *dev,
1816 char *extra) 1816 char *extra)
1817{ 1817{
1818 int ret = 0; 1818 int ret = 0;
1819 struct lbs_private *priv = dev->priv; 1819 struct lbs_private *priv = netdev_priv(dev);
1820 1820
1821 lbs_deb_enter(LBS_DEB_WEXT); 1821 lbs_deb_enter(LBS_DEB_WEXT);
1822 1822
@@ -1857,7 +1857,7 @@ static int lbs_set_txpow(struct net_device *dev, struct iw_request_info *info,
1857 struct iw_param *vwrq, char *extra) 1857 struct iw_param *vwrq, char *extra)
1858{ 1858{
1859 int ret = 0; 1859 int ret = 0;
1860 struct lbs_private *priv = dev->priv; 1860 struct lbs_private *priv = netdev_priv(dev);
1861 s16 dbm = (s16) vwrq->value; 1861 s16 dbm = (s16) vwrq->value;
1862 1862
1863 lbs_deb_enter(LBS_DEB_WEXT); 1863 lbs_deb_enter(LBS_DEB_WEXT);
@@ -1936,7 +1936,7 @@ out:
1936static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info, 1936static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
1937 struct iw_point *dwrq, char *extra) 1937 struct iw_point *dwrq, char *extra)
1938{ 1938{
1939 struct lbs_private *priv = dev->priv; 1939 struct lbs_private *priv = netdev_priv(dev);
1940 1940
1941 lbs_deb_enter(LBS_DEB_WEXT); 1941 lbs_deb_enter(LBS_DEB_WEXT);
1942 1942
@@ -1971,12 +1971,13 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
1971static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info, 1971static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
1972 struct iw_point *dwrq, char *extra) 1972 struct iw_point *dwrq, char *extra)
1973{ 1973{
1974 struct lbs_private *priv = dev->priv; 1974 struct lbs_private *priv = netdev_priv(dev);
1975 int ret = 0; 1975 int ret = 0;
1976 u8 ssid[IW_ESSID_MAX_SIZE]; 1976 u8 ssid[IW_ESSID_MAX_SIZE];
1977 u8 ssid_len = 0; 1977 u8 ssid_len = 0;
1978 struct assoc_request * assoc_req; 1978 struct assoc_request * assoc_req;
1979 int in_ssid_len = dwrq->length; 1979 int in_ssid_len = dwrq->length;
1980 DECLARE_SSID_BUF(ssid_buf);
1980 1981
1981 lbs_deb_enter(LBS_DEB_WEXT); 1982 lbs_deb_enter(LBS_DEB_WEXT);
1982 1983
@@ -2005,7 +2006,7 @@ static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
2005 lbs_deb_wext("requested any SSID\n"); 2006 lbs_deb_wext("requested any SSID\n");
2006 } else { 2007 } else {
2007 lbs_deb_wext("requested SSID '%s'\n", 2008 lbs_deb_wext("requested SSID '%s'\n",
2008 escape_essid(ssid, ssid_len)); 2009 print_ssid(ssid_buf, ssid, ssid_len));
2009 } 2010 }
2010 2011
2011out: 2012out:
@@ -2039,7 +2040,7 @@ static int lbs_mesh_get_essid(struct net_device *dev,
2039 struct iw_request_info *info, 2040 struct iw_request_info *info,
2040 struct iw_point *dwrq, char *extra) 2041 struct iw_point *dwrq, char *extra)
2041{ 2042{
2042 struct lbs_private *priv = dev->priv; 2043 struct lbs_private *priv = netdev_priv(dev);
2043 2044
2044 lbs_deb_enter(LBS_DEB_WEXT); 2045 lbs_deb_enter(LBS_DEB_WEXT);
2045 2046
@@ -2057,7 +2058,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2057 struct iw_request_info *info, 2058 struct iw_request_info *info,
2058 struct iw_point *dwrq, char *extra) 2059 struct iw_point *dwrq, char *extra)
2059{ 2060{
2060 struct lbs_private *priv = dev->priv; 2061 struct lbs_private *priv = netdev_priv(dev);
2061 int ret = 0; 2062 int ret = 0;
2062 2063
2063 lbs_deb_enter(LBS_DEB_WEXT); 2064 lbs_deb_enter(LBS_DEB_WEXT);
@@ -2101,10 +2102,9 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2101static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info, 2102static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info,
2102 struct sockaddr *awrq, char *extra) 2103 struct sockaddr *awrq, char *extra)
2103{ 2104{
2104 struct lbs_private *priv = dev->priv; 2105 struct lbs_private *priv = netdev_priv(dev);
2105 struct assoc_request * assoc_req; 2106 struct assoc_request * assoc_req;
2106 int ret = 0; 2107 int ret = 0;
2107 DECLARE_MAC_BUF(mac);
2108 2108
2109 lbs_deb_enter(LBS_DEB_WEXT); 2109 lbs_deb_enter(LBS_DEB_WEXT);
2110 2110
@@ -2114,7 +2114,7 @@ static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info,
2114 if (awrq->sa_family != ARPHRD_ETHER) 2114 if (awrq->sa_family != ARPHRD_ETHER)
2115 return -EINVAL; 2115 return -EINVAL;
2116 2116
2117 lbs_deb_wext("ASSOC: WAP: sa_data %s\n", print_mac(mac, awrq->sa_data)); 2117 lbs_deb_wext("ASSOC: WAP: sa_data %pM\n", awrq->sa_data);
2118 2118
2119 mutex_lock(&priv->lock); 2119 mutex_lock(&priv->lock);
2120 2120
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
index fdbcf8ba3e8a..3d3914c83b14 100644
--- a/drivers/net/wireless/libertas_tf/cmd.c
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -79,7 +79,6 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
79 struct cmd_ds_get_hw_spec cmd; 79 struct cmd_ds_get_hw_spec cmd;
80 int ret = -1; 80 int ret = -1;
81 u32 i; 81 u32 i;
82 DECLARE_MAC_BUF(mac);
83 82
84 memset(&cmd, 0, sizeof(cmd)); 83 memset(&cmd, 0, sizeof(cmd));
85 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 84 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
@@ -96,8 +95,8 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
96 priv->fwrelease = (priv->fwrelease << 8) | 95 priv->fwrelease = (priv->fwrelease << 8) |
97 (priv->fwrelease >> 24 & 0xff); 96 (priv->fwrelease >> 24 & 0xff);
98 97
99 printk(KERN_INFO "libertastf: %s, fw %u.%u.%up%u, cap 0x%08x\n", 98 printk(KERN_INFO "libertastf: %pM, fw %u.%u.%up%u, cap 0x%08x\n",
100 print_mac(mac, cmd.permanentaddr), 99 cmd.permanentaddr,
101 priv->fwrelease >> 24 & 0xff, 100 priv->fwrelease >> 24 & 0xff,
102 priv->fwrelease >> 16 & 0xff, 101 priv->fwrelease >> 16 & 0xff,
103 priv->fwrelease >> 8 & 0xff, 102 priv->fwrelease >> 8 & 0xff,
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index feff945ad856..d1fc305de5fe 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -354,9 +354,11 @@ static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
354 priv->vif = NULL; 354 priv->vif = NULL;
355} 355}
356 356
357static int lbtf_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 357static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed)
358{ 358{
359 struct lbtf_private *priv = hw->priv; 359 struct lbtf_private *priv = hw->priv;
360 struct ieee80211_conf *conf = &hw->conf;
361
360 if (conf->channel->center_freq != priv->cur_freq) { 362 if (conf->channel->center_freq != priv->cur_freq) {
361 priv->cur_freq = conf->channel->center_freq; 363 priv->cur_freq = conf->channel->center_freq;
362 lbtf_set_channel(priv, conf->channel->hw_value); 364 lbtf_set_channel(priv, conf->channel->hw_value);
@@ -590,14 +592,14 @@ EXPORT_SYMBOL_GPL(lbtf_remove_card);
590void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail) 592void lbtf_send_tx_feedback(struct lbtf_private *priv, u8 retrycnt, u8 fail)
591{ 593{
592 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(priv->tx_skb); 594 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(priv->tx_skb);
593 memset(&info->status, 0, sizeof(info->status)); 595
596 ieee80211_tx_info_clear_status(info);
594 /* 597 /*
595 * Commented out, otherwise we never go beyond 1Mbit/s using mac80211 598 * Commented out, otherwise we never go beyond 1Mbit/s using mac80211
596 * default pid rc algorithm. 599 * default pid rc algorithm.
597 * 600 *
598 * info->status.retry_count = MRVL_DEFAULT_RETRIES - retrycnt; 601 * info->status.retry_count = MRVL_DEFAULT_RETRIES - retrycnt;
599 */ 602 */
600 info->status.excessive_retries = fail ? 1 : 0;
601 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !fail) 603 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !fail)
602 info->flags |= IEEE80211_TX_STAT_ACK; 604 info->flags |= IEEE80211_TX_STAT_ACK;
603 skb_pull(priv->tx_skb, sizeof(struct txpd)); 605 skb_pull(priv->tx_skb, sizeof(struct txpd));
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 1a019e98dac3..f83d69e813d3 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -21,6 +21,7 @@
21#include <linux/if_arp.h> 21#include <linux/if_arp.h>
22#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
23#include <linux/etherdevice.h> 23#include <linux/etherdevice.h>
24#include <linux/debugfs.h>
24 25
25MODULE_AUTHOR("Jouni Malinen"); 26MODULE_AUTHOR("Jouni Malinen");
26MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); 27MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
@@ -32,6 +33,9 @@ MODULE_PARM_DESC(radios, "Number of simulated radios");
32 33
33struct hwsim_vif_priv { 34struct hwsim_vif_priv {
34 u32 magic; 35 u32 magic;
36 u8 bssid[ETH_ALEN];
37 bool assoc;
38 u16 aid;
35}; 39};
36 40
37#define HWSIM_VIF_MAGIC 0x69537748 41#define HWSIM_VIF_MAGIC 0x69537748
@@ -63,13 +67,13 @@ struct hwsim_sta_priv {
63static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta) 67static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta)
64{ 68{
65 struct hwsim_sta_priv *sp = (void *)sta->drv_priv; 69 struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
66 WARN_ON(sp->magic != HWSIM_VIF_MAGIC); 70 WARN_ON(sp->magic != HWSIM_STA_MAGIC);
67} 71}
68 72
69static inline void hwsim_set_sta_magic(struct ieee80211_sta *sta) 73static inline void hwsim_set_sta_magic(struct ieee80211_sta *sta)
70{ 74{
71 struct hwsim_sta_priv *sp = (void *)sta->drv_priv; 75 struct hwsim_sta_priv *sp = (void *)sta->drv_priv;
72 sp->magic = HWSIM_VIF_MAGIC; 76 sp->magic = HWSIM_STA_MAGIC;
73} 77}
74 78
75static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta) 79static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta)
@@ -132,6 +136,12 @@ struct mac80211_hwsim_data {
132 unsigned int rx_filter; 136 unsigned int rx_filter;
133 int started; 137 int started;
134 struct timer_list beacon_timer; 138 struct timer_list beacon_timer;
139 enum ps_mode {
140 PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
141 } ps;
142 bool ps_poll_pending;
143 struct dentry *debugfs;
144 struct dentry *debugfs_ps;
135}; 145};
136 146
137 147
@@ -196,6 +206,34 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
196} 206}
197 207
198 208
209static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
210 struct sk_buff *skb)
211{
212 switch (data->ps) {
213 case PS_DISABLED:
214 return true;
215 case PS_ENABLED:
216 return false;
217 case PS_AUTO_POLL:
218 /* TODO: accept (some) Beacons by default and other frames only
219 * if pending PS-Poll has been sent */
220 return true;
221 case PS_MANUAL_POLL:
222 /* Allow unicast frames to own address if there is a pending
223 * PS-Poll */
224 if (data->ps_poll_pending &&
225 memcmp(data->hw->wiphy->perm_addr, skb->data + 4,
226 ETH_ALEN) == 0) {
227 data->ps_poll_pending = false;
228 return true;
229 }
230 return false;
231 }
232
233 return true;
234}
235
236
199static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, 237static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
200 struct sk_buff *skb) 238 struct sk_buff *skb)
201{ 239{
@@ -209,9 +247,12 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
209 /* TODO: set mactime */ 247 /* TODO: set mactime */
210 rx_status.freq = data->channel->center_freq; 248 rx_status.freq = data->channel->center_freq;
211 rx_status.band = data->channel->band; 249 rx_status.band = data->channel->band;
212 rx_status.rate_idx = info->tx_rate_idx; 250 rx_status.rate_idx = info->control.rates[0].idx;
213 /* TODO: simulate signal strength (and optional packet drop) */ 251 /* TODO: simulate signal strength (and optional packet drop) */
214 252
253 if (data->ps != PS_DISABLED)
254 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
255
215 /* Copy skb to all enabled radios that are on the current frequency */ 256 /* Copy skb to all enabled radios that are on the current frequency */
216 spin_lock(&hwsim_radio_lock); 257 spin_lock(&hwsim_radio_lock);
217 list_for_each_entry(data2, &hwsim_radios, list) { 258 list_for_each_entry(data2, &hwsim_radios, list) {
@@ -221,6 +262,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
221 continue; 262 continue;
222 263
223 if (!data2->started || !data2->radio_enabled || 264 if (!data2->started || !data2->radio_enabled ||
265 !hwsim_ps_rx_ok(data2, skb) ||
224 data->channel->center_freq != data2->channel->center_freq) 266 data->channel->center_freq != data2->channel->center_freq)
225 continue; 267 continue;
226 268
@@ -269,13 +311,9 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
269 if (txi->control.sta) 311 if (txi->control.sta)
270 hwsim_check_sta_magic(txi->control.sta); 312 hwsim_check_sta_magic(txi->control.sta);
271 313
272 memset(&txi->status, 0, sizeof(txi->status)); 314 ieee80211_tx_info_clear_status(txi);
273 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK)) { 315 if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack)
274 if (ack) 316 txi->flags |= IEEE80211_TX_STAT_ACK;
275 txi->flags |= IEEE80211_TX_STAT_ACK;
276 else
277 txi->status.excessive_retries = 1;
278 }
279 ieee80211_tx_status_irqsafe(hw, skb); 317 ieee80211_tx_status_irqsafe(hw, skb);
280 return NETDEV_TX_OK; 318 return NETDEV_TX_OK;
281} 319}
@@ -294,6 +332,7 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
294{ 332{
295 struct mac80211_hwsim_data *data = hw->priv; 333 struct mac80211_hwsim_data *data = hw->priv;
296 data->started = 0; 334 data->started = 0;
335 del_timer(&data->beacon_timer);
297 printk(KERN_DEBUG "%s:%s\n", wiphy_name(hw->wiphy), __func__); 336 printk(KERN_DEBUG "%s:%s\n", wiphy_name(hw->wiphy), __func__);
298} 337}
299 338
@@ -301,10 +340,9 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
301static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, 340static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
302 struct ieee80211_if_init_conf *conf) 341 struct ieee80211_if_init_conf *conf)
303{ 342{
304 DECLARE_MAC_BUF(mac); 343 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
305 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%s)\n",
306 wiphy_name(hw->wiphy), __func__, conf->type, 344 wiphy_name(hw->wiphy), __func__, conf->type,
307 print_mac(mac, conf->mac_addr)); 345 conf->mac_addr);
308 hwsim_set_magic(conf->vif); 346 hwsim_set_magic(conf->vif);
309 return 0; 347 return 0;
310} 348}
@@ -313,10 +351,9 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
313static void mac80211_hwsim_remove_interface( 351static void mac80211_hwsim_remove_interface(
314 struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf) 352 struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf)
315{ 353{
316 DECLARE_MAC_BUF(mac); 354 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
317 printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%s)\n",
318 wiphy_name(hw->wiphy), __func__, conf->type, 355 wiphy_name(hw->wiphy), __func__, conf->type,
319 print_mac(mac, conf->mac_addr)); 356 conf->mac_addr);
320 hwsim_check_magic(conf->vif); 357 hwsim_check_magic(conf->vif);
321 hwsim_clear_magic(conf->vif); 358 hwsim_clear_magic(conf->vif);
322} 359}
@@ -331,7 +368,8 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
331 368
332 hwsim_check_magic(vif); 369 hwsim_check_magic(vif);
333 370
334 if (vif->type != NL80211_IFTYPE_AP) 371 if (vif->type != NL80211_IFTYPE_AP &&
372 vif->type != NL80211_IFTYPE_MESH_POINT)
335 return; 373 return;
336 374
337 skb = ieee80211_beacon_get(hw, vif); 375 skb = ieee80211_beacon_get(hw, vif);
@@ -361,10 +399,10 @@ static void mac80211_hwsim_beacon(unsigned long arg)
361} 399}
362 400
363 401
364static int mac80211_hwsim_config(struct ieee80211_hw *hw, 402static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
365 struct ieee80211_conf *conf)
366{ 403{
367 struct mac80211_hwsim_data *data = hw->priv; 404 struct mac80211_hwsim_data *data = hw->priv;
405 struct ieee80211_conf *conf = &hw->conf;
368 406
369 printk(KERN_DEBUG "%s:%s (freq=%d radio_enabled=%d beacon_int=%d)\n", 407 printk(KERN_DEBUG "%s:%s (freq=%d radio_enabled=%d beacon_int=%d)\n",
370 wiphy_name(hw->wiphy), __func__, 408 wiphy_name(hw->wiphy), __func__,
@@ -409,7 +447,16 @@ static int mac80211_hwsim_config_interface(struct ieee80211_hw *hw,
409 struct ieee80211_vif *vif, 447 struct ieee80211_vif *vif,
410 struct ieee80211_if_conf *conf) 448 struct ieee80211_if_conf *conf)
411{ 449{
450 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
451
412 hwsim_check_magic(vif); 452 hwsim_check_magic(vif);
453 if (conf->changed & IEEE80211_IFCC_BSSID) {
454 DECLARE_MAC_BUF(mac);
455 printk(KERN_DEBUG "%s:%s: BSSID changed: %pM\n",
456 wiphy_name(hw->wiphy), __func__,
457 conf->bssid);
458 memcpy(vp->bssid, conf->bssid, ETH_ALEN);
459 }
413 return 0; 460 return 0;
414} 461}
415 462
@@ -418,7 +465,46 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
418 struct ieee80211_bss_conf *info, 465 struct ieee80211_bss_conf *info,
419 u32 changed) 466 u32 changed)
420{ 467{
468 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
469
421 hwsim_check_magic(vif); 470 hwsim_check_magic(vif);
471
472 printk(KERN_DEBUG "%s:%s(changed=0x%x)\n",
473 wiphy_name(hw->wiphy), __func__, changed);
474
475 if (changed & BSS_CHANGED_ASSOC) {
476 printk(KERN_DEBUG " %s: ASSOC: assoc=%d aid=%d\n",
477 wiphy_name(hw->wiphy), info->assoc, info->aid);
478 vp->assoc = info->assoc;
479 vp->aid = info->aid;
480 }
481
482 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
483 printk(KERN_DEBUG " %s: ERP_CTS_PROT: %d\n",
484 wiphy_name(hw->wiphy), info->use_cts_prot);
485 }
486
487 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
488 printk(KERN_DEBUG " %s: ERP_PREAMBLE: %d\n",
489 wiphy_name(hw->wiphy), info->use_short_preamble);
490 }
491
492 if (changed & BSS_CHANGED_ERP_SLOT) {
493 printk(KERN_DEBUG " %s: ERP_SLOT: %d\n",
494 wiphy_name(hw->wiphy), info->use_short_slot);
495 }
496
497 if (changed & BSS_CHANGED_HT) {
498 printk(KERN_DEBUG " %s: HT: op_mode=0x%x\n",
499 wiphy_name(hw->wiphy),
500 info->ht.operation_mode);
501 }
502
503 if (changed & BSS_CHANGED_BASIC_RATES) {
504 printk(KERN_DEBUG " %s: BASIC_RATES: 0x%llx\n",
505 wiphy_name(hw->wiphy),
506 (unsigned long long) info->basic_rates);
507 }
422} 508}
423 509
424static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw, 510static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw,
@@ -434,6 +520,10 @@ static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw,
434 case STA_NOTIFY_REMOVE: 520 case STA_NOTIFY_REMOVE:
435 hwsim_clear_sta_magic(sta); 521 hwsim_clear_sta_magic(sta);
436 break; 522 break;
523 case STA_NOTIFY_SLEEP:
524 case STA_NOTIFY_AWAKE:
525 /* TODO: make good use of these flags */
526 break;
437 } 527 }
438} 528}
439 529
@@ -445,6 +535,17 @@ static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw,
445 return 0; 535 return 0;
446} 536}
447 537
538static int mac80211_hwsim_conf_tx(
539 struct ieee80211_hw *hw, u16 queue,
540 const struct ieee80211_tx_queue_params *params)
541{
542 printk(KERN_DEBUG "%s:%s (queue=%d txop=%d cw_min=%d cw_max=%d "
543 "aifs=%d)\n",
544 wiphy_name(hw->wiphy), __func__, queue,
545 params->txop, params->cw_min, params->cw_max, params->aifs);
546 return 0;
547}
548
448static const struct ieee80211_ops mac80211_hwsim_ops = 549static const struct ieee80211_ops mac80211_hwsim_ops =
449{ 550{
450 .tx = mac80211_hwsim_tx, 551 .tx = mac80211_hwsim_tx,
@@ -458,6 +559,7 @@ static const struct ieee80211_ops mac80211_hwsim_ops =
458 .bss_info_changed = mac80211_hwsim_bss_info_changed, 559 .bss_info_changed = mac80211_hwsim_bss_info_changed,
459 .sta_notify = mac80211_hwsim_sta_notify, 560 .sta_notify = mac80211_hwsim_sta_notify,
460 .set_tim = mac80211_hwsim_set_tim, 561 .set_tim = mac80211_hwsim_set_tim,
562 .conf_tx = mac80211_hwsim_conf_tx,
461}; 563};
462 564
463 565
@@ -474,6 +576,8 @@ static void mac80211_hwsim_free(void)
474 spin_unlock_bh(&hwsim_radio_lock); 576 spin_unlock_bh(&hwsim_radio_lock);
475 577
476 list_for_each_entry(data, &tmplist, list) { 578 list_for_each_entry(data, &tmplist, list) {
579 debugfs_remove(data->debugfs_ps);
580 debugfs_remove(data->debugfs);
477 ieee80211_unregister_hw(data->hw); 581 ieee80211_unregister_hw(data->hw);
478 device_unregister(data->dev); 582 device_unregister(data->dev);
479 ieee80211_free_hw(data->hw); 583 ieee80211_free_hw(data->hw);
@@ -499,13 +603,131 @@ static void hwsim_mon_setup(struct net_device *dev)
499} 603}
500 604
501 605
606static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
607{
608 struct mac80211_hwsim_data *data = dat;
609 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
610 DECLARE_MAC_BUF(buf);
611 struct sk_buff *skb;
612 struct ieee80211_pspoll *pspoll;
613
614 if (!vp->assoc)
615 return;
616
617 printk(KERN_DEBUG "%s:%s: send PS-Poll to %pM for aid %d\n",
618 wiphy_name(data->hw->wiphy), __func__, vp->bssid, vp->aid);
619
620 skb = dev_alloc_skb(sizeof(*pspoll));
621 if (!skb)
622 return;
623 pspoll = (void *) skb_put(skb, sizeof(*pspoll));
624 pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
625 IEEE80211_STYPE_PSPOLL |
626 IEEE80211_FCTL_PM);
627 pspoll->aid = cpu_to_le16(0xc000 | vp->aid);
628 memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
629 memcpy(pspoll->ta, mac, ETH_ALEN);
630 if (data->radio_enabled &&
631 !mac80211_hwsim_tx_frame(data->hw, skb))
632 printk(KERN_DEBUG "%s: PS-Poll frame not ack'ed\n", __func__);
633 dev_kfree_skb(skb);
634}
635
636
637static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
638 struct ieee80211_vif *vif, int ps)
639{
640 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
641 DECLARE_MAC_BUF(buf);
642 struct sk_buff *skb;
643 struct ieee80211_hdr *hdr;
644
645 if (!vp->assoc)
646 return;
647
648 printk(KERN_DEBUG "%s:%s: send data::nullfunc to %pM ps=%d\n",
649 wiphy_name(data->hw->wiphy), __func__, vp->bssid, ps);
650
651 skb = dev_alloc_skb(sizeof(*hdr));
652 if (!skb)
653 return;
654 hdr = (void *) skb_put(skb, sizeof(*hdr) - ETH_ALEN);
655 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
656 IEEE80211_STYPE_NULLFUNC |
657 (ps ? IEEE80211_FCTL_PM : 0));
658 hdr->duration_id = cpu_to_le16(0);
659 memcpy(hdr->addr1, vp->bssid, ETH_ALEN);
660 memcpy(hdr->addr2, mac, ETH_ALEN);
661 memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
662 if (data->radio_enabled &&
663 !mac80211_hwsim_tx_frame(data->hw, skb))
664 printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
665 dev_kfree_skb(skb);
666}
667
668
669static void hwsim_send_nullfunc_ps(void *dat, u8 *mac,
670 struct ieee80211_vif *vif)
671{
672 struct mac80211_hwsim_data *data = dat;
673 hwsim_send_nullfunc(data, mac, vif, 1);
674}
675
676
677static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac,
678 struct ieee80211_vif *vif)
679{
680 struct mac80211_hwsim_data *data = dat;
681 hwsim_send_nullfunc(data, mac, vif, 0);
682}
683
684
685static int hwsim_fops_ps_read(void *dat, u64 *val)
686{
687 struct mac80211_hwsim_data *data = dat;
688 *val = data->ps;
689 return 0;
690}
691
692static int hwsim_fops_ps_write(void *dat, u64 val)
693{
694 struct mac80211_hwsim_data *data = dat;
695 enum ps_mode old_ps;
696
697 if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL &&
698 val != PS_MANUAL_POLL)
699 return -EINVAL;
700
701 old_ps = data->ps;
702 data->ps = val;
703
704 if (val == PS_MANUAL_POLL) {
705 ieee80211_iterate_active_interfaces(data->hw,
706 hwsim_send_ps_poll, data);
707 data->ps_poll_pending = true;
708 } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
709 ieee80211_iterate_active_interfaces(data->hw,
710 hwsim_send_nullfunc_ps,
711 data);
712 } else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
713 ieee80211_iterate_active_interfaces(data->hw,
714 hwsim_send_nullfunc_no_ps,
715 data);
716 }
717
718 return 0;
719}
720
721DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write,
722 "%llu\n");
723
724
502static int __init init_mac80211_hwsim(void) 725static int __init init_mac80211_hwsim(void)
503{ 726{
504 int i, err = 0; 727 int i, err = 0;
505 u8 addr[ETH_ALEN]; 728 u8 addr[ETH_ALEN];
506 struct mac80211_hwsim_data *data; 729 struct mac80211_hwsim_data *data;
507 struct ieee80211_hw *hw; 730 struct ieee80211_hw *hw;
508 DECLARE_MAC_BUF(mac);
509 731
510 if (radios < 1 || radios > 100) 732 if (radios < 1 || radios > 100)
511 return -EINVAL; 733 return -EINVAL;
@@ -553,7 +775,8 @@ static int __init init_mac80211_hwsim(void)
553 hw->queues = 4; 775 hw->queues = 4;
554 hw->wiphy->interface_modes = 776 hw->wiphy->interface_modes =
555 BIT(NL80211_IFTYPE_STATION) | 777 BIT(NL80211_IFTYPE_STATION) |
556 BIT(NL80211_IFTYPE_AP); 778 BIT(NL80211_IFTYPE_AP) |
779 BIT(NL80211_IFTYPE_MESH_POINT);
557 hw->ampdu_queues = 1; 780 hw->ampdu_queues = 1;
558 781
559 /* ask mac80211 to reserve space for magic */ 782 /* ask mac80211 to reserve space for magic */
@@ -566,19 +789,18 @@ static int __init init_mac80211_hwsim(void)
566 data->band.n_channels = ARRAY_SIZE(hwsim_channels); 789 data->band.n_channels = ARRAY_SIZE(hwsim_channels);
567 data->band.bitrates = data->rates; 790 data->band.bitrates = data->rates;
568 data->band.n_bitrates = ARRAY_SIZE(hwsim_rates); 791 data->band.n_bitrates = ARRAY_SIZE(hwsim_rates);
569 data->band.ht_info.ht_supported = 1; 792 data->band.ht_cap.ht_supported = true;
570 data->band.ht_info.cap = IEEE80211_HT_CAP_SUP_WIDTH | 793 data->band.ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
571 IEEE80211_HT_CAP_GRN_FLD | 794 IEEE80211_HT_CAP_GRN_FLD |
572 IEEE80211_HT_CAP_SGI_40 | 795 IEEE80211_HT_CAP_SGI_40 |
573 IEEE80211_HT_CAP_DSSSCCK40; 796 IEEE80211_HT_CAP_DSSSCCK40;
574 data->band.ht_info.ampdu_factor = 0x3; 797 data->band.ht_cap.ampdu_factor = 0x3;
575 data->band.ht_info.ampdu_density = 0x6; 798 data->band.ht_cap.ampdu_density = 0x6;
576 memset(data->band.ht_info.supp_mcs_set, 0, 799 memset(&data->band.ht_cap.mcs, 0,
577 sizeof(data->band.ht_info.supp_mcs_set)); 800 sizeof(data->band.ht_cap.mcs));
578 data->band.ht_info.supp_mcs_set[0] = 0xff; 801 data->band.ht_cap.mcs.rx_mask[0] = 0xff;
579 data->band.ht_info.supp_mcs_set[1] = 0xff; 802 data->band.ht_cap.mcs.rx_mask[1] = 0xff;
580 data->band.ht_info.supp_mcs_set[12] = 803 data->band.ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
581 IEEE80211_HT_CAP_MCS_TX_DEFINED;
582 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &data->band; 804 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &data->band;
583 805
584 err = ieee80211_register_hw(hw); 806 err = ieee80211_register_hw(hw);
@@ -588,9 +810,15 @@ static int __init init_mac80211_hwsim(void)
588 goto failed_hw; 810 goto failed_hw;
589 } 811 }
590 812
591 printk(KERN_DEBUG "%s: hwaddr %s registered\n", 813 printk(KERN_DEBUG "%s: hwaddr %pM registered\n",
592 wiphy_name(hw->wiphy), 814 wiphy_name(hw->wiphy),
593 print_mac(mac, hw->wiphy->perm_addr)); 815 hw->wiphy->perm_addr);
816
817 data->debugfs = debugfs_create_dir("hwsim",
818 hw->wiphy->debugfsdir);
819 data->debugfs_ps = debugfs_create_file("ps", 0666,
820 data->debugfs, data,
821 &hwsim_fops_ps);
594 822
595 setup_timer(&data->beacon_timer, mac80211_hwsim_beacon, 823 setup_timer(&data->beacon_timer, mac80211_hwsim_beacon,
596 (unsigned long) hw); 824 (unsigned long) hw);
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index a670f36b5f3f..24caec6caf1f 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -737,7 +737,6 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
737 win_req_t req; 737 win_req_t req;
738 memreq_t mem; 738 memreq_t mem;
739 u_char __iomem *ramBase = NULL; 739 u_char __iomem *ramBase = NULL;
740 DECLARE_MAC_BUF(mac);
741 740
742 DEBUG(0, "netwave_pcmcia_config(0x%p)\n", link); 741 DEBUG(0, "netwave_pcmcia_config(0x%p)\n", link);
743 742
@@ -808,12 +807,12 @@ static int netwave_pcmcia_config(struct pcmcia_device *link) {
808 dev->dev_addr[i] = readb(ramBase + NETWAVE_EREG_PA + i); 807 dev->dev_addr[i] = readb(ramBase + NETWAVE_EREG_PA + i);
809 808
810 printk(KERN_INFO "%s: Netwave: port %#3lx, irq %d, mem %lx, " 809 printk(KERN_INFO "%s: Netwave: port %#3lx, irq %d, mem %lx, "
811 "id %c%c, hw_addr %s\n", 810 "id %c%c, hw_addr %pM\n",
812 dev->name, dev->base_addr, dev->irq, 811 dev->name, dev->base_addr, dev->irq,
813 (u_long) ramBase, 812 (u_long) ramBase,
814 (int) readb(ramBase+NETWAVE_EREG_NI), 813 (int) readb(ramBase+NETWAVE_EREG_NI),
815 (int) readb(ramBase+NETWAVE_EREG_NI+1), 814 (int) readb(ramBase+NETWAVE_EREG_NI+1),
816 print_mac(mac, dev->dev_addr)); 815 dev->dev_addr);
817 816
818 /* get revision words */ 817 /* get revision words */
819 printk(KERN_DEBUG "Netwave_reset: revision %04x %04x\n", 818 printk(KERN_DEBUG "Netwave_reset: revision %04x %04x\n",
@@ -1308,7 +1307,6 @@ static int netwave_rx(struct net_device *dev)
1308 /* Queue packet for network layer */ 1307 /* Queue packet for network layer */
1309 netif_rx(skb); 1308 netif_rx(skb);
1310 1309
1311 dev->last_rx = jiffies;
1312 priv->stats.rx_packets++; 1310 priv->stats.rx_packets++;
1313 priv->stats.rx_bytes += rcvLen; 1311 priv->stats.rx_bytes += rcvLen;
1314 1312
diff --git a/drivers/net/wireless/orinoco/Makefile b/drivers/net/wireless/orinoco/Makefile
new file mode 100644
index 000000000000..791366e08c50
--- /dev/null
+++ b/drivers/net/wireless/orinoco/Makefile
@@ -0,0 +1,12 @@
1#
2# Makefile for the orinoco wireless device drivers.
3#
4
5obj-$(CONFIG_HERMES) += orinoco.o hermes.o hermes_dld.o
6obj-$(CONFIG_PCMCIA_HERMES) += orinoco_cs.o
7obj-$(CONFIG_APPLE_AIRPORT) += airport.o
8obj-$(CONFIG_PLX_HERMES) += orinoco_plx.o
9obj-$(CONFIG_PCI_HERMES) += orinoco_pci.o
10obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
11obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o
12obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/orinoco/airport.c
index ce03a2e865fa..28f1cae48439 100644
--- a/drivers/net/wireless/airport.c
+++ b/drivers/net/wireless/orinoco/airport.c
@@ -279,7 +279,7 @@ init_airport(void)
279static void __exit 279static void __exit
280exit_airport(void) 280exit_airport(void)
281{ 281{
282 return macio_unregister_driver(&airport_driver); 282 macio_unregister_driver(&airport_driver);
283} 283}
284 284
285module_init(init_airport); 285module_init(init_airport);
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/orinoco/hermes.c
index bfa375369df3..bfa375369df3 100644
--- a/drivers/net/wireless/hermes.c
+++ b/drivers/net/wireless/orinoco/hermes.c
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/orinoco/hermes.h
index 8b13c8fef3dc..8b13c8fef3dc 100644
--- a/drivers/net/wireless/hermes.h
+++ b/drivers/net/wireless/orinoco/hermes.h
diff --git a/drivers/net/wireless/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index d8c626e61a3a..d8c626e61a3a 100644
--- a/drivers/net/wireless/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
diff --git a/drivers/net/wireless/hermes_dld.h b/drivers/net/wireless/orinoco/hermes_dld.h
index 6fcb26277999..6fcb26277999 100644
--- a/drivers/net/wireless/hermes_dld.h
+++ b/drivers/net/wireless/orinoco/hermes_dld.h
diff --git a/drivers/net/wireless/hermes_rid.h b/drivers/net/wireless/orinoco/hermes_rid.h
index 42eb67dea1df..42eb67dea1df 100644
--- a/drivers/net/wireless/hermes_rid.h
+++ b/drivers/net/wireless/orinoco/hermes_rid.h
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco/orinoco.c
index e0512e49d6d3..bc84e2792f8a 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco/orinoco.c
@@ -84,10 +84,11 @@
84#include <linux/etherdevice.h> 84#include <linux/etherdevice.h>
85#include <linux/ethtool.h> 85#include <linux/ethtool.h>
86#include <linux/firmware.h> 86#include <linux/firmware.h>
87#include <linux/suspend.h>
87#include <linux/if_arp.h> 88#include <linux/if_arp.h>
88#include <linux/wireless.h> 89#include <linux/wireless.h>
90#include <linux/ieee80211.h>
89#include <net/iw_handler.h> 91#include <net/iw_handler.h>
90#include <net/ieee80211.h>
91 92
92#include <linux/scatterlist.h> 93#include <linux/scatterlist.h>
93#include <linux/crypto.h> 94#include <linux/crypto.h>
@@ -143,7 +144,7 @@ static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
143#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2) 144#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
144 145
145#define ORINOCO_MIN_MTU 256 146#define ORINOCO_MIN_MTU 256
146#define ORINOCO_MAX_MTU (IEEE80211_DATA_LEN - ENCAPS_OVERHEAD) 147#define ORINOCO_MAX_MTU (IEEE80211_MAX_DATA_LEN - ENCAPS_OVERHEAD)
147 148
148#define SYMBOL_MAX_VER_LEN (14) 149#define SYMBOL_MAX_VER_LEN (14)
149#define USER_BAP 0 150#define USER_BAP 0
@@ -392,7 +393,7 @@ static void orinoco_bss_data_init(struct orinoco_private *priv)
392} 393}
393 394
394static inline u8 *orinoco_get_ie(u8 *data, size_t len, 395static inline u8 *orinoco_get_ie(u8 *data, size_t len,
395 enum ieee80211_mfie eid) 396 enum ieee80211_eid eid)
396{ 397{
397 u8 *p = data; 398 u8 *p = data;
398 while ((p + 2) < (data + len)) { 399 while ((p + 2) < (data + len)) {
@@ -409,7 +410,7 @@ static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
409{ 410{
410 u8 *p = data; 411 u8 *p = data;
411 while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) { 412 while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) {
412 if ((p[0] == MFIE_TYPE_GENERIC) && 413 if ((p[0] == WLAN_EID_GENERIC) &&
413 (memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0)) 414 (memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0))
414 return p; 415 return p;
415 p += p[1] + 2; 416 p += p[1] + 2;
@@ -431,9 +432,9 @@ struct fw_info {
431}; 432};
432 433
433const static struct fw_info orinoco_fw[] = { 434const static struct fw_info orinoco_fw[] = {
434 { "", "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 }, 435 { NULL, "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 },
435 { "", "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 }, 436 { NULL, "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
436 { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", "", 0x00003100, 512 } 437 { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", NULL, 0x00003100, 512 }
437}; 438};
438 439
439/* Structure used to access fields in FW 440/* Structure used to access fields in FW
@@ -487,13 +488,17 @@ orinoco_dl_firmware(struct orinoco_private *priv,
487 if (err) 488 if (err)
488 goto free; 489 goto free;
489 490
490 err = request_firmware(&fw_entry, firmware, priv->dev); 491 if (!priv->cached_fw) {
491 if (err) { 492 err = request_firmware(&fw_entry, firmware, priv->dev);
492 printk(KERN_ERR "%s: Cannot find firmware %s\n", 493
493 dev->name, firmware); 494 if (err) {
494 err = -ENOENT; 495 printk(KERN_ERR "%s: Cannot find firmware %s\n",
495 goto free; 496 dev->name, firmware);
496 } 497 err = -ENOENT;
498 goto free;
499 }
500 } else
501 fw_entry = priv->cached_fw;
497 502
498 hdr = (const struct orinoco_fw_header *) fw_entry->data; 503 hdr = (const struct orinoco_fw_header *) fw_entry->data;
499 504
@@ -535,7 +540,9 @@ orinoco_dl_firmware(struct orinoco_private *priv,
535 dev->name, hermes_present(hw)); 540 dev->name, hermes_present(hw));
536 541
537abort: 542abort:
538 release_firmware(fw_entry); 543 /* If we requested the firmware, release it. */
544 if (!priv->cached_fw)
545 release_firmware(fw_entry);
539 546
540free: 547free:
541 kfree(pda); 548 kfree(pda);
@@ -639,34 +646,41 @@ symbol_dl_firmware(struct orinoco_private *priv,
639 int ret; 646 int ret;
640 const struct firmware *fw_entry; 647 const struct firmware *fw_entry;
641 648
642 if (request_firmware(&fw_entry, fw->pri_fw, 649 if (!priv->cached_pri_fw) {
643 priv->dev) != 0) { 650 if (request_firmware(&fw_entry, fw->pri_fw, priv->dev) != 0) {
644 printk(KERN_ERR "%s: Cannot find firmware: %s\n", 651 printk(KERN_ERR "%s: Cannot find firmware: %s\n",
645 dev->name, fw->pri_fw); 652 dev->name, fw->pri_fw);
646 return -ENOENT; 653 return -ENOENT;
647 } 654 }
655 } else
656 fw_entry = priv->cached_pri_fw;
648 657
649 /* Load primary firmware */ 658 /* Load primary firmware */
650 ret = symbol_dl_image(priv, fw, fw_entry->data, 659 ret = symbol_dl_image(priv, fw, fw_entry->data,
651 fw_entry->data + fw_entry->size, 0); 660 fw_entry->data + fw_entry->size, 0);
652 release_firmware(fw_entry); 661
662 if (!priv->cached_pri_fw)
663 release_firmware(fw_entry);
653 if (ret) { 664 if (ret) {
654 printk(KERN_ERR "%s: Primary firmware download failed\n", 665 printk(KERN_ERR "%s: Primary firmware download failed\n",
655 dev->name); 666 dev->name);
656 return ret; 667 return ret;
657 } 668 }
658 669
659 if (request_firmware(&fw_entry, fw->sta_fw, 670 if (!priv->cached_fw) {
660 priv->dev) != 0) { 671 if (request_firmware(&fw_entry, fw->sta_fw, priv->dev) != 0) {
661 printk(KERN_ERR "%s: Cannot find firmware: %s\n", 672 printk(KERN_ERR "%s: Cannot find firmware: %s\n",
662 dev->name, fw->sta_fw); 673 dev->name, fw->sta_fw);
663 return -ENOENT; 674 return -ENOENT;
664 } 675 }
676 } else
677 fw_entry = priv->cached_fw;
665 678
666 /* Load secondary firmware */ 679 /* Load secondary firmware */
667 ret = symbol_dl_image(priv, fw, fw_entry->data, 680 ret = symbol_dl_image(priv, fw, fw_entry->data,
668 fw_entry->data + fw_entry->size, 1); 681 fw_entry->data + fw_entry->size, 1);
669 release_firmware(fw_entry); 682 if (!priv->cached_fw)
683 release_firmware(fw_entry);
670 if (ret) { 684 if (ret) {
671 printk(KERN_ERR "%s: Secondary firmware download failed\n", 685 printk(KERN_ERR "%s: Secondary firmware download failed\n",
672 dev->name); 686 dev->name);
@@ -699,6 +713,45 @@ static int orinoco_download(struct orinoco_private *priv)
699 return err; 713 return err;
700} 714}
701 715
716#if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP)
717static void orinoco_cache_fw(struct orinoco_private *priv, int ap)
718{
719 const struct firmware *fw_entry = NULL;
720 const char *pri_fw;
721 const char *fw;
722
723 pri_fw = orinoco_fw[priv->firmware_type].pri_fw;
724 if (ap)
725 fw = orinoco_fw[priv->firmware_type].ap_fw;
726 else
727 fw = orinoco_fw[priv->firmware_type].sta_fw;
728
729 if (pri_fw) {
730 if (request_firmware(&fw_entry, pri_fw, priv->dev) == 0)
731 priv->cached_pri_fw = fw_entry;
732 }
733
734 if (fw) {
735 if (request_firmware(&fw_entry, fw, priv->dev) == 0)
736 priv->cached_fw = fw_entry;
737 }
738}
739
740static void orinoco_uncache_fw(struct orinoco_private *priv)
741{
742 if (priv->cached_pri_fw)
743 release_firmware(priv->cached_pri_fw);
744 if (priv->cached_fw)
745 release_firmware(priv->cached_fw);
746
747 priv->cached_pri_fw = NULL;
748 priv->cached_fw = NULL;
749}
750#else
751#define orinoco_cache_fw(priv, ap)
752#define orinoco_uncache_fw(priv)
753#endif
754
702/********************************************************************/ 755/********************************************************************/
703/* Device methods */ 756/* Device methods */
704/********************************************************************/ 757/********************************************************************/
@@ -800,7 +853,7 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
800 wstats->qual.qual = (int)le16_to_cpu(cq.qual); 853 wstats->qual.qual = (int)le16_to_cpu(cq.qual);
801 wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95; 854 wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95;
802 wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95; 855 wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95;
803 wstats->qual.updated = 7; 856 wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
804 } 857 }
805 } 858 }
806 859
@@ -830,7 +883,8 @@ static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
830 if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) ) 883 if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) )
831 return -EINVAL; 884 return -EINVAL;
832 885
833 if ( (new_mtu + ENCAPS_OVERHEAD + IEEE80211_HLEN) > 886 /* MTU + encapsulation + header length */
887 if ( (new_mtu + ENCAPS_OVERHEAD + sizeof(struct ieee80211_hdr)) >
834 (priv->nicbuf_size - ETH_HLEN) ) 888 (priv->nicbuf_size - ETH_HLEN) )
835 return -EINVAL; 889 return -EINVAL;
836 890
@@ -1158,7 +1212,7 @@ static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
1158 wstats.level = level - 0x95; 1212 wstats.level = level - 0x95;
1159 wstats.noise = noise - 0x95; 1213 wstats.noise = noise - 0x95;
1160 wstats.qual = (level > noise) ? (level - noise) : 0; 1214 wstats.qual = (level > noise) ? (level - noise) : 0;
1161 wstats.updated = 7; 1215 wstats.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
1162 /* Update spy records */ 1216 /* Update spy records */
1163 wireless_spy_update(dev, mac, &wstats); 1217 wireless_spy_update(dev, mac, &wstats);
1164} 1218}
@@ -1245,7 +1299,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
1245 } 1299 }
1246 1300
1247 /* sanity check the length */ 1301 /* sanity check the length */
1248 if (datalen > IEEE80211_DATA_LEN + 12) { 1302 if (datalen > IEEE80211_MAX_DATA_LEN + 12) {
1249 printk(KERN_DEBUG "%s: oversized monitor frame, " 1303 printk(KERN_DEBUG "%s: oversized monitor frame, "
1250 "data length = %d\n", dev->name, datalen); 1304 "data length = %d\n", dev->name, datalen);
1251 stats->rx_length_errors++; 1305 stats->rx_length_errors++;
@@ -1280,7 +1334,6 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
1280 skb->pkt_type = PACKET_OTHERHOST; 1334 skb->pkt_type = PACKET_OTHERHOST;
1281 skb->protocol = __constant_htons(ETH_P_802_2); 1335 skb->protocol = __constant_htons(ETH_P_802_2);
1282 1336
1283 dev->last_rx = jiffies;
1284 stats->rx_packets++; 1337 stats->rx_packets++;
1285 stats->rx_bytes += skb->len; 1338 stats->rx_bytes += skb->len;
1286 1339
@@ -1374,7 +1427,7 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
1374 data. */ 1427 data. */
1375 goto out; 1428 goto out;
1376 } 1429 }
1377 if (length > IEEE80211_DATA_LEN) { 1430 if (length > IEEE80211_MAX_DATA_LEN) {
1378 printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n", 1431 printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
1379 dev->name, length); 1432 dev->name, length);
1380 stats->rx_length_errors++; 1433 stats->rx_length_errors++;
@@ -1477,12 +1530,11 @@ static void orinoco_rx(struct net_device *dev,
1477 MICHAEL_MIC_LEN)) { 1530 MICHAEL_MIC_LEN)) {
1478 union iwreq_data wrqu; 1531 union iwreq_data wrqu;
1479 struct iw_michaelmicfailure wxmic; 1532 struct iw_michaelmicfailure wxmic;
1480 DECLARE_MAC_BUF(mac);
1481 1533
1482 printk(KERN_WARNING "%s: " 1534 printk(KERN_WARNING "%s: "
1483 "Invalid Michael MIC in data frame from %s, " 1535 "Invalid Michael MIC in data frame from %pM, "
1484 "using key %i\n", 1536 "using key %i\n",
1485 dev->name, print_mac(mac, src), key_id); 1537 dev->name, src, key_id);
1486 1538
1487 /* TODO: update stats */ 1539 /* TODO: update stats */
1488 1540
@@ -1530,7 +1582,6 @@ static void orinoco_rx(struct net_device *dev,
1530 else 1582 else
1531 memcpy(hdr->h_source, desc->addr2, ETH_ALEN); 1583 memcpy(hdr->h_source, desc->addr2, ETH_ALEN);
1532 1584
1533 dev->last_rx = jiffies;
1534 skb->protocol = eth_type_trans(skb, dev); 1585 skb->protocol = eth_type_trans(skb, dev);
1535 skb->ip_summed = CHECKSUM_NONE; 1586 skb->ip_summed = CHECKSUM_NONE;
1536 if (fc & IEEE80211_FCTL_TODS) 1587 if (fc & IEEE80211_FCTL_TODS)
@@ -1699,7 +1750,7 @@ static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
1699 union iwreq_data wrqu; 1750 union iwreq_data wrqu;
1700 int err; 1751 int err;
1701 1752
1702 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENTBSSID, 1753 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
1703 ETH_ALEN, NULL, wrqu.ap_addr.sa_data); 1754 ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
1704 if (err != 0) 1755 if (err != 0)
1705 return; 1756 return;
@@ -1722,7 +1773,7 @@ static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
1722 if (!priv->has_wpa) 1773 if (!priv->has_wpa)
1723 return; 1774 return;
1724 1775
1725 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO, 1776 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
1726 sizeof(buf), NULL, &buf); 1777 sizeof(buf), NULL, &buf);
1727 if (err != 0) 1778 if (err != 0)
1728 return; 1779 return;
@@ -1752,7 +1803,7 @@ static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
1752 if (!priv->has_wpa) 1803 if (!priv->has_wpa)
1753 return; 1804 return;
1754 1805
1755 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO, 1806 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO,
1756 sizeof(buf), NULL, &buf); 1807 sizeof(buf), NULL, &buf);
1757 if (err != 0) 1808 if (err != 0)
1758 return; 1809 return;
@@ -2301,6 +2352,11 @@ int orinoco_reinit_firmware(struct net_device *dev)
2301 int err; 2352 int err;
2302 2353
2303 err = hermes_init(hw); 2354 err = hermes_init(hw);
2355 if (priv->do_fw_download && !err) {
2356 err = orinoco_download(priv);
2357 if (err)
2358 priv->do_fw_download = 0;
2359 }
2304 if (!err) 2360 if (!err)
2305 err = orinoco_allocate_fid(dev); 2361 err = orinoco_allocate_fid(dev);
2306 2362
@@ -2926,12 +2982,6 @@ static void orinoco_reset(struct work_struct *work)
2926 } 2982 }
2927 } 2983 }
2928 2984
2929 if (priv->do_fw_download) {
2930 err = orinoco_download(priv);
2931 if (err)
2932 priv->do_fw_download = 0;
2933 }
2934
2935 err = orinoco_reinit_firmware(dev); 2985 err = orinoco_reinit_firmware(dev);
2936 if (err) { 2986 if (err) {
2937 printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n", 2987 printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n",
@@ -3056,6 +3106,50 @@ irqreturn_t orinoco_interrupt(int irq, void *dev_id)
3056} 3106}
3057 3107
3058/********************************************************************/ 3108/********************************************************************/
3109/* Power management */
3110/********************************************************************/
3111#if defined(CONFIG_PM_SLEEP) && !defined(CONFIG_HERMES_CACHE_FW_ON_INIT)
3112static int orinoco_pm_notifier(struct notifier_block *notifier,
3113 unsigned long pm_event,
3114 void *unused)
3115{
3116 struct orinoco_private *priv = container_of(notifier,
3117 struct orinoco_private,
3118 pm_notifier);
3119
3120 /* All we need to do is cache the firmware before suspend, and
3121 * release it when we come out.
3122 *
3123 * Only need to do this if we're downloading firmware. */
3124 if (!priv->do_fw_download)
3125 return NOTIFY_DONE;
3126
3127 switch (pm_event) {
3128 case PM_HIBERNATION_PREPARE:
3129 case PM_SUSPEND_PREPARE:
3130 orinoco_cache_fw(priv, 0);
3131 break;
3132
3133 case PM_POST_RESTORE:
3134 /* Restore from hibernation failed. We need to clean
3135 * up in exactly the same way, so fall through. */
3136 case PM_POST_HIBERNATION:
3137 case PM_POST_SUSPEND:
3138 orinoco_uncache_fw(priv);
3139 break;
3140
3141 case PM_RESTORE_PREPARE:
3142 default:
3143 break;
3144 }
3145
3146 return NOTIFY_DONE;
3147}
3148#else /* !PM_SLEEP || HERMES_CACHE_FW_ON_INIT */
3149#define orinoco_pm_notifier NULL
3150#endif
3151
3152/********************************************************************/
3059/* Initialization */ 3153/* Initialization */
3060/********************************************************************/ 3154/********************************************************************/
3061 3155
@@ -3277,11 +3371,10 @@ static int orinoco_init(struct net_device *dev)
3277 struct hermes_idstring nickbuf; 3371 struct hermes_idstring nickbuf;
3278 u16 reclen; 3372 u16 reclen;
3279 int len; 3373 int len;
3280 DECLARE_MAC_BUF(mac);
3281 3374
3282 /* No need to lock, the hw_unavailable flag is already set in 3375 /* No need to lock, the hw_unavailable flag is already set in
3283 * alloc_orinocodev() */ 3376 * alloc_orinocodev() */
3284 priv->nicbuf_size = IEEE80211_FRAME_LEN + ETH_HLEN; 3377 priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN;
3285 3378
3286 /* Initialize the firmware */ 3379 /* Initialize the firmware */
3287 err = hermes_init(hw); 3380 err = hermes_init(hw);
@@ -3299,6 +3392,10 @@ static int orinoco_init(struct net_device *dev)
3299 } 3392 }
3300 3393
3301 if (priv->do_fw_download) { 3394 if (priv->do_fw_download) {
3395#ifdef CONFIG_HERMES_CACHE_FW_ON_INIT
3396 orinoco_cache_fw(priv, 0);
3397#endif
3398
3302 err = orinoco_download(priv); 3399 err = orinoco_download(priv);
3303 if (err) 3400 if (err)
3304 priv->do_fw_download = 0; 3401 priv->do_fw_download = 0;
@@ -3348,8 +3445,8 @@ static int orinoco_init(struct net_device *dev)
3348 goto out; 3445 goto out;
3349 } 3446 }
3350 3447
3351 printk(KERN_DEBUG "%s: MAC address %s\n", 3448 printk(KERN_DEBUG "%s: MAC address %pM\n",
3352 dev->name, print_mac(mac, dev->dev_addr)); 3449 dev->name, dev->dev_addr);
3353 3450
3354 /* Get the station name */ 3451 /* Get the station name */
3355 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME, 3452 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
@@ -3535,6 +3632,13 @@ struct net_device
3535 netif_carrier_off(dev); 3632 netif_carrier_off(dev);
3536 priv->last_linkstatus = 0xffff; 3633 priv->last_linkstatus = 0xffff;
3537 3634
3635 priv->cached_pri_fw = NULL;
3636 priv->cached_fw = NULL;
3637
3638 /* Register PM notifiers */
3639 priv->pm_notifier.notifier_call = orinoco_pm_notifier;
3640 register_pm_notifier(&priv->pm_notifier);
3641
3538 return dev; 3642 return dev;
3539} 3643}
3540 3644
@@ -3546,6 +3650,10 @@ void free_orinocodev(struct net_device *dev)
3546 * when we call tasklet_kill it will run one final time, 3650 * when we call tasklet_kill it will run one final time,
3547 * emptying the list */ 3651 * emptying the list */
3548 tasklet_kill(&priv->rx_tasklet); 3652 tasklet_kill(&priv->rx_tasklet);
3653
3654 unregister_pm_notifier(&priv->pm_notifier);
3655 orinoco_uncache_fw(priv);
3656
3549 priv->wpa_ie_len = 0; 3657 priv->wpa_ie_len = 0;
3550 kfree(priv->wpa_ie); 3658 kfree(priv->wpa_ie);
3551 orinoco_mic_free(priv); 3659 orinoco_mic_free(priv);
@@ -4672,7 +4780,7 @@ static int orinoco_ioctl_set_encodeext(struct net_device *dev,
4672 /* Determine and validate the key index */ 4780 /* Determine and validate the key index */
4673 idx = encoding->flags & IW_ENCODE_INDEX; 4781 idx = encoding->flags & IW_ENCODE_INDEX;
4674 if (idx) { 4782 if (idx) {
4675 if ((idx < 1) || (idx > WEP_KEYS)) 4783 if ((idx < 1) || (idx > 4))
4676 goto out; 4784 goto out;
4677 idx--; 4785 idx--;
4678 } else 4786 } else
@@ -4777,7 +4885,7 @@ static int orinoco_ioctl_get_encodeext(struct net_device *dev,
4777 4885
4778 idx = encoding->flags & IW_ENCODE_INDEX; 4886 idx = encoding->flags & IW_ENCODE_INDEX;
4779 if (idx) { 4887 if (idx) {
4780 if ((idx < 1) || (idx > WEP_KEYS)) 4888 if ((idx < 1) || (idx > 4))
4781 goto out; 4889 goto out;
4782 idx--; 4890 idx--;
4783 } else 4891 } else
@@ -4940,7 +5048,8 @@ static int orinoco_ioctl_set_genie(struct net_device *dev,
4940 unsigned long flags; 5048 unsigned long flags;
4941 int err = 0; 5049 int err = 0;
4942 5050
4943 if ((wrqu->data.length > MAX_WPA_IE_LEN) || 5051 /* cut off at IEEE80211_MAX_DATA_LEN */
5052 if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) ||
4944 (wrqu->data.length && (extra == NULL))) 5053 (wrqu->data.length && (extra == NULL)))
4945 return -EINVAL; 5054 return -EINVAL;
4946 5055
@@ -5433,7 +5542,7 @@ static inline char *orinoco_translate_scan(struct net_device *dev,
5433 char *current_ev, 5542 char *current_ev,
5434 char *end_buf, 5543 char *end_buf,
5435 union hermes_scan_info *bss, 5544 union hermes_scan_info *bss,
5436 unsigned int last_scanned) 5545 unsigned long last_scanned)
5437{ 5546{
5438 struct orinoco_private *priv = netdev_priv(dev); 5547 struct orinoco_private *priv = netdev_priv(dev);
5439 u16 capabilities; 5548 u16 capabilities;
@@ -5580,7 +5689,7 @@ static inline char *orinoco_translate_ext_scan(struct net_device *dev,
5580 char *current_ev, 5689 char *current_ev,
5581 char *end_buf, 5690 char *end_buf,
5582 struct agere_ext_scan_info *bss, 5691 struct agere_ext_scan_info *bss,
5583 unsigned int last_scanned) 5692 unsigned long last_scanned)
5584{ 5693{
5585 u16 capabilities; 5694 u16 capabilities;
5586 u16 channel; 5695 u16 channel;
@@ -5623,7 +5732,7 @@ static inline char *orinoco_translate_ext_scan(struct net_device *dev,
5623 &iwe, IW_EV_UINT_LEN); 5732 &iwe, IW_EV_UINT_LEN);
5624 } 5733 }
5625 5734
5626 ie = orinoco_get_ie(bss->data, sizeof(bss->data), MFIE_TYPE_DS_SET); 5735 ie = orinoco_get_ie(bss->data, sizeof(bss->data), WLAN_EID_DS_PARAMS);
5627 channel = ie ? ie[2] : 0; 5736 channel = ie ? ie[2] : 0;
5628 if ((channel >= 1) && (channel <= NUM_CHANNELS)) { 5737 if ((channel >= 1) && (channel <= NUM_CHANNELS)) {
5629 /* Add channel and frequency */ 5738 /* Add channel and frequency */
@@ -5673,7 +5782,7 @@ static inline char *orinoco_translate_ext_scan(struct net_device *dev,
5673 } 5782 }
5674 5783
5675 /* RSN IE */ 5784 /* RSN IE */
5676 ie = orinoco_get_ie(bss->data, sizeof(bss->data), MFIE_TYPE_RSN); 5785 ie = orinoco_get_ie(bss->data, sizeof(bss->data), WLAN_EID_RSN);
5677 if (ie) { 5786 if (ie) {
5678 iwe.cmd = IWEVGENIE; 5787 iwe.cmd = IWEVGENIE;
5679 iwe.u.data.length = ie[1] + 2; 5788 iwe.u.data.length = ie[1] + 2;
@@ -5681,7 +5790,7 @@ static inline char *orinoco_translate_ext_scan(struct net_device *dev,
5681 &iwe, ie); 5790 &iwe, ie);
5682 } 5791 }
5683 5792
5684 ie = orinoco_get_ie(bss->data, sizeof(bss->data), MFIE_TYPE_RATES); 5793 ie = orinoco_get_ie(bss->data, sizeof(bss->data), WLAN_EID_SUPP_RATES);
5685 if (ie) { 5794 if (ie) {
5686 char *p = current_ev + iwe_stream_lcp_len(info); 5795 char *p = current_ev + iwe_stream_lcp_len(info);
5687 int i; 5796 int i;
@@ -5976,7 +6085,7 @@ static void orinoco_get_drvinfo(struct net_device *dev,
5976 strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1); 6085 strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
5977 strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1); 6086 strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1);
5978 if (dev->dev.parent) 6087 if (dev->dev.parent)
5979 strncpy(info->bus_info, dev->dev.parent->bus_id, 6088 strncpy(info->bus_info, dev_name(dev->dev.parent),
5980 sizeof(info->bus_info) - 1); 6089 sizeof(info->bus_info) - 1);
5981 else 6090 else
5982 snprintf(info->bus_info, sizeof(info->bus_info) - 1, 6091 snprintf(info->bus_info, sizeof(info->bus_info) - 1,
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 981570bd3b9d..00750c8ba7db 100644
--- a/drivers/net/wireless/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -10,6 +10,7 @@
10#define DRIVER_VERSION "0.15" 10#define DRIVER_VERSION "0.15"
11 11
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/suspend.h>
13#include <linux/netdevice.h> 14#include <linux/netdevice.h>
14#include <linux/wireless.h> 15#include <linux/wireless.h>
15#include <net/iw_handler.h> 16#include <net/iw_handler.h>
@@ -66,6 +67,8 @@ struct orinoco_rx_data {
66 struct list_head list; 67 struct list_head list;
67}; 68};
68 69
70struct firmware;
71
69struct orinoco_private { 72struct orinoco_private {
70 void *card; /* Pointer to card dependent structure */ 73 void *card; /* Pointer to card dependent structure */
71 struct device *dev; 74 struct device *dev;
@@ -164,6 +167,12 @@ struct orinoco_private {
164 unsigned int wpa_enabled:1; 167 unsigned int wpa_enabled:1;
165 unsigned int tkip_cm_active:1; 168 unsigned int tkip_cm_active:1;
166 unsigned int key_mgmt:3; 169 unsigned int key_mgmt:3;
170
171 /* Cached in memory firmware to use during ->resume. */
172 const struct firmware *cached_pri_fw;
173 const struct firmware *cached_fw;
174
175 struct notifier_block pm_notifier;
167}; 176};
168 177
169#ifdef ORINOCO_DEBUG 178#ifdef ORINOCO_DEBUG
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 6fcf2bda7cdf..f127602670ec 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -178,13 +178,17 @@ static int orinoco_cs_config_check(struct pcmcia_device *p_dev,
178 /* Note that the CIS values need to be rescaled */ 178 /* Note that the CIS values need to be rescaled */
179 if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { 179 if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
180 if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) { 180 if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) {
181 DEBUG(2, "spectrum_cs_config: Vcc mismatch (vcc = %d, CIS = %d)\n", vcc, cfg->vcc.param[CISTPL_POWER_VNOM] / 10000); 181 DEBUG(2, "%s: Vcc mismatch (vcc = %d, CIS = %d)\n",
182 __func__, vcc,
183 cfg->vcc.param[CISTPL_POWER_VNOM] / 10000);
182 if (!ignore_cis_vcc) 184 if (!ignore_cis_vcc)
183 goto next_entry; 185 goto next_entry;
184 } 186 }
185 } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) { 187 } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) {
186 if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / 10000) { 188 if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / 10000) {
187 DEBUG(2, "spectrum_cs_config: Vcc mismatch (vcc = %d, CIS = %d)\n", vcc, dflt->vcc.param[CISTPL_POWER_VNOM] / 10000); 189 DEBUG(2, "%s: Vcc mismatch (vcc = %d, CIS = %d)\n",
190 __func__, vcc,
191 dflt->vcc.param[CISTPL_POWER_VNOM] / 10000);
188 if (!ignore_cis_vcc) 192 if (!ignore_cis_vcc)
189 goto next_entry; 193 goto next_entry;
190 } 194 }
@@ -308,7 +312,7 @@ orinoco_cs_config(struct pcmcia_device *link)
308 312
309 /* Finally, report what we've done */ 313 /* Finally, report what we've done */
310 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io " 314 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
311 "0x%04x-0x%04x\n", dev->name, dev->dev.parent->bus_id, 315 "0x%04x-0x%04x\n", dev->name, dev_name(dev->dev.parent),
312 link->irq.AssignedIRQ, link->io.BasePort1, 316 link->irq.AssignedIRQ, link->io.BasePort1,
313 link->io.BasePort1 + link->io.NumPorts1 - 1); 317 link->io.BasePort1 + link->io.NumPorts1 - 1);
314 return 0; 318 return 0;
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index 2fc86596302e..2fc86596302e 100644
--- a/drivers/net/wireless/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 4ebd638a073e..4ebd638a073e 100644
--- a/drivers/net/wireless/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
diff --git a/drivers/net/wireless/orinoco_pci.h b/drivers/net/wireless/orinoco/orinoco_pci.h
index f4e5e06760c1..f4e5e06760c1 100644
--- a/drivers/net/wireless/orinoco_pci.h
+++ b/drivers/net/wireless/orinoco/orinoco_pci.h
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index ef761857bb38..ef761857bb38 100644
--- a/drivers/net/wireless/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index ede24ec309c0..ede24ec309c0 100644
--- a/drivers/net/wireless/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index 852789ad34b3..b2ca2e39c2cb 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -248,13 +248,17 @@ static int spectrum_cs_config_check(struct pcmcia_device *p_dev,
248 /* Note that the CIS values need to be rescaled */ 248 /* Note that the CIS values need to be rescaled */
249 if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { 249 if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
250 if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) { 250 if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) {
251 DEBUG(2, "spectrum_cs_config: Vcc mismatch (vcc = %d, CIS = %d)\n", vcc, cfg->vcc.param[CISTPL_POWER_VNOM] / 10000); 251 DEBUG(2, "%s: Vcc mismatch (vcc = %d, CIS = %d)\n",
252 __func__, vcc,
253 cfg->vcc.param[CISTPL_POWER_VNOM] / 10000);
252 if (!ignore_cis_vcc) 254 if (!ignore_cis_vcc)
253 goto next_entry; 255 goto next_entry;
254 } 256 }
255 } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) { 257 } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) {
256 if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / 10000) { 258 if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / 10000) {
257 DEBUG(2, "spectrum_cs_config: Vcc mismatch (vcc = %d, CIS = %d)\n", vcc, dflt->vcc.param[CISTPL_POWER_VNOM] / 10000); 259 DEBUG(2, "%s: Vcc mismatch (vcc = %d, CIS = %d)\n",
260 __func__, vcc,
261 dflt->vcc.param[CISTPL_POWER_VNOM] / 10000);
258 if (!ignore_cis_vcc) 262 if (!ignore_cis_vcc)
259 goto next_entry; 263 goto next_entry;
260 } 264 }
@@ -383,7 +387,7 @@ spectrum_cs_config(struct pcmcia_device *link)
383 387
384 /* Finally, report what we've done */ 388 /* Finally, report what we've done */
385 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io " 389 printk(KERN_DEBUG "%s: " DRIVER_NAME " at %s, irq %d, io "
386 "0x%04x-0x%04x\n", dev->name, dev->dev.parent->bus_id, 390 "0x%04x-0x%04x\n", dev->name, dev_name(dev->dev.parent),
387 link->irq.AssignedIRQ, link->io.BasePort1, 391 link->irq.AssignedIRQ, link->io.BasePort1,
388 link->io.BasePort1 + link->io.NumPorts1 - 1); 392 link->io.BasePort1 + link->io.NumPorts1 - 1);
389 393
@@ -450,10 +454,29 @@ spectrum_cs_resume(struct pcmcia_device *link)
450{ 454{
451 struct net_device *dev = link->priv; 455 struct net_device *dev = link->priv;
452 struct orinoco_private *priv = netdev_priv(dev); 456 struct orinoco_private *priv = netdev_priv(dev);
457 unsigned long flags;
458 int err;
459
460 err = orinoco_reinit_firmware(dev);
461 if (err) {
462 printk(KERN_ERR "%s: Error %d re-initializing firmware\n",
463 dev->name, err);
464 return -EIO;
465 }
466
467 spin_lock_irqsave(&priv->lock, flags);
453 468
454 netif_device_attach(dev); 469 netif_device_attach(dev);
455 priv->hw_unavailable--; 470 priv->hw_unavailable--;
456 schedule_work(&priv->reset_work); 471
472 if (priv->open && !priv->hw_unavailable) {
473 err = __orinoco_up(dev);
474 if (err)
475 printk(KERN_ERR "%s: Error %d restarting card\n",
476 dev->name, err);
477 }
478
479 spin_unlock_irqrestore(&priv->lock, flags);
457 480
458 return 0; 481 return 0;
459} 482}
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 1d0704fe146f..ab79e32f0b27 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -14,17 +14,17 @@
14 * published by the Free Software Foundation. 14 * published by the Free Software Foundation.
15 */ 15 */
16 16
17enum control_frame_types { 17enum p54_control_frame_types {
18 P54_CONTROL_TYPE_FILTER_SET = 0, 18 P54_CONTROL_TYPE_SETUP = 0,
19 P54_CONTROL_TYPE_CHANNEL_CHANGE, 19 P54_CONTROL_TYPE_SCAN,
20 P54_CONTROL_TYPE_FREQDONE, 20 P54_CONTROL_TYPE_TRAP,
21 P54_CONTROL_TYPE_DCFINIT, 21 P54_CONTROL_TYPE_DCFINIT,
22 P54_CONTROL_TYPE_ENCRYPTION, 22 P54_CONTROL_TYPE_RX_KEYCACHE,
23 P54_CONTROL_TYPE_TIM, 23 P54_CONTROL_TYPE_TIM,
24 P54_CONTROL_TYPE_POWERMGT, 24 P54_CONTROL_TYPE_PSM,
25 P54_CONTROL_TYPE_FREEQUEUE, 25 P54_CONTROL_TYPE_TXCANCEL,
26 P54_CONTROL_TYPE_TXDONE, 26 P54_CONTROL_TYPE_TXDONE,
27 P54_CONTROL_TYPE_PING, 27 P54_CONTROL_TYPE_BURST,
28 P54_CONTROL_TYPE_STAT_READBACK, 28 P54_CONTROL_TYPE_STAT_READBACK,
29 P54_CONTROL_TYPE_BBP, 29 P54_CONTROL_TYPE_BBP,
30 P54_CONTROL_TYPE_EEPROM_READBACK, 30 P54_CONTROL_TYPE_EEPROM_READBACK,
@@ -37,18 +37,44 @@ enum control_frame_types {
37 P54_CONTROL_TYPE_XBOW_SYNTH_CFG, 37 P54_CONTROL_TYPE_XBOW_SYNTH_CFG,
38 P54_CONTROL_TYPE_CCE_QUIET, 38 P54_CONTROL_TYPE_CCE_QUIET,
39 P54_CONTROL_TYPE_PSM_STA_UNLOCK, 39 P54_CONTROL_TYPE_PSM_STA_UNLOCK,
40 P54_CONTROL_TYPE_PCS,
41 P54_CONTROL_TYPE_BT_BALANCER = 28,
42 P54_CONTROL_TYPE_GROUP_ADDRESS_TABLE = 30,
43 P54_CONTROL_TYPE_ARPTABLE = 31,
44 P54_CONTROL_TYPE_BT_OPTIONS = 35
40}; 45};
41 46
42struct p54_control_hdr { 47#define P54_HDR_FLAG_CONTROL BIT(15)
43 __le16 magic1; 48#define P54_HDR_FLAG_CONTROL_OPSET (BIT(15) + BIT(0))
49
50struct p54_hdr {
51 __le16 flags;
44 __le16 len; 52 __le16 len;
45 __le32 req_id; 53 __le32 req_id;
46 __le16 type; /* enum control_frame_types */ 54 __le16 type; /* enum p54_control_frame_types */
47 u8 retry1; 55 u8 rts_tries;
48 u8 retry2; 56 u8 tries;
49 u8 data[0]; 57 u8 data[0];
50} __attribute__ ((packed)); 58} __attribute__ ((packed));
51 59
60#define FREE_AFTER_TX(skb) \
61 ((((struct p54_hdr *) ((struct sk_buff *) skb)->data)-> \
62 flags) == cpu_to_le16(P54_HDR_FLAG_CONTROL_OPSET))
63
64struct p54_edcf_queue_param {
65 __le16 aifs;
66 __le16 cwmin;
67 __le16 cwmax;
68 __le16 txop;
69} __attribute__ ((packed));
70
71struct p54_rssi_linear_approximation {
72 s16 mul;
73 s16 add;
74 s16 longbow_unkn;
75 s16 longbow_unk2;
76};
77
52#define EEPROM_READBACK_LEN 0x3fc 78#define EEPROM_READBACK_LEN 0x3fc
53 79
54#define ISL38XX_DEV_FIRMWARE_ADDR 0x20000 80#define ISL38XX_DEV_FIRMWARE_ADDR 0x20000
@@ -59,49 +85,53 @@ struct p54_control_hdr {
59#define FW_LM20 0x4c4d3230 85#define FW_LM20 0x4c4d3230
60 86
61struct p54_common { 87struct p54_common {
88 struct ieee80211_hw *hw;
62 u32 rx_start; 89 u32 rx_start;
63 u32 rx_end; 90 u32 rx_end;
64 struct sk_buff_head tx_queue; 91 struct sk_buff_head tx_queue;
65 void (*tx)(struct ieee80211_hw *dev, struct p54_control_hdr *data, 92 void (*tx)(struct ieee80211_hw *dev, struct sk_buff *skb);
66 size_t len, int free_on_tx);
67 int (*open)(struct ieee80211_hw *dev); 93 int (*open)(struct ieee80211_hw *dev);
68 void (*stop)(struct ieee80211_hw *dev); 94 void (*stop)(struct ieee80211_hw *dev);
69 int mode; 95 int mode;
70 u16 seqno;
71 u16 rx_mtu; 96 u16 rx_mtu;
72 u8 headroom; 97 u8 headroom;
73 u8 tailroom; 98 u8 tailroom;
74 struct mutex conf_mutex; 99 struct mutex conf_mutex;
75 u8 mac_addr[ETH_ALEN]; 100 u8 mac_addr[ETH_ALEN];
76 u8 bssid[ETH_ALEN]; 101 u8 bssid[ETH_ALEN];
77 __le16 filter_type;
78 struct pda_iq_autocal_entry *iq_autocal; 102 struct pda_iq_autocal_entry *iq_autocal;
79 unsigned int iq_autocal_len; 103 unsigned int iq_autocal_len;
80 struct pda_channel_output_limit *output_limit; 104 struct pda_channel_output_limit *output_limit;
81 unsigned int output_limit_len; 105 unsigned int output_limit_len;
82 struct pda_pa_curve_data *curve_data; 106 struct pda_pa_curve_data *curve_data;
107 struct p54_rssi_linear_approximation rssical_db[IEEE80211_NUM_BANDS];
83 unsigned int filter_flags; 108 unsigned int filter_flags;
109 bool use_short_slot;
84 u16 rxhw; 110 u16 rxhw;
85 u8 version; 111 u8 version;
86 u8 rx_antenna;
87 unsigned int tx_hdr_len; 112 unsigned int tx_hdr_len;
88 void *cached_vdcf;
89 unsigned int fw_var; 113 unsigned int fw_var;
90 unsigned int fw_interface; 114 unsigned int fw_interface;
91 unsigned int output_power; 115 unsigned int output_power;
92 u32 tsf_low32; 116 u32 tsf_low32;
93 u32 tsf_high32; 117 u32 tsf_high32;
118 u64 basic_rate_mask;
119 u16 wakeup_timer;
120 u16 aid;
94 struct ieee80211_tx_queue_stats tx_stats[8]; 121 struct ieee80211_tx_queue_stats tx_stats[8];
122 struct p54_edcf_queue_param qos_params[8];
95 struct ieee80211_low_level_stats stats; 123 struct ieee80211_low_level_stats stats;
96 struct timer_list stats_timer; 124 struct delayed_work work;
97 struct completion stats_comp; 125 struct sk_buff *cached_beacon;
98 void *cached_stats;
99 int noise; 126 int noise;
100 void *eeprom; 127 void *eeprom;
101 struct completion eeprom_comp; 128 struct completion eeprom_comp;
129 u8 privacy_caps;
130 u8 rx_keycache_size;
102}; 131};
103 132
104int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb); 133int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb);
134void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb);
105int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw); 135int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw);
106int p54_read_eeprom(struct ieee80211_hw *dev); 136int p54_read_eeprom(struct ieee80211_hw *dev);
107struct ieee80211_hw *p54_init_common(size_t priv_data_len); 137struct ieee80211_hw *p54_init_common(size_t priv_data_len);
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 827ca0384a4c..82354b974a04 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -1,12 +1,15 @@
1
2/* 1/*
3 * Common code for mac80211 Prism54 drivers 2 * Common code for mac80211 Prism54 drivers
4 * 3 *
5 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> 4 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
6 * Copyright (c) 2007, Christian Lamparter <chunkeey@web.de> 5 * Copyright (c) 2007, Christian Lamparter <chunkeey@web.de>
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * 7 *
8 * Based on the islsm (softmac prism54) driver, which is: 8 * Based on:
9 * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. 9 * - the islsm (softmac prism54) driver, which is:
10 * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
11 * - stlc45xx driver
12 * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
10 * 13 *
11 * This program is free software; you can redistribute it and/or modify 14 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 15 * it under the terms of the GNU General Public License version 2 as
@@ -22,6 +25,9 @@
22#include "p54.h" 25#include "p54.h"
23#include "p54common.h" 26#include "p54common.h"
24 27
28static int modparam_nohwcrypt;
29module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
30MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
25MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); 31MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
26MODULE_DESCRIPTION("Softmac Prism54 common code"); 32MODULE_DESCRIPTION("Softmac Prism54 common code");
27MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
@@ -152,21 +158,21 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
152 priv->fw_interface = be32_to_cpup((__be32 *) 158 priv->fw_interface = be32_to_cpup((__be32 *)
153 bootrec->data); 159 bootrec->data);
154 switch (priv->fw_interface) { 160 switch (priv->fw_interface) {
155 case FW_FMAC:
156 printk(KERN_INFO "p54: FreeMAC firmware\n");
157 break;
158 case FW_LM20:
159 printk(KERN_INFO "p54: LM20 firmware\n");
160 break;
161 case FW_LM86: 161 case FW_LM86:
162 printk(KERN_INFO "p54: LM86 firmware\n"); 162 case FW_LM20:
163 break; 163 case FW_LM87: {
164 case FW_LM87: 164 char *iftype = (char *)bootrec->data;
165 printk(KERN_INFO "p54: LM87 firmware\n"); 165 printk(KERN_INFO "%s: p54 detected a LM%c%c "
166 "firmware\n",
167 wiphy_name(dev->wiphy),
168 iftype[2], iftype[3]);
166 break; 169 break;
170 }
171 case FW_FMAC:
167 default: 172 default:
168 printk(KERN_INFO "p54: unknown firmware\n"); 173 printk(KERN_ERR "%s: unsupported firmware\n",
169 break; 174 wiphy_name(dev->wiphy));
175 return -ENODEV;
170 } 176 }
171 break; 177 break;
172 case BR_CODE_COMPONENT_VERSION: 178 case BR_CODE_COMPONENT_VERSION:
@@ -182,8 +188,10 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
182 priv->rx_end = le32_to_cpu(desc->rx_end) - 0x3500; 188 priv->rx_end = le32_to_cpu(desc->rx_end) - 0x3500;
183 priv->headroom = desc->headroom; 189 priv->headroom = desc->headroom;
184 priv->tailroom = desc->tailroom; 190 priv->tailroom = desc->tailroom;
191 priv->privacy_caps = desc->privacy_caps;
192 priv->rx_keycache_size = desc->rx_keycache_size;
185 if (le32_to_cpu(bootrec->len) == 11) 193 if (le32_to_cpu(bootrec->len) == 11)
186 priv->rx_mtu = le16_to_cpu(bootrec->rx_mtu); 194 priv->rx_mtu = le16_to_cpu(desc->rx_mtu);
187 else 195 else
188 priv->rx_mtu = (size_t) 196 priv->rx_mtu = (size_t)
189 0x620 - priv->tx_hdr_len; 197 0x620 - priv->tx_hdr_len;
@@ -208,18 +216,35 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
208 } 216 }
209 217
210 if (fw_version) 218 if (fw_version)
211 printk(KERN_INFO "p54: FW rev %s - Softmac protocol %x.%x\n", 219 printk(KERN_INFO "%s: FW rev %s - Softmac protocol %x.%x\n",
212 fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); 220 wiphy_name(dev->wiphy), fw_version,
221 priv->fw_var >> 8, priv->fw_var & 0xff);
222
223 if (priv->fw_var < 0x500)
224 printk(KERN_INFO "%s: you are using an obsolete firmware. "
225 "visit http://wireless.kernel.org/en/users/Drivers/p54 "
226 "and grab one for \"kernel >= 2.6.28\"!\n",
227 wiphy_name(dev->wiphy));
213 228
214 if (priv->fw_var >= 0x300) { 229 if (priv->fw_var >= 0x300) {
215 /* Firmware supports QoS, use it! */ 230 /* Firmware supports QoS, use it! */
216 priv->tx_stats[4].limit = 3; 231 priv->tx_stats[4].limit = 3; /* AC_VO */
217 priv->tx_stats[5].limit = 4; 232 priv->tx_stats[5].limit = 4; /* AC_VI */
218 priv->tx_stats[6].limit = 3; 233 priv->tx_stats[6].limit = 3; /* AC_BE */
219 priv->tx_stats[7].limit = 1; 234 priv->tx_stats[7].limit = 2; /* AC_BK */
220 dev->queues = 4; 235 dev->queues = 4;
221 } 236 }
222 237
238 if (!modparam_nohwcrypt)
239 printk(KERN_INFO "%s: cryptographic accelerator "
240 "WEP:%s, TKIP:%s, CCMP:%s\n",
241 wiphy_name(dev->wiphy),
242 (priv->privacy_caps & BR_DESC_PRIV_CAP_WEP) ? "YES" :
243 "no", (priv->privacy_caps & (BR_DESC_PRIV_CAP_TKIP |
244 BR_DESC_PRIV_CAP_MICHAEL)) ? "YES" : "no",
245 (priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP) ?
246 "YES" : "no");
247
223 return 0; 248 return 0;
224} 249}
225EXPORT_SYMBOL_GPL(p54_parse_firmware); 250EXPORT_SYMBOL_GPL(p54_parse_firmware);
@@ -310,6 +335,36 @@ static const char *p54_rf_chips[] = { "NULL", "Duette3", "Duette2",
310 "Frisbee", "Xbow", "Longbow", "NULL", "NULL" }; 335 "Frisbee", "Xbow", "Longbow", "NULL", "NULL" };
311static int p54_init_xbow_synth(struct ieee80211_hw *dev); 336static int p54_init_xbow_synth(struct ieee80211_hw *dev);
312 337
338static void p54_parse_rssical(struct ieee80211_hw *dev, void *data, int len,
339 u16 type)
340{
341 struct p54_common *priv = dev->priv;
342 int offset = (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) ? 2 : 0;
343 int entry_size = sizeof(struct pda_rssi_cal_entry) + offset;
344 int num_entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2;
345 int i;
346
347 if (len != (entry_size * num_entries)) {
348 printk(KERN_ERR "%s: unknown rssi calibration data packing "
349 " type:(%x) len:%d.\n",
350 wiphy_name(dev->wiphy), type, len);
351
352 print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE,
353 data, len);
354
355 printk(KERN_ERR "%s: please report this issue.\n",
356 wiphy_name(dev->wiphy));
357 return;
358 }
359
360 for (i = 0; i < num_entries; i++) {
361 struct pda_rssi_cal_entry *cal = data +
362 (offset + i * entry_size);
363 priv->rssical_db[i].mul = (s16) le16_to_cpu(cal->mul);
364 priv->rssical_db[i].add = (s16) le16_to_cpu(cal->add);
365 }
366}
367
313static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) 368static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
314{ 369{
315 struct p54_common *priv = dev->priv; 370 struct p54_common *priv = dev->priv;
@@ -320,7 +375,6 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
320 int err; 375 int err;
321 u8 *end = (u8 *)eeprom + len; 376 u8 *end = (u8 *)eeprom + len;
322 u16 synth = 0; 377 u16 synth = 0;
323 DECLARE_MAC_BUF(mac);
324 378
325 wrap = (struct eeprom_pda_wrap *) eeprom; 379 wrap = (struct eeprom_pda_wrap *) eeprom;
326 entry = (void *)wrap->data + le16_to_cpu(wrap->len); 380 entry = (void *)wrap->data + le16_to_cpu(wrap->len);
@@ -377,8 +431,9 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
377 err = p54_convert_rev1(dev, curve_data); 431 err = p54_convert_rev1(dev, curve_data);
378 break; 432 break;
379 default: 433 default:
380 printk(KERN_ERR "p54: unknown curve data " 434 printk(KERN_ERR "%s: unknown curve data "
381 "revision %d\n", 435 "revision %d\n",
436 wiphy_name(dev->wiphy),
382 curve_data->cal_method_rev); 437 curve_data->cal_method_rev);
383 err = -ENODEV; 438 err = -ENODEV;
384 break; 439 break;
@@ -409,12 +464,40 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
409 case PDR_HARDWARE_PLATFORM_COMPONENT_ID: 464 case PDR_HARDWARE_PLATFORM_COMPONENT_ID:
410 priv->version = *(u8 *)(entry->data + 1); 465 priv->version = *(u8 *)(entry->data + 1);
411 break; 466 break;
467 case PDR_RSSI_LINEAR_APPROXIMATION:
468 case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND:
469 case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED:
470 p54_parse_rssical(dev, entry->data, data_len,
471 le16_to_cpu(entry->code));
472 break;
412 case PDR_END: 473 case PDR_END:
413 /* make it overrun */ 474 /* make it overrun */
414 entry_len = len; 475 entry_len = len;
415 break; 476 break;
477 case PDR_MANUFACTURING_PART_NUMBER:
478 case PDR_PDA_VERSION:
479 case PDR_NIC_SERIAL_NUMBER:
480 case PDR_REGULATORY_DOMAIN_LIST:
481 case PDR_TEMPERATURE_TYPE:
482 case PDR_PRISM_PCI_IDENTIFIER:
483 case PDR_COUNTRY_INFORMATION:
484 case PDR_OEM_NAME:
485 case PDR_PRODUCT_NAME:
486 case PDR_UTF8_OEM_NAME:
487 case PDR_UTF8_PRODUCT_NAME:
488 case PDR_COUNTRY_LIST:
489 case PDR_DEFAULT_COUNTRY:
490 case PDR_ANTENNA_GAIN:
491 case PDR_PRISM_INDIGO_PA_CALIBRATION_DATA:
492 case PDR_REGULATORY_POWER_LIMITS:
493 case PDR_RADIATED_TRANSMISSION_CORRECTION:
494 case PDR_PRISM_TX_IQ_CALIBRATION:
495 case PDR_BASEBAND_REGISTERS:
496 case PDR_PER_CHANNEL_BASEBAND_REGISTERS:
497 break;
416 default: 498 default:
417 printk(KERN_INFO "p54: unknown eeprom code : 0x%x\n", 499 printk(KERN_INFO "%s: unknown eeprom code : 0x%x\n",
500 wiphy_name(dev->wiphy),
418 le16_to_cpu(entry->code)); 501 le16_to_cpu(entry->code));
419 break; 502 break;
420 } 503 }
@@ -424,17 +507,18 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
424 507
425 if (!synth || !priv->iq_autocal || !priv->output_limit || 508 if (!synth || !priv->iq_autocal || !priv->output_limit ||
426 !priv->curve_data) { 509 !priv->curve_data) {
427 printk(KERN_ERR "p54: not all required entries found in eeprom!\n"); 510 printk(KERN_ERR "%s: not all required entries found in eeprom!\n",
511 wiphy_name(dev->wiphy));
428 err = -EINVAL; 512 err = -EINVAL;
429 goto err; 513 goto err;
430 } 514 }
431 515
432 priv->rxhw = synth & 0x07; 516 priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK;
433 if (priv->rxhw == 4) 517 if (priv->rxhw == 4)
434 p54_init_xbow_synth(dev); 518 p54_init_xbow_synth(dev);
435 if (!(synth & 0x40)) 519 if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
436 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz; 520 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
437 if (!(synth & 0x80)) 521 if (!(synth & PDR_SYNTH_5_GHZ_DISABLED))
438 dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz; 522 dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;
439 523
440 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { 524 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
@@ -446,9 +530,9 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
446 SET_IEEE80211_PERM_ADDR(dev, perm_addr); 530 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
447 } 531 }
448 532
449 printk(KERN_INFO "%s: hwaddr %s, MAC:isl38%02x RF:%s\n", 533 printk(KERN_INFO "%s: hwaddr %pM, MAC:isl38%02x RF:%s\n",
450 wiphy_name(dev->wiphy), 534 wiphy_name(dev->wiphy),
451 print_mac(mac, dev->wiphy->perm_addr), 535 dev->wiphy->perm_addr,
452 priv->version, p54_rf_chips[priv->rxhw]); 536 priv->version, p54_rf_chips[priv->rxhw]);
453 537
454 return 0; 538 return 0;
@@ -469,36 +553,56 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
469 priv->curve_data = NULL; 553 priv->curve_data = NULL;
470 } 554 }
471 555
472 printk(KERN_ERR "p54: eeprom parse failed!\n"); 556 printk(KERN_ERR "%s: eeprom parse failed!\n",
557 wiphy_name(dev->wiphy));
473 return err; 558 return err;
474} 559}
475 560
476static int p54_rssi_to_dbm(struct ieee80211_hw *dev, int rssi) 561static int p54_rssi_to_dbm(struct ieee80211_hw *dev, int rssi)
477{ 562{
478 /* TODO: get the rssi_add & rssi_mul data from the eeprom */ 563 struct p54_common *priv = dev->priv;
479 return ((rssi * 0x83) / 64 - 400) / 4; 564 int band = dev->conf.channel->band;
565
566 return ((rssi * priv->rssical_db[band].mul) / 64 +
567 priv->rssical_db[band].add) / 4;
480} 568}
481 569
482static int p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb) 570static int p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
483{ 571{
484 struct p54_common *priv = dev->priv; 572 struct p54_common *priv = dev->priv;
485 struct p54_rx_hdr *hdr = (struct p54_rx_hdr *) skb->data; 573 struct p54_rx_data *hdr = (struct p54_rx_data *) skb->data;
486 struct ieee80211_rx_status rx_status = {0}; 574 struct ieee80211_rx_status rx_status = {0};
487 u16 freq = le16_to_cpu(hdr->freq); 575 u16 freq = le16_to_cpu(hdr->freq);
488 size_t header_len = sizeof(*hdr); 576 size_t header_len = sizeof(*hdr);
489 u32 tsf32; 577 u32 tsf32;
490 578
491 if (!(hdr->magic & cpu_to_le16(0x0001))) { 579 /*
580 * If the device is in a unspecified state we have to
581 * ignore all data frames. Else we could end up with a
582 * nasty crash.
583 */
584 if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
585 return 0;
586
587 if (!(hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_IN_FCS_GOOD))) {
492 if (priv->filter_flags & FIF_FCSFAIL) 588 if (priv->filter_flags & FIF_FCSFAIL)
493 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 589 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
494 else 590 else
495 return 0; 591 return 0;
496 } 592 }
497 593
594 if (hdr->decrypt_status == P54_DECRYPT_OK)
595 rx_status.flag |= RX_FLAG_DECRYPTED;
596 if ((hdr->decrypt_status == P54_DECRYPT_FAIL_MICHAEL) ||
597 (hdr->decrypt_status == P54_DECRYPT_FAIL_TKIP))
598 rx_status.flag |= RX_FLAG_MMIC_ERROR;
599
498 rx_status.signal = p54_rssi_to_dbm(dev, hdr->rssi); 600 rx_status.signal = p54_rssi_to_dbm(dev, hdr->rssi);
499 rx_status.noise = priv->noise; 601 rx_status.noise = priv->noise;
500 /* XX correct? */ 602 /* XX correct? */
501 rx_status.qual = (100 * hdr->rssi) / 127; 603 rx_status.qual = (100 * hdr->rssi) / 127;
604 if (hdr->rate & 0x10)
605 rx_status.flag |= RX_FLAG_SHORTPRE;
502 rx_status.rate_idx = (dev->conf.channel->band == IEEE80211_BAND_2GHZ ? 606 rx_status.rate_idx = (dev->conf.channel->band == IEEE80211_BAND_2GHZ ?
503 hdr->rate : (hdr->rate - 4)) & 0xf; 607 hdr->rate : (hdr->rate - 4)) & 0xf;
504 rx_status.freq = freq; 608 rx_status.freq = freq;
@@ -513,7 +617,7 @@ static int p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
513 617
514 rx_status.flag |= RX_FLAG_TSFT; 618 rx_status.flag |= RX_FLAG_TSFT;
515 619
516 if (hdr->magic & cpu_to_le16(0x4000)) 620 if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
517 header_len += hdr->align[0]; 621 header_len += hdr->align[0];
518 622
519 skb_pull(skb, header_len); 623 skb_pull(skb, header_len);
@@ -521,6 +625,9 @@ static int p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
521 625
522 ieee80211_rx_irqsafe(dev, skb, &rx_status); 626 ieee80211_rx_irqsafe(dev, skb, &rx_status);
523 627
628 queue_delayed_work(dev->workqueue, &priv->work,
629 msecs_to_jiffies(P54_STATISTICS_UPDATE));
630
524 return -1; 631 return -1;
525} 632}
526 633
@@ -529,88 +636,197 @@ static void inline p54_wake_free_queues(struct ieee80211_hw *dev)
529 struct p54_common *priv = dev->priv; 636 struct p54_common *priv = dev->priv;
530 int i; 637 int i;
531 638
639 if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
640 return ;
641
532 for (i = 0; i < dev->queues; i++) 642 for (i = 0; i < dev->queues; i++)
533 if (priv->tx_stats[i + 4].len < priv->tx_stats[i + 4].limit) 643 if (priv->tx_stats[i + 4].len < priv->tx_stats[i + 4].limit)
534 ieee80211_wake_queue(dev, i); 644 ieee80211_wake_queue(dev, i);
535} 645}
536 646
647void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
648{
649 struct p54_common *priv = dev->priv;
650 struct ieee80211_tx_info *info;
651 struct memrecord *range;
652 unsigned long flags;
653 u32 freed = 0, last_addr = priv->rx_start;
654
655 if (unlikely(!skb || !dev || !skb_queue_len(&priv->tx_queue)))
656 return;
657
658 /*
659 * don't try to free an already unlinked skb
660 */
661 if (unlikely((!skb->next) || (!skb->prev)))
662 return;
663
664 spin_lock_irqsave(&priv->tx_queue.lock, flags);
665 info = IEEE80211_SKB_CB(skb);
666 range = (void *)info->rate_driver_data;
667 if (skb->prev != (struct sk_buff *)&priv->tx_queue) {
668 struct ieee80211_tx_info *ni;
669 struct memrecord *mr;
670
671 ni = IEEE80211_SKB_CB(skb->prev);
672 mr = (struct memrecord *)ni->rate_driver_data;
673 last_addr = mr->end_addr;
674 }
675 if (skb->next != (struct sk_buff *)&priv->tx_queue) {
676 struct ieee80211_tx_info *ni;
677 struct memrecord *mr;
678
679 ni = IEEE80211_SKB_CB(skb->next);
680 mr = (struct memrecord *)ni->rate_driver_data;
681 freed = mr->start_addr - last_addr;
682 } else
683 freed = priv->rx_end - last_addr;
684 __skb_unlink(skb, &priv->tx_queue);
685 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
686 dev_kfree_skb_any(skb);
687
688 if (freed >= priv->headroom + sizeof(struct p54_hdr) + 48 +
689 IEEE80211_MAX_RTS_THRESHOLD + priv->tailroom)
690 p54_wake_free_queues(dev);
691}
692EXPORT_SYMBOL_GPL(p54_free_skb);
693
694static struct sk_buff *p54_find_tx_entry(struct ieee80211_hw *dev,
695 __le32 req_id)
696{
697 struct p54_common *priv = dev->priv;
698 struct sk_buff *entry = priv->tx_queue.next;
699 unsigned long flags;
700
701 spin_lock_irqsave(&priv->tx_queue.lock, flags);
702 while (entry != (struct sk_buff *)&priv->tx_queue) {
703 struct p54_hdr *hdr = (struct p54_hdr *) entry->data;
704
705 if (hdr->req_id == req_id) {
706 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
707 return entry;
708 }
709 entry = entry->next;
710 }
711 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
712 return NULL;
713}
714
537static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb) 715static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
538{ 716{
539 struct p54_common *priv = dev->priv; 717 struct p54_common *priv = dev->priv;
540 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data; 718 struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
541 struct p54_frame_sent_hdr *payload = (struct p54_frame_sent_hdr *) hdr->data; 719 struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data;
542 struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next; 720 struct sk_buff *entry = (struct sk_buff *) priv->tx_queue.next;
543 u32 addr = le32_to_cpu(hdr->req_id) - priv->headroom; 721 u32 addr = le32_to_cpu(hdr->req_id) - priv->headroom;
544 struct memrecord *range = NULL; 722 struct memrecord *range = NULL;
545 u32 freed = 0; 723 u32 freed = 0;
546 u32 last_addr = priv->rx_start; 724 u32 last_addr = priv->rx_start;
547 unsigned long flags; 725 unsigned long flags;
726 int count, idx;
548 727
549 spin_lock_irqsave(&priv->tx_queue.lock, flags); 728 spin_lock_irqsave(&priv->tx_queue.lock, flags);
550 while (entry != (struct sk_buff *)&priv->tx_queue) { 729 while (entry != (struct sk_buff *)&priv->tx_queue) {
551 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry); 730 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
552 range = (void *)info->driver_data; 731 struct p54_hdr *entry_hdr;
553 if (range->start_addr == addr) { 732 struct p54_tx_data *entry_data;
554 struct p54_control_hdr *entry_hdr; 733 int pad = 0;
555 struct p54_tx_control_allocdata *entry_data;
556 int pad = 0;
557
558 if (entry->next != (struct sk_buff *)&priv->tx_queue) {
559 struct ieee80211_tx_info *ni;
560 struct memrecord *mr;
561
562 ni = IEEE80211_SKB_CB(entry->next);
563 mr = (struct memrecord *)ni->driver_data;
564 freed = mr->start_addr - last_addr;
565 } else
566 freed = priv->rx_end - last_addr;
567 734
735 range = (void *)info->rate_driver_data;
736 if (range->start_addr != addr) {
568 last_addr = range->end_addr; 737 last_addr = range->end_addr;
569 __skb_unlink(entry, &priv->tx_queue); 738 entry = entry->next;
570 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 739 continue;
740 }
571 741
572 memset(&info->status, 0, sizeof(info->status)); 742 if (entry->next != (struct sk_buff *)&priv->tx_queue) {
573 entry_hdr = (struct p54_control_hdr *) entry->data; 743 struct ieee80211_tx_info *ni;
574 entry_data = (struct p54_tx_control_allocdata *) entry_hdr->data; 744 struct memrecord *mr;
575 if ((entry_hdr->magic1 & cpu_to_le16(0x4000)) != 0) 745
576 pad = entry_data->align[0]; 746 ni = IEEE80211_SKB_CB(entry->next);
577 747 mr = (struct memrecord *)ni->rate_driver_data;
578 priv->tx_stats[entry_data->hw_queue].len--; 748 freed = mr->start_addr - last_addr;
579 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
580 if (!(payload->status & 0x01))
581 info->flags |= IEEE80211_TX_STAT_ACK;
582 else
583 info->status.excessive_retries = 1;
584 }
585 info->status.retry_count = payload->retries - 1;
586 info->status.ack_signal = p54_rssi_to_dbm(dev,
587 le16_to_cpu(payload->ack_rssi));
588 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
589 ieee80211_tx_status_irqsafe(dev, entry);
590 goto out;
591 } else 749 } else
592 last_addr = range->end_addr; 750 freed = priv->rx_end - last_addr;
593 entry = entry->next; 751
752 last_addr = range->end_addr;
753 __skb_unlink(entry, &priv->tx_queue);
754 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
755
756 entry_hdr = (struct p54_hdr *) entry->data;
757 entry_data = (struct p54_tx_data *) entry_hdr->data;
758 priv->tx_stats[entry_data->hw_queue].len--;
759 priv->stats.dot11ACKFailureCount += payload->tries - 1;
760
761 if (unlikely(entry == priv->cached_beacon)) {
762 kfree_skb(entry);
763 priv->cached_beacon = NULL;
764 goto out;
765 }
766
767 /*
768 * Clear manually, ieee80211_tx_info_clear_status would
769 * clear the counts too and we need them.
770 */
771 memset(&info->status.ampdu_ack_len, 0,
772 sizeof(struct ieee80211_tx_info) -
773 offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
774 BUILD_BUG_ON(offsetof(struct ieee80211_tx_info,
775 status.ampdu_ack_len) != 23);
776
777 if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
778 pad = entry_data->align[0];
779
780 /* walk through the rates array and adjust the counts */
781 count = payload->tries;
782 for (idx = 0; idx < 4; idx++) {
783 if (count >= info->status.rates[idx].count) {
784 count -= info->status.rates[idx].count;
785 } else if (count > 0) {
786 info->status.rates[idx].count = count;
787 count = 0;
788 } else {
789 info->status.rates[idx].idx = -1;
790 info->status.rates[idx].count = 0;
791 }
792 }
793
794 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
795 (!payload->status))
796 info->flags |= IEEE80211_TX_STAT_ACK;
797 if (payload->status & P54_TX_PSM_CANCELLED)
798 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
799 info->status.ack_signal = p54_rssi_to_dbm(dev,
800 (int)payload->ack_rssi);
801 skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data));
802 ieee80211_tx_status_irqsafe(dev, entry);
803 goto out;
594 } 804 }
595 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 805 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
596 806
597out: 807out:
598 if (freed >= IEEE80211_MAX_RTS_THRESHOLD + 0x170 + 808 if (freed >= priv->headroom + sizeof(struct p54_hdr) + 48 +
599 sizeof(struct p54_control_hdr)) 809 IEEE80211_MAX_RTS_THRESHOLD + priv->tailroom)
600 p54_wake_free_queues(dev); 810 p54_wake_free_queues(dev);
601} 811}
602 812
603static void p54_rx_eeprom_readback(struct ieee80211_hw *dev, 813static void p54_rx_eeprom_readback(struct ieee80211_hw *dev,
604 struct sk_buff *skb) 814 struct sk_buff *skb)
605{ 815{
606 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data; 816 struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
607 struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data; 817 struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data;
608 struct p54_common *priv = dev->priv; 818 struct p54_common *priv = dev->priv;
609 819
610 if (!priv->eeprom) 820 if (!priv->eeprom)
611 return ; 821 return ;
612 822
613 memcpy(priv->eeprom, eeprom->data, le16_to_cpu(eeprom->len)); 823 if (priv->fw_var >= 0x509) {
824 memcpy(priv->eeprom, eeprom->v2.data,
825 le16_to_cpu(eeprom->v2.len));
826 } else {
827 memcpy(priv->eeprom, eeprom->v1.data,
828 le16_to_cpu(eeprom->v1.len));
829 }
614 830
615 complete(&priv->eeprom_comp); 831 complete(&priv->eeprom_comp);
616} 832}
@@ -618,10 +834,14 @@ static void p54_rx_eeprom_readback(struct ieee80211_hw *dev,
618static void p54_rx_stats(struct ieee80211_hw *dev, struct sk_buff *skb) 834static void p54_rx_stats(struct ieee80211_hw *dev, struct sk_buff *skb)
619{ 835{
620 struct p54_common *priv = dev->priv; 836 struct p54_common *priv = dev->priv;
621 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data; 837 struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
622 struct p54_statistics *stats = (struct p54_statistics *) hdr->data; 838 struct p54_statistics *stats = (struct p54_statistics *) hdr->data;
623 u32 tsf32 = le32_to_cpu(stats->tsf32); 839 u32 tsf32;
840
841 if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
842 return ;
624 843
844 tsf32 = le32_to_cpu(stats->tsf32);
625 if (tsf32 < priv->tsf_low32) 845 if (tsf32 < priv->tsf_low32)
626 priv->tsf_high32++; 846 priv->tsf_high32++;
627 priv->tsf_low32 = tsf32; 847 priv->tsf_low32 = tsf32;
@@ -631,19 +851,50 @@ static void p54_rx_stats(struct ieee80211_hw *dev, struct sk_buff *skb)
631 priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs); 851 priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs);
632 852
633 priv->noise = p54_rssi_to_dbm(dev, le32_to_cpu(stats->noise)); 853 priv->noise = p54_rssi_to_dbm(dev, le32_to_cpu(stats->noise));
634 complete(&priv->stats_comp);
635 854
636 mod_timer(&priv->stats_timer, jiffies + 5 * HZ); 855 p54_free_skb(dev, p54_find_tx_entry(dev, hdr->req_id));
856}
857
858static void p54_rx_trap(struct ieee80211_hw *dev, struct sk_buff *skb)
859{
860 struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
861 struct p54_trap *trap = (struct p54_trap *) hdr->data;
862 u16 event = le16_to_cpu(trap->event);
863 u16 freq = le16_to_cpu(trap->frequency);
864
865 switch (event) {
866 case P54_TRAP_BEACON_TX:
867 break;
868 case P54_TRAP_RADAR:
869 printk(KERN_INFO "%s: radar (freq:%d MHz)\n",
870 wiphy_name(dev->wiphy), freq);
871 break;
872 case P54_TRAP_NO_BEACON:
873 break;
874 case P54_TRAP_SCAN:
875 break;
876 case P54_TRAP_TBTT:
877 break;
878 case P54_TRAP_TIMER:
879 break;
880 default:
881 printk(KERN_INFO "%s: received event:%x freq:%d\n",
882 wiphy_name(dev->wiphy), event, freq);
883 break;
884 }
637} 885}
638 886
639static int p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb) 887static int p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb)
640{ 888{
641 struct p54_control_hdr *hdr = (struct p54_control_hdr *) skb->data; 889 struct p54_hdr *hdr = (struct p54_hdr *) skb->data;
642 890
643 switch (le16_to_cpu(hdr->type)) { 891 switch (le16_to_cpu(hdr->type)) {
644 case P54_CONTROL_TYPE_TXDONE: 892 case P54_CONTROL_TYPE_TXDONE:
645 p54_rx_frame_sent(dev, skb); 893 p54_rx_frame_sent(dev, skb);
646 break; 894 break;
895 case P54_CONTROL_TYPE_TRAP:
896 p54_rx_trap(dev, skb);
897 break;
647 case P54_CONTROL_TYPE_BBP: 898 case P54_CONTROL_TYPE_BBP:
648 break; 899 break;
649 case P54_CONTROL_TYPE_STAT_READBACK: 900 case P54_CONTROL_TYPE_STAT_READBACK:
@@ -664,9 +915,9 @@ static int p54_rx_control(struct ieee80211_hw *dev, struct sk_buff *skb)
664/* returns zero if skb can be reused */ 915/* returns zero if skb can be reused */
665int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb) 916int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb)
666{ 917{
667 u8 type = le16_to_cpu(*((__le16 *)skb->data)) >> 8; 918 u16 type = le16_to_cpu(*((__le16 *)skb->data));
668 919
669 if (type == 0x80) 920 if (type & P54_HDR_FLAG_CONTROL)
670 return p54_rx_control(dev, skb); 921 return p54_rx_control(dev, skb);
671 else 922 else
672 return p54_rx_data(dev, skb); 923 return p54_rx_data(dev, skb);
@@ -682,12 +933,14 @@ EXPORT_SYMBOL_GPL(p54_rx);
682 * marks allocated areas as reserved if necessary. p54_rx_frame_sent frees 933 * marks allocated areas as reserved if necessary. p54_rx_frame_sent frees
683 * allocated areas. 934 * allocated areas.
684 */ 935 */
685static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb, 936static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
686 struct p54_control_hdr *data, u32 len) 937 struct p54_hdr *data, u32 len)
687{ 938{
688 struct p54_common *priv = dev->priv; 939 struct p54_common *priv = dev->priv;
689 struct sk_buff *entry = priv->tx_queue.next; 940 struct sk_buff *entry = priv->tx_queue.next;
690 struct sk_buff *target_skb = NULL; 941 struct sk_buff *target_skb = NULL;
942 struct ieee80211_tx_info *info;
943 struct memrecord *range;
691 u32 last_addr = priv->rx_start; 944 u32 last_addr = priv->rx_start;
692 u32 largest_hole = 0; 945 u32 largest_hole = 0;
693 u32 target_addr = priv->rx_start; 946 u32 target_addr = priv->rx_start;
@@ -695,12 +948,37 @@ static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
695 unsigned int left; 948 unsigned int left;
696 len = (len + priv->headroom + priv->tailroom + 3) & ~0x3; 949 len = (len + priv->headroom + priv->tailroom + 3) & ~0x3;
697 950
951 if (!skb)
952 return -EINVAL;
953
698 spin_lock_irqsave(&priv->tx_queue.lock, flags); 954 spin_lock_irqsave(&priv->tx_queue.lock, flags);
955
699 left = skb_queue_len(&priv->tx_queue); 956 left = skb_queue_len(&priv->tx_queue);
957 if (unlikely(left >= 28)) {
958 /*
959 * The tx_queue is nearly full!
960 * We have throttle normal data traffic, because we must
961 * have a few spare slots for control frames left.
962 */
963 ieee80211_stop_queues(dev);
964 queue_delayed_work(dev->workqueue, &priv->work,
965 msecs_to_jiffies(P54_TX_TIMEOUT));
966
967 if (unlikely(left == 32)) {
968 /*
969 * The tx_queue is now really full.
970 *
971 * TODO: check if the device has crashed and reset it.
972 */
973 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
974 return -ENOSPC;
975 }
976 }
977
700 while (left--) { 978 while (left--) {
701 u32 hole_size; 979 u32 hole_size;
702 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry); 980 info = IEEE80211_SKB_CB(entry);
703 struct memrecord *range = (void *)info->driver_data; 981 range = (void *)info->rate_driver_data;
704 hole_size = range->start_addr - last_addr; 982 hole_size = range->start_addr - last_addr;
705 if (!target_skb && hole_size >= len) { 983 if (!target_skb && hole_size >= len) {
706 target_skb = entry->prev; 984 target_skb = entry->prev;
@@ -715,64 +993,102 @@ static void p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
715 target_skb = priv->tx_queue.prev; 993 target_skb = priv->tx_queue.prev;
716 largest_hole = max(largest_hole, priv->rx_end - last_addr - len); 994 largest_hole = max(largest_hole, priv->rx_end - last_addr - len);
717 if (!skb_queue_empty(&priv->tx_queue)) { 995 if (!skb_queue_empty(&priv->tx_queue)) {
718 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(target_skb); 996 info = IEEE80211_SKB_CB(target_skb);
719 struct memrecord *range = (void *)info->driver_data; 997 range = (void *)info->rate_driver_data;
720 target_addr = range->end_addr; 998 target_addr = range->end_addr;
721 } 999 }
722 } else 1000 } else
723 largest_hole = max(largest_hole, priv->rx_end - last_addr); 1001 largest_hole = max(largest_hole, priv->rx_end - last_addr);
724 1002
725 if (skb) { 1003 if (!target_skb) {
726 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1004 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
727 struct memrecord *range = (void *)info->driver_data; 1005 ieee80211_stop_queues(dev);
728 range->start_addr = target_addr; 1006 return -ENOSPC;
729 range->end_addr = target_addr + len;
730 __skb_queue_after(&priv->tx_queue, target_skb, skb);
731 if (largest_hole < priv->rx_mtu + priv->headroom +
732 priv->tailroom +
733 sizeof(struct p54_control_hdr))
734 ieee80211_stop_queues(dev);
735 } 1007 }
1008
1009 info = IEEE80211_SKB_CB(skb);
1010 range = (void *)info->rate_driver_data;
1011 range->start_addr = target_addr;
1012 range->end_addr = target_addr + len;
1013 __skb_queue_after(&priv->tx_queue, target_skb, skb);
736 spin_unlock_irqrestore(&priv->tx_queue.lock, flags); 1014 spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
737 1015
1016 if (largest_hole < priv->headroom + sizeof(struct p54_hdr) +
1017 48 + IEEE80211_MAX_RTS_THRESHOLD + priv->tailroom)
1018 ieee80211_stop_queues(dev);
1019
738 data->req_id = cpu_to_le32(target_addr + priv->headroom); 1020 data->req_id = cpu_to_le32(target_addr + priv->headroom);
1021 return 0;
1022}
1023
1024static struct sk_buff *p54_alloc_skb(struct ieee80211_hw *dev,
1025 u16 hdr_flags, u16 len, u16 type, gfp_t memflags)
1026{
1027 struct p54_common *priv = dev->priv;
1028 struct p54_hdr *hdr;
1029 struct sk_buff *skb;
1030
1031 skb = __dev_alloc_skb(len + priv->tx_hdr_len, memflags);
1032 if (!skb)
1033 return NULL;
1034 skb_reserve(skb, priv->tx_hdr_len);
1035
1036 hdr = (struct p54_hdr *) skb_put(skb, sizeof(*hdr));
1037 hdr->flags = cpu_to_le16(hdr_flags);
1038 hdr->len = cpu_to_le16(len - sizeof(*hdr));
1039 hdr->type = cpu_to_le16(type);
1040 hdr->tries = hdr->rts_tries = 0;
1041
1042 if (unlikely(p54_assign_address(dev, skb, hdr, len))) {
1043 kfree_skb(skb);
1044 return NULL;
1045 }
1046 return skb;
739} 1047}
740 1048
741int p54_read_eeprom(struct ieee80211_hw *dev) 1049int p54_read_eeprom(struct ieee80211_hw *dev)
742{ 1050{
743 struct p54_common *priv = dev->priv; 1051 struct p54_common *priv = dev->priv;
744 struct p54_control_hdr *hdr = NULL; 1052 struct p54_hdr *hdr = NULL;
745 struct p54_eeprom_lm86 *eeprom_hdr; 1053 struct p54_eeprom_lm86 *eeprom_hdr;
746 size_t eeprom_size = 0x2020, offset = 0, blocksize; 1054 struct sk_buff *skb;
1055 size_t eeprom_size = 0x2020, offset = 0, blocksize, maxblocksize;
747 int ret = -ENOMEM; 1056 int ret = -ENOMEM;
748 void *eeprom = NULL; 1057 void *eeprom = NULL;
749 1058
750 hdr = (struct p54_control_hdr *)kzalloc(sizeof(*hdr) + 1059 maxblocksize = EEPROM_READBACK_LEN;
751 sizeof(*eeprom_hdr) + EEPROM_READBACK_LEN, GFP_KERNEL); 1060 if (priv->fw_var >= 0x509)
752 if (!hdr) 1061 maxblocksize -= 0xc;
753 goto free; 1062 else
1063 maxblocksize -= 0x4;
754 1064
1065 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL, sizeof(*hdr) +
1066 sizeof(*eeprom_hdr) + maxblocksize,
1067 P54_CONTROL_TYPE_EEPROM_READBACK, GFP_KERNEL);
1068 if (!skb)
1069 goto free;
755 priv->eeprom = kzalloc(EEPROM_READBACK_LEN, GFP_KERNEL); 1070 priv->eeprom = kzalloc(EEPROM_READBACK_LEN, GFP_KERNEL);
756 if (!priv->eeprom) 1071 if (!priv->eeprom)
757 goto free; 1072 goto free;
758
759 eeprom = kzalloc(eeprom_size, GFP_KERNEL); 1073 eeprom = kzalloc(eeprom_size, GFP_KERNEL);
760 if (!eeprom) 1074 if (!eeprom)
761 goto free; 1075 goto free;
762 1076
763 hdr->magic1 = cpu_to_le16(0x8000); 1077 eeprom_hdr = (struct p54_eeprom_lm86 *) skb_put(skb,
764 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_EEPROM_READBACK); 1078 sizeof(*eeprom_hdr) + maxblocksize);
765 hdr->retry1 = hdr->retry2 = 0;
766 eeprom_hdr = (struct p54_eeprom_lm86 *) hdr->data;
767 1079
768 while (eeprom_size) { 1080 while (eeprom_size) {
769 blocksize = min(eeprom_size, (size_t)EEPROM_READBACK_LEN); 1081 blocksize = min(eeprom_size, maxblocksize);
770 hdr->len = cpu_to_le16(blocksize + sizeof(*eeprom_hdr)); 1082 if (priv->fw_var < 0x509) {
771 eeprom_hdr->offset = cpu_to_le16(offset); 1083 eeprom_hdr->v1.offset = cpu_to_le16(offset);
772 eeprom_hdr->len = cpu_to_le16(blocksize); 1084 eeprom_hdr->v1.len = cpu_to_le16(blocksize);
773 p54_assign_address(dev, NULL, hdr, le16_to_cpu(hdr->len) + 1085 } else {
774 sizeof(*hdr)); 1086 eeprom_hdr->v2.offset = cpu_to_le32(offset);
775 priv->tx(dev, hdr, le16_to_cpu(hdr->len) + sizeof(*hdr), 0); 1087 eeprom_hdr->v2.len = cpu_to_le16(blocksize);
1088 eeprom_hdr->v2.magic2 = 0xf;
1089 memcpy(eeprom_hdr->v2.magic, (const char *)"LOCK", 4);
1090 }
1091 priv->tx(dev, skb);
776 1092
777 if (!wait_for_completion_interruptible_timeout(&priv->eeprom_comp, HZ)) { 1093 if (!wait_for_completion_interruptible_timeout(&priv->eeprom_comp, HZ)) {
778 printk(KERN_ERR "%s: device does not respond!\n", 1094 printk(KERN_ERR "%s: device does not respond!\n",
@@ -790,166 +1106,422 @@ int p54_read_eeprom(struct ieee80211_hw *dev)
790free: 1106free:
791 kfree(priv->eeprom); 1107 kfree(priv->eeprom);
792 priv->eeprom = NULL; 1108 priv->eeprom = NULL;
793 kfree(hdr); 1109 p54_free_skb(dev, skb);
794 kfree(eeprom); 1110 kfree(eeprom);
795 1111
796 return ret; 1112 return ret;
797} 1113}
798EXPORT_SYMBOL_GPL(p54_read_eeprom); 1114EXPORT_SYMBOL_GPL(p54_read_eeprom);
799 1115
1116static int p54_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
1117 bool set)
1118{
1119 struct p54_common *priv = dev->priv;
1120 struct sk_buff *skb;
1121 struct p54_tim *tim;
1122
1123 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET,
1124 sizeof(struct p54_hdr) + sizeof(*tim),
1125 P54_CONTROL_TYPE_TIM, GFP_KERNEL);
1126 if (!skb)
1127 return -ENOMEM;
1128
1129 tim = (struct p54_tim *) skb_put(skb, sizeof(*tim));
1130 tim->count = 1;
1131 tim->entry[0] = cpu_to_le16(set ? (sta->aid | 0x8000) : sta->aid);
1132 priv->tx(dev, skb);
1133 return 0;
1134}
1135
1136static int p54_sta_unlock(struct ieee80211_hw *dev, u8 *addr)
1137{
1138 struct p54_common *priv = dev->priv;
1139 struct sk_buff *skb;
1140 struct p54_sta_unlock *sta;
1141
1142 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET,
1143 sizeof(struct p54_hdr) + sizeof(*sta),
1144 P54_CONTROL_TYPE_PSM_STA_UNLOCK, GFP_ATOMIC);
1145 if (!skb)
1146 return -ENOMEM;
1147
1148 sta = (struct p54_sta_unlock *)skb_put(skb, sizeof(*sta));
1149 memcpy(sta->addr, addr, ETH_ALEN);
1150 priv->tx(dev, skb);
1151 return 0;
1152}
1153
1154static void p54_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
1155 enum sta_notify_cmd notify_cmd,
1156 struct ieee80211_sta *sta)
1157{
1158 switch (notify_cmd) {
1159 case STA_NOTIFY_ADD:
1160 case STA_NOTIFY_REMOVE:
1161 /*
1162 * Notify the firmware that we don't want or we don't
1163 * need to buffer frames for this station anymore.
1164 */
1165
1166 p54_sta_unlock(dev, sta->addr);
1167 break;
1168 case STA_NOTIFY_AWAKE:
1169 /* update the firmware's filter table */
1170 p54_sta_unlock(dev, sta->addr);
1171 break;
1172 default:
1173 break;
1174 }
1175}
1176
1177static int p54_tx_cancel(struct ieee80211_hw *dev, struct sk_buff *entry)
1178{
1179 struct p54_common *priv = dev->priv;
1180 struct sk_buff *skb;
1181 struct p54_hdr *hdr;
1182 struct p54_txcancel *cancel;
1183
1184 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET,
1185 sizeof(struct p54_hdr) + sizeof(*cancel),
1186 P54_CONTROL_TYPE_TXCANCEL, GFP_ATOMIC);
1187 if (!skb)
1188 return -ENOMEM;
1189
1190 hdr = (void *)entry->data;
1191 cancel = (struct p54_txcancel *)skb_put(skb, sizeof(*cancel));
1192 cancel->req_id = hdr->req_id;
1193 priv->tx(dev, skb);
1194 return 0;
1195}
1196
1197static int p54_tx_fill(struct ieee80211_hw *dev, struct sk_buff *skb,
1198 struct ieee80211_tx_info *info, u8 *queue, size_t *extra_len,
1199 u16 *flags, u16 *aid)
1200{
1201 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1202 struct p54_common *priv = dev->priv;
1203 int ret = 0;
1204
1205 if (unlikely(ieee80211_is_mgmt(hdr->frame_control))) {
1206 if (ieee80211_is_beacon(hdr->frame_control)) {
1207 *aid = 0;
1208 *queue = 0;
1209 *extra_len = IEEE80211_MAX_TIM_LEN;
1210 *flags = P54_HDR_FLAG_DATA_OUT_TIMESTAMP;
1211 return 0;
1212 } else if (ieee80211_is_probe_resp(hdr->frame_control)) {
1213 *aid = 0;
1214 *queue = 2;
1215 *flags = P54_HDR_FLAG_DATA_OUT_TIMESTAMP |
1216 P54_HDR_FLAG_DATA_OUT_NOCANCEL;
1217 return 0;
1218 } else {
1219 *queue = 2;
1220 ret = 0;
1221 }
1222 } else {
1223 *queue += 4;
1224 ret = 1;
1225 }
1226
1227 switch (priv->mode) {
1228 case NL80211_IFTYPE_STATION:
1229 *aid = 1;
1230 break;
1231 case NL80211_IFTYPE_AP:
1232 case NL80211_IFTYPE_ADHOC:
1233 case NL80211_IFTYPE_MESH_POINT:
1234 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1235 *aid = 0;
1236 *queue = 3;
1237 return 0;
1238 }
1239 if (info->control.sta)
1240 *aid = info->control.sta->aid;
1241 else
1242 *flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL;
1243 }
1244 return ret;
1245}
1246
1247static u8 p54_convert_algo(enum ieee80211_key_alg alg)
1248{
1249 switch (alg) {
1250 case ALG_WEP:
1251 return P54_CRYPTO_WEP;
1252 case ALG_TKIP:
1253 return P54_CRYPTO_TKIPMICHAEL;
1254 case ALG_CCMP:
1255 return P54_CRYPTO_AESCCMP;
1256 default:
1257 return 0;
1258 }
1259}
1260
800static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 1261static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
801{ 1262{
802 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1263 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
803 struct ieee80211_tx_queue_stats *current_queue; 1264 struct ieee80211_tx_queue_stats *current_queue = NULL;
804 struct p54_common *priv = dev->priv; 1265 struct p54_common *priv = dev->priv;
805 struct p54_control_hdr *hdr; 1266 struct p54_hdr *hdr;
806 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data; 1267 struct p54_tx_data *txhdr;
807 struct p54_tx_control_allocdata *txhdr; 1268 size_t padding, len, tim_len = 0;
808 size_t padding, len; 1269 int i, j, ridx, ret;
809 u8 rate; 1270 u16 hdr_flags = 0, aid = 0;
1271 u8 rate, queue, crypt_offset = 0;
810 u8 cts_rate = 0x20; 1272 u8 cts_rate = 0x20;
1273 u8 rc_flags;
1274 u8 calculated_tries[4];
1275 u8 nrates = 0, nremaining = 8;
811 1276
812 current_queue = &priv->tx_stats[skb_get_queue_mapping(skb) + 4]; 1277 queue = skb_get_queue_mapping(skb);
813 if (unlikely(current_queue->len > current_queue->limit)) 1278
1279 ret = p54_tx_fill(dev, skb, info, &queue, &tim_len, &hdr_flags, &aid);
1280 current_queue = &priv->tx_stats[queue];
1281 if (unlikely((current_queue->len > current_queue->limit) && ret))
814 return NETDEV_TX_BUSY; 1282 return NETDEV_TX_BUSY;
815 current_queue->len++; 1283 current_queue->len++;
816 current_queue->count++; 1284 current_queue->count++;
817 if (current_queue->len == current_queue->limit) 1285 if ((current_queue->len == current_queue->limit) && ret)
818 ieee80211_stop_queue(dev, skb_get_queue_mapping(skb)); 1286 ieee80211_stop_queue(dev, skb_get_queue_mapping(skb));
819 1287
820 padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; 1288 padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3;
821 len = skb->len; 1289 len = skb->len;
822 1290
823 txhdr = (struct p54_tx_control_allocdata *) 1291 if (info->control.hw_key) {
824 skb_push(skb, sizeof(*txhdr) + padding); 1292 crypt_offset = ieee80211_get_hdrlen_from_skb(skb);
825 hdr = (struct p54_control_hdr *) skb_push(skb, sizeof(*hdr)); 1293 if (info->control.hw_key->alg == ALG_TKIP) {
1294 u8 *iv = (u8 *)(skb->data + crypt_offset);
1295 /*
1296 * The firmware excepts that the IV has to have
1297 * this special format
1298 */
1299 iv[1] = iv[0];
1300 iv[0] = iv[2];
1301 iv[2] = 0;
1302 }
1303 }
1304
1305 txhdr = (struct p54_tx_data *) skb_push(skb, sizeof(*txhdr) + padding);
1306 hdr = (struct p54_hdr *) skb_push(skb, sizeof(*hdr));
826 1307
827 if (padding) 1308 if (padding)
828 hdr->magic1 = cpu_to_le16(0x4010); 1309 hdr_flags |= P54_HDR_FLAG_DATA_ALIGN;
829 else 1310 hdr->type = cpu_to_le16(aid);
830 hdr->magic1 = cpu_to_le16(0x0010); 1311 hdr->rts_tries = info->control.rates[0].count;
831 hdr->len = cpu_to_le16(len); 1312
832 hdr->type = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 0 : cpu_to_le16(1); 1313 /*
833 hdr->retry1 = hdr->retry2 = info->control.retry_limit; 1314 * we register the rates in perfect order, and
834 1315 * RTS/CTS won't happen on 5 GHz
835 /* TODO: add support for alternate retry TX rates */ 1316 */
836 rate = ieee80211_get_tx_rate(dev, info)->hw_value; 1317 cts_rate = info->control.rts_cts_rate_idx;
837 if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) { 1318
838 rate |= 0x10; 1319 memset(&txhdr->rateset, 0, sizeof(txhdr->rateset));
839 cts_rate |= 0x10; 1320
1321 /* see how many rates got used */
1322 for (i = 0; i < 4; i++) {
1323 if (info->control.rates[i].idx < 0)
1324 break;
1325 nrates++;
1326 }
1327
1328 /* limit tries to 8/nrates per rate */
1329 for (i = 0; i < nrates; i++) {
1330 /*
1331 * The magic expression here is equivalent to 8/nrates for
1332 * all values that matter, but avoids division and jumps.
1333 * Note that nrates can only take the values 1 through 4.
1334 */
1335 calculated_tries[i] = min_t(int, ((15 >> nrates) | 1) + 1,
1336 info->control.rates[i].count);
1337 nremaining -= calculated_tries[i];
840 } 1338 }
841 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { 1339
842 rate |= 0x40; 1340 /* if there are tries left, distribute from back to front */
843 cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value; 1341 for (i = nrates - 1; nremaining > 0 && i >= 0; i--) {
844 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { 1342 int tmp = info->control.rates[i].count - calculated_tries[i];
845 rate |= 0x20; 1343
846 cts_rate |= ieee80211_get_rts_cts_rate(dev, info)->hw_value; 1344 if (tmp <= 0)
1345 continue;
1346 /* RC requested more tries at this rate */
1347
1348 tmp = min_t(int, tmp, nremaining);
1349 calculated_tries[i] += tmp;
1350 nremaining -= tmp;
1351 }
1352
1353 ridx = 0;
1354 for (i = 0; i < nrates && ridx < 8; i++) {
1355 /* we register the rates in perfect order */
1356 rate = info->control.rates[i].idx;
1357 if (info->band == IEEE80211_BAND_5GHZ)
1358 rate += 4;
1359
1360 /* store the count we actually calculated for TX status */
1361 info->control.rates[i].count = calculated_tries[i];
1362
1363 rc_flags = info->control.rates[i].flags;
1364 if (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) {
1365 rate |= 0x10;
1366 cts_rate |= 0x10;
1367 }
1368 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
1369 rate |= 0x40;
1370 else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
1371 rate |= 0x20;
1372 for (j = 0; j < calculated_tries[i] && ridx < 8; j++) {
1373 txhdr->rateset[ridx] = rate;
1374 ridx++;
1375 }
1376 }
1377
1378 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
1379 hdr_flags |= P54_HDR_FLAG_DATA_OUT_SEQNR;
1380
1381 /* TODO: enable bursting */
1382 hdr->flags = cpu_to_le16(hdr_flags);
1383 hdr->tries = ridx;
1384 txhdr->rts_rate_idx = 0;
1385 if (info->control.hw_key) {
1386 crypt_offset += info->control.hw_key->iv_len;
1387 txhdr->key_type = p54_convert_algo(info->control.hw_key->alg);
1388 txhdr->key_len = min((u8)16, info->control.hw_key->keylen);
1389 memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len);
1390 if (info->control.hw_key->alg == ALG_TKIP) {
1391 if (unlikely(skb_tailroom(skb) < 12))
1392 goto err;
1393 /* reserve space for the MIC key */
1394 len += 8;
1395 memcpy(skb_put(skb, 8), &(info->control.hw_key->key
1396 [NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]), 8);
1397 }
1398 /* reserve some space for ICV */
1399 len += info->control.hw_key->icv_len;
1400 } else {
1401 txhdr->key_type = 0;
1402 txhdr->key_len = 0;
847 } 1403 }
848 memset(txhdr->rateset, rate, 8); 1404 txhdr->crypt_offset = crypt_offset;
849 txhdr->key_type = 0; 1405 txhdr->hw_queue = queue;
850 txhdr->key_len = 0; 1406 if (current_queue)
851 txhdr->hw_queue = skb_get_queue_mapping(skb) + 4; 1407 txhdr->backlog = current_queue->len;
1408 else
1409 txhdr->backlog = 0;
1410 memset(txhdr->durations, 0, sizeof(txhdr->durations));
852 txhdr->tx_antenna = (info->antenna_sel_tx == 0) ? 1411 txhdr->tx_antenna = (info->antenna_sel_tx == 0) ?
853 2 : info->antenna_sel_tx - 1; 1412 2 : info->antenna_sel_tx - 1;
854 txhdr->output_power = priv->output_power; 1413 txhdr->output_power = priv->output_power;
855 txhdr->cts_rate = (info->flags & IEEE80211_TX_CTL_NO_ACK) ? 1414 txhdr->cts_rate = cts_rate;
856 0 : cts_rate;
857 if (padding) 1415 if (padding)
858 txhdr->align[0] = padding; 1416 txhdr->align[0] = padding;
859 1417
860 /* FIXME: The sequence that follows is needed for this driver to 1418 hdr->len = cpu_to_le16(len);
861 * work with mac80211 since "mac80211: fix TX sequence numbers".
862 * As with the temporary code in rt2x00, changes will be needed
863 * to get proper sequence numbers on beacons. In addition, this
864 * patch places the sequence number in the hardware state, which
865 * limits us to a single virtual state.
866 */
867 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
868 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
869 priv->seqno += 0x10;
870 ieee80211hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
871 ieee80211hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
872 }
873 /* modifies skb->cb and with it info, so must be last! */ 1419 /* modifies skb->cb and with it info, so must be last! */
874 p54_assign_address(dev, skb, hdr, skb->len); 1420 if (unlikely(p54_assign_address(dev, skb, hdr, skb->len + tim_len)))
1421 goto err;
1422 priv->tx(dev, skb);
1423
1424 queue_delayed_work(dev->workqueue, &priv->work,
1425 msecs_to_jiffies(P54_TX_FRAME_LIFETIME));
875 1426
876 priv->tx(dev, hdr, skb->len, 0);
877 return 0; 1427 return 0;
1428
1429 err:
1430 skb_pull(skb, sizeof(*hdr) + sizeof(*txhdr) + padding);
1431 if (current_queue) {
1432 current_queue->len--;
1433 current_queue->count--;
1434 }
1435 return NETDEV_TX_BUSY;
878} 1436}
879 1437
880static int p54_set_filter(struct ieee80211_hw *dev, u16 filter_type, 1438static int p54_setup_mac(struct ieee80211_hw *dev)
881 const u8 *bssid)
882{ 1439{
883 struct p54_common *priv = dev->priv; 1440 struct p54_common *priv = dev->priv;
884 struct p54_control_hdr *hdr; 1441 struct sk_buff *skb;
885 struct p54_tx_control_filter *filter; 1442 struct p54_setup_mac *setup;
886 size_t data_len; 1443 u16 mode;
887 1444
888 hdr = kzalloc(sizeof(*hdr) + sizeof(*filter) + 1445 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*setup) +
889 priv->tx_hdr_len, GFP_ATOMIC); 1446 sizeof(struct p54_hdr), P54_CONTROL_TYPE_SETUP,
890 if (!hdr) 1447 GFP_ATOMIC);
1448 if (!skb)
891 return -ENOMEM; 1449 return -ENOMEM;
892 1450
893 hdr = (void *)hdr + priv->tx_hdr_len; 1451 setup = (struct p54_setup_mac *) skb_put(skb, sizeof(*setup));
894 1452 if (dev->conf.radio_enabled) {
895 filter = (struct p54_tx_control_filter *) hdr->data; 1453 switch (priv->mode) {
896 hdr->magic1 = cpu_to_le16(0x8001); 1454 case NL80211_IFTYPE_STATION:
897 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_FILTER_SET); 1455 mode = P54_FILTER_TYPE_STATION;
898 1456 break;
899 priv->filter_type = filter->filter_type = cpu_to_le16(filter_type); 1457 case NL80211_IFTYPE_AP:
900 memcpy(filter->mac_addr, priv->mac_addr, ETH_ALEN); 1458 mode = P54_FILTER_TYPE_AP;
901 if (!bssid) 1459 break;
902 memset(filter->bssid, ~0, ETH_ALEN); 1460 case NL80211_IFTYPE_ADHOC:
903 else 1461 case NL80211_IFTYPE_MESH_POINT:
904 memcpy(filter->bssid, bssid, ETH_ALEN); 1462 mode = P54_FILTER_TYPE_IBSS;
905 1463 break;
906 filter->rx_antenna = priv->rx_antenna; 1464 default:
1465 mode = P54_FILTER_TYPE_NONE;
1466 break;
1467 }
1468 if (priv->filter_flags & FIF_PROMISC_IN_BSS)
1469 mode |= P54_FILTER_TYPE_TRANSPARENT;
1470 } else
1471 mode = P54_FILTER_TYPE_RX_DISABLED;
907 1472
1473 setup->mac_mode = cpu_to_le16(mode);
1474 memcpy(setup->mac_addr, priv->mac_addr, ETH_ALEN);
1475 memcpy(setup->bssid, priv->bssid, ETH_ALEN);
1476 setup->rx_antenna = 2; /* automatic */
1477 setup->rx_align = 0;
908 if (priv->fw_var < 0x500) { 1478 if (priv->fw_var < 0x500) {
909 data_len = P54_TX_CONTROL_FILTER_V1_LEN; 1479 setup->v1.basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
910 filter->v1.basic_rate_mask = cpu_to_le32(0x15F); 1480 memset(setup->v1.rts_rates, 0, 8);
911 filter->v1.rx_addr = cpu_to_le32(priv->rx_end); 1481 setup->v1.rx_addr = cpu_to_le32(priv->rx_end);
912 filter->v1.max_rx = cpu_to_le16(priv->rx_mtu); 1482 setup->v1.max_rx = cpu_to_le16(priv->rx_mtu);
913 filter->v1.rxhw = cpu_to_le16(priv->rxhw); 1483 setup->v1.rxhw = cpu_to_le16(priv->rxhw);
914 filter->v1.wakeup_timer = cpu_to_le16(500); 1484 setup->v1.wakeup_timer = cpu_to_le16(priv->wakeup_timer);
1485 setup->v1.unalloc0 = cpu_to_le16(0);
915 } else { 1486 } else {
916 data_len = P54_TX_CONTROL_FILTER_V2_LEN; 1487 setup->v2.rx_addr = cpu_to_le32(priv->rx_end);
917 filter->v2.rx_addr = cpu_to_le32(priv->rx_end); 1488 setup->v2.max_rx = cpu_to_le16(priv->rx_mtu);
918 filter->v2.max_rx = cpu_to_le16(priv->rx_mtu); 1489 setup->v2.rxhw = cpu_to_le16(priv->rxhw);
919 filter->v2.rxhw = cpu_to_le16(priv->rxhw); 1490 setup->v2.timer = cpu_to_le16(priv->wakeup_timer);
920 filter->v2.timer = cpu_to_le16(1000); 1491 setup->v2.truncate = cpu_to_le16(48896);
1492 setup->v2.basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
1493 setup->v2.sbss_offset = 0;
1494 setup->v2.mcast_window = 0;
1495 setup->v2.rx_rssi_threshold = 0;
1496 setup->v2.rx_ed_threshold = 0;
1497 setup->v2.ref_clock = cpu_to_le32(644245094);
1498 setup->v2.lpf_bandwidth = cpu_to_le16(65535);
1499 setup->v2.osc_start_delay = cpu_to_le16(65535);
921 } 1500 }
922 1501 priv->tx(dev, skb);
923 hdr->len = cpu_to_le16(data_len);
924 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + data_len);
925 priv->tx(dev, hdr, sizeof(*hdr) + data_len, 1);
926 return 0; 1502 return 0;
927} 1503}
928 1504
929static int p54_set_freq(struct ieee80211_hw *dev, __le16 freq) 1505static int p54_scan(struct ieee80211_hw *dev, u16 mode, u16 dwell)
930{ 1506{
931 struct p54_common *priv = dev->priv; 1507 struct p54_common *priv = dev->priv;
932 struct p54_control_hdr *hdr; 1508 struct sk_buff *skb;
933 struct p54_tx_control_channel *chan; 1509 struct p54_scan *chan;
934 unsigned int i; 1510 unsigned int i;
935 size_t data_len;
936 void *entry; 1511 void *entry;
1512 __le16 freq = cpu_to_le16(dev->conf.channel->center_freq);
1513 int band = dev->conf.channel->band;
937 1514
938 hdr = kzalloc(sizeof(*hdr) + sizeof(*chan) + 1515 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*chan) +
939 priv->tx_hdr_len, GFP_KERNEL); 1516 sizeof(struct p54_hdr), P54_CONTROL_TYPE_SCAN,
940 if (!hdr) 1517 GFP_ATOMIC);
1518 if (!skb)
941 return -ENOMEM; 1519 return -ENOMEM;
942 1520
943 hdr = (void *)hdr + priv->tx_hdr_len; 1521 chan = (struct p54_scan *) skb_put(skb, sizeof(*chan));
944 1522 memset(chan->padding1, 0, sizeof(chan->padding1));
945 chan = (struct p54_tx_control_channel *) hdr->data; 1523 chan->mode = cpu_to_le16(mode);
946 1524 chan->dwell = cpu_to_le16(dwell);
947 hdr->magic1 = cpu_to_le16(0x8001);
948
949 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_CHANNEL_CHANGE);
950
951 chan->flags = cpu_to_le16(0x1);
952 chan->dwell = cpu_to_le16(0x0);
953 1525
954 for (i = 0; i < priv->iq_autocal_len; i++) { 1526 for (i = 0; i < priv->iq_autocal_len; i++) {
955 if (priv->iq_autocal[i].freq != freq) 1527 if (priv->iq_autocal[i].freq != freq)
@@ -990,61 +1562,50 @@ static int p54_set_freq(struct ieee80211_hw *dev, __le16 freq)
990 } 1562 }
991 1563
992 entry += sizeof(__le16); 1564 entry += sizeof(__le16);
993 chan->pa_points_per_curve = 1565 chan->pa_points_per_curve = 8;
994 min(priv->curve_data->points_per_channel, (u8) 8); 1566 memset(chan->curve_data, 0, sizeof(*chan->curve_data));
995 1567 memcpy(chan->curve_data, entry,
996 memcpy(chan->curve_data, entry, sizeof(*chan->curve_data) * 1568 sizeof(struct p54_pa_curve_data_sample) *
997 chan->pa_points_per_curve); 1569 min((u8)8, priv->curve_data->points_per_channel));
998 break; 1570 break;
999 } 1571 }
1000 1572
1001 if (priv->fw_var < 0x500) { 1573 if (priv->fw_var < 0x500) {
1002 data_len = P54_TX_CONTROL_CHANNEL_V1_LEN; 1574 chan->v1_rssi.mul = cpu_to_le16(priv->rssical_db[band].mul);
1003 chan->v1.rssical_mul = cpu_to_le16(130); 1575 chan->v1_rssi.add = cpu_to_le16(priv->rssical_db[band].add);
1004 chan->v1.rssical_add = cpu_to_le16(0xfe70);
1005 } else { 1576 } else {
1006 data_len = P54_TX_CONTROL_CHANNEL_V2_LEN; 1577 chan->v2.rssi.mul = cpu_to_le16(priv->rssical_db[band].mul);
1007 chan->v2.rssical_mul = cpu_to_le16(130); 1578 chan->v2.rssi.add = cpu_to_le16(priv->rssical_db[band].add);
1008 chan->v2.rssical_add = cpu_to_le16(0xfe70); 1579 chan->v2.basic_rate_mask = cpu_to_le32(priv->basic_rate_mask);
1009 chan->v2.basic_rate_mask = cpu_to_le32(0x15f); 1580 memset(chan->v2.rts_rates, 0, 8);
1010 } 1581 }
1011 1582 priv->tx(dev, skb);
1012 hdr->len = cpu_to_le16(data_len);
1013 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + data_len);
1014 priv->tx(dev, hdr, sizeof(*hdr) + data_len, 1);
1015 return 0; 1583 return 0;
1016 1584
1017 err: 1585 err:
1018 printk(KERN_ERR "%s: frequency change failed\n", wiphy_name(dev->wiphy)); 1586 printk(KERN_ERR "%s: frequency change failed\n", wiphy_name(dev->wiphy));
1019 kfree(hdr); 1587 kfree_skb(skb);
1020 return -EINVAL; 1588 return -EINVAL;
1021} 1589}
1022 1590
1023static int p54_set_leds(struct ieee80211_hw *dev, int mode, int link, int act) 1591static int p54_set_leds(struct ieee80211_hw *dev, int mode, int link, int act)
1024{ 1592{
1025 struct p54_common *priv = dev->priv; 1593 struct p54_common *priv = dev->priv;
1026 struct p54_control_hdr *hdr; 1594 struct sk_buff *skb;
1027 struct p54_tx_control_led *led; 1595 struct p54_led *led;
1028 1596
1029 hdr = kzalloc(sizeof(*hdr) + sizeof(*led) + 1597 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*led) +
1030 priv->tx_hdr_len, GFP_KERNEL); 1598 sizeof(struct p54_hdr), P54_CONTROL_TYPE_LED,
1031 if (!hdr) 1599 GFP_ATOMIC);
1600 if (!skb)
1032 return -ENOMEM; 1601 return -ENOMEM;
1033 1602
1034 hdr = (void *)hdr + priv->tx_hdr_len; 1603 led = (struct p54_led *)skb_put(skb, sizeof(*led));
1035 hdr->magic1 = cpu_to_le16(0x8001);
1036 hdr->len = cpu_to_le16(sizeof(*led));
1037 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_LED);
1038 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*led));
1039
1040 led = (struct p54_tx_control_led *) hdr->data;
1041 led->mode = cpu_to_le16(mode); 1604 led->mode = cpu_to_le16(mode);
1042 led->led_permanent = cpu_to_le16(link); 1605 led->led_permanent = cpu_to_le16(link);
1043 led->led_temporary = cpu_to_le16(act); 1606 led->led_temporary = cpu_to_le16(act);
1044 led->duration = cpu_to_le16(1000); 1607 led->duration = cpu_to_le16(1000);
1045 1608 priv->tx(dev, skb);
1046 priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*led), 1);
1047
1048 return 0; 1609 return 0;
1049} 1610}
1050 1611
@@ -1056,88 +1617,144 @@ do { \
1056 queue.txop = cpu_to_le16(_txop); \ 1617 queue.txop = cpu_to_le16(_txop); \
1057} while(0) 1618} while(0)
1058 1619
1059static void p54_init_vdcf(struct ieee80211_hw *dev) 1620static int p54_set_edcf(struct ieee80211_hw *dev)
1060{ 1621{
1061 struct p54_common *priv = dev->priv; 1622 struct p54_common *priv = dev->priv;
1062 struct p54_control_hdr *hdr; 1623 struct sk_buff *skb;
1063 struct p54_tx_control_vdcf *vdcf; 1624 struct p54_edcf *edcf;
1064 1625
1065 /* all USB V1 adapters need a extra headroom */ 1626 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf) +
1066 hdr = (void *)priv->cached_vdcf + priv->tx_hdr_len; 1627 sizeof(struct p54_hdr), P54_CONTROL_TYPE_DCFINIT,
1067 hdr->magic1 = cpu_to_le16(0x8001); 1628 GFP_ATOMIC);
1068 hdr->len = cpu_to_le16(sizeof(*vdcf)); 1629 if (!skb)
1069 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_DCFINIT); 1630 return -ENOMEM;
1070 hdr->req_id = cpu_to_le32(priv->rx_start); 1631
1071 1632 edcf = (struct p54_edcf *)skb_put(skb, sizeof(*edcf));
1072 vdcf = (struct p54_tx_control_vdcf *) hdr->data; 1633 if (priv->use_short_slot) {
1073 1634 edcf->slottime = 9;
1074 P54_SET_QUEUE(vdcf->queue[0], 0x0002, 0x0003, 0x0007, 47); 1635 edcf->sifs = 0x10;
1075 P54_SET_QUEUE(vdcf->queue[1], 0x0002, 0x0007, 0x000f, 94); 1636 edcf->eofpad = 0x00;
1076 P54_SET_QUEUE(vdcf->queue[2], 0x0003, 0x000f, 0x03ff, 0); 1637 } else {
1077 P54_SET_QUEUE(vdcf->queue[3], 0x0007, 0x000f, 0x03ff, 0); 1638 edcf->slottime = 20;
1639 edcf->sifs = 0x0a;
1640 edcf->eofpad = 0x06;
1641 }
1642 /* (see prism54/isl_oid.h for further details) */
1643 edcf->frameburst = cpu_to_le16(0);
1644 edcf->round_trip_delay = cpu_to_le16(0);
1645 edcf->flags = 0;
1646 memset(edcf->mapping, 0, sizeof(edcf->mapping));
1647 memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue));
1648 priv->tx(dev, skb);
1649 return 0;
1078} 1650}
1079 1651
1080static void p54_set_vdcf(struct ieee80211_hw *dev) 1652static int p54_beacon_tim(struct sk_buff *skb)
1081{ 1653{
1082 struct p54_common *priv = dev->priv; 1654 /*
1083 struct p54_control_hdr *hdr; 1655 * the good excuse for this mess is ... the firmware.
1084 struct p54_tx_control_vdcf *vdcf; 1656 * The dummy TIM MUST be at the end of the beacon frame,
1657 * because it'll be overwritten!
1658 */
1085 1659
1086 hdr = (void *)priv->cached_vdcf + priv->tx_hdr_len; 1660 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1661 u8 *pos, *end;
1087 1662
1088 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*vdcf)); 1663 if (skb->len <= sizeof(mgmt))
1664 return -EINVAL;
1089 1665
1090 vdcf = (struct p54_tx_control_vdcf *) hdr->data; 1666 pos = (u8 *)mgmt->u.beacon.variable;
1667 end = skb->data + skb->len;
1668 while (pos < end) {
1669 if (pos + 2 + pos[1] > end)
1670 return -EINVAL;
1091 1671
1092 if (dev->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) { 1672 if (pos[0] == WLAN_EID_TIM) {
1093 vdcf->slottime = 9; 1673 u8 dtim_len = pos[1];
1094 vdcf->magic1 = 0x10; 1674 u8 dtim_period = pos[3];
1095 vdcf->magic2 = 0x00; 1675 u8 *next = pos + 2 + dtim_len;
1096 } else {
1097 vdcf->slottime = 20;
1098 vdcf->magic1 = 0x0a;
1099 vdcf->magic2 = 0x06;
1100 }
1101 1676
1102 /* (see prism54/isl_oid.h for further details) */ 1677 if (dtim_len < 3)
1103 vdcf->frameburst = cpu_to_le16(0); 1678 return -EINVAL;
1679
1680 memmove(pos, next, end - next);
1104 1681
1105 priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*vdcf), 0); 1682 if (dtim_len > 3)
1683 skb_trim(skb, skb->len - (dtim_len - 3));
1684
1685 pos = end - (dtim_len + 2);
1686
1687 /* add the dummy at the end */
1688 pos[0] = WLAN_EID_TIM;
1689 pos[1] = 3;
1690 pos[2] = 0;
1691 pos[3] = dtim_period;
1692 pos[4] = 0;
1693 return 0;
1694 }
1695 pos += 2 + pos[1];
1696 }
1697 return 0;
1106} 1698}
1107 1699
1108static int p54_start(struct ieee80211_hw *dev) 1700static int p54_beacon_update(struct ieee80211_hw *dev,
1701 struct ieee80211_vif *vif)
1109{ 1702{
1110 struct p54_common *priv = dev->priv; 1703 struct p54_common *priv = dev->priv;
1111 int err; 1704 struct sk_buff *beacon;
1112 1705 int ret;
1113 if (!priv->cached_vdcf) {
1114 priv->cached_vdcf = kzalloc(sizeof(struct p54_tx_control_vdcf)+
1115 priv->tx_hdr_len + sizeof(struct p54_control_hdr),
1116 GFP_KERNEL);
1117 1706
1118 if (!priv->cached_vdcf) 1707 if (priv->cached_beacon) {
1119 return -ENOMEM; 1708 p54_tx_cancel(dev, priv->cached_beacon);
1709 /* wait for the last beacon the be freed */
1710 msleep(10);
1120 } 1711 }
1121 1712
1122 if (!priv->cached_stats) { 1713 beacon = ieee80211_beacon_get(dev, vif);
1123 priv->cached_stats = kzalloc(sizeof(struct p54_statistics) + 1714 if (!beacon)
1124 priv->tx_hdr_len + sizeof(struct p54_control_hdr), 1715 return -ENOMEM;
1125 GFP_KERNEL); 1716 ret = p54_beacon_tim(beacon);
1717 if (ret)
1718 return ret;
1719 ret = p54_tx(dev, beacon);
1720 if (ret)
1721 return ret;
1722 priv->cached_beacon = beacon;
1723 priv->tsf_high32 = 0;
1724 priv->tsf_low32 = 0;
1126 1725
1127 if (!priv->cached_stats) { 1726 return 0;
1128 kfree(priv->cached_vdcf); 1727}
1129 priv->cached_vdcf = NULL;
1130 return -ENOMEM;
1131 }
1132 }
1133 1728
1729static int p54_start(struct ieee80211_hw *dev)
1730{
1731 struct p54_common *priv = dev->priv;
1732 int err;
1733
1734 mutex_lock(&priv->conf_mutex);
1134 err = priv->open(dev); 1735 err = priv->open(dev);
1135 if (!err) 1736 if (err)
1136 priv->mode = NL80211_IFTYPE_MONITOR; 1737 goto out;
1738 P54_SET_QUEUE(priv->qos_params[0], 0x0002, 0x0003, 0x0007, 47);
1739 P54_SET_QUEUE(priv->qos_params[1], 0x0002, 0x0007, 0x000f, 94);
1740 P54_SET_QUEUE(priv->qos_params[2], 0x0003, 0x000f, 0x03ff, 0);
1741 P54_SET_QUEUE(priv->qos_params[3], 0x0007, 0x000f, 0x03ff, 0);
1742 err = p54_set_edcf(dev);
1743 if (err)
1744 goto out;
1745
1746 memset(priv->bssid, ~0, ETH_ALEN);
1747 priv->mode = NL80211_IFTYPE_MONITOR;
1748 err = p54_setup_mac(dev);
1749 if (err) {
1750 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
1751 goto out;
1752 }
1137 1753
1138 p54_init_vdcf(dev); 1754 queue_delayed_work(dev->workqueue, &priv->work, 0);
1139 1755
1140 mod_timer(&priv->stats_timer, jiffies + HZ); 1756out:
1757 mutex_unlock(&priv->conf_mutex);
1141 return err; 1758 return err;
1142} 1759}
1143 1760
@@ -1146,12 +1763,18 @@ static void p54_stop(struct ieee80211_hw *dev)
1146 struct p54_common *priv = dev->priv; 1763 struct p54_common *priv = dev->priv;
1147 struct sk_buff *skb; 1764 struct sk_buff *skb;
1148 1765
1149 del_timer(&priv->stats_timer); 1766 mutex_lock(&priv->conf_mutex);
1767 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
1768 cancel_delayed_work_sync(&priv->work);
1769 if (priv->cached_beacon)
1770 p54_tx_cancel(dev, priv->cached_beacon);
1771
1772 priv->stop(dev);
1150 while ((skb = skb_dequeue(&priv->tx_queue))) 1773 while ((skb = skb_dequeue(&priv->tx_queue)))
1151 kfree_skb(skb); 1774 kfree_skb(skb);
1152 priv->stop(dev); 1775 priv->cached_beacon = NULL;
1153 priv->tsf_high32 = priv->tsf_low32 = 0; 1776 priv->tsf_high32 = priv->tsf_low32 = 0;
1154 priv->mode = NL80211_IFTYPE_UNSPECIFIED; 1777 mutex_unlock(&priv->conf_mutex);
1155} 1778}
1156 1779
1157static int p54_add_interface(struct ieee80211_hw *dev, 1780static int p54_add_interface(struct ieee80211_hw *dev,
@@ -1159,32 +1782,28 @@ static int p54_add_interface(struct ieee80211_hw *dev,
1159{ 1782{
1160 struct p54_common *priv = dev->priv; 1783 struct p54_common *priv = dev->priv;
1161 1784
1162 if (priv->mode != NL80211_IFTYPE_MONITOR) 1785 mutex_lock(&priv->conf_mutex);
1786 if (priv->mode != NL80211_IFTYPE_MONITOR) {
1787 mutex_unlock(&priv->conf_mutex);
1163 return -EOPNOTSUPP; 1788 return -EOPNOTSUPP;
1789 }
1164 1790
1165 switch (conf->type) { 1791 switch (conf->type) {
1166 case NL80211_IFTYPE_STATION: 1792 case NL80211_IFTYPE_STATION:
1793 case NL80211_IFTYPE_ADHOC:
1794 case NL80211_IFTYPE_AP:
1795 case NL80211_IFTYPE_MESH_POINT:
1167 priv->mode = conf->type; 1796 priv->mode = conf->type;
1168 break; 1797 break;
1169 default: 1798 default:
1799 mutex_unlock(&priv->conf_mutex);
1170 return -EOPNOTSUPP; 1800 return -EOPNOTSUPP;
1171 } 1801 }
1172 1802
1173 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 1803 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
1174 1804 p54_setup_mac(dev);
1175 p54_set_filter(dev, 0, NULL);
1176
1177 switch (conf->type) {
1178 case NL80211_IFTYPE_STATION:
1179 p54_set_filter(dev, 1, NULL);
1180 break;
1181 default:
1182 BUG(); /* impossible */
1183 break;
1184 }
1185
1186 p54_set_leds(dev, 1, 0, 0); 1805 p54_set_leds(dev, 1, 0, 0);
1187 1806 mutex_unlock(&priv->conf_mutex);
1188 return 0; 1807 return 0;
1189} 1808}
1190 1809
@@ -1192,22 +1811,38 @@ static void p54_remove_interface(struct ieee80211_hw *dev,
1192 struct ieee80211_if_init_conf *conf) 1811 struct ieee80211_if_init_conf *conf)
1193{ 1812{
1194 struct p54_common *priv = dev->priv; 1813 struct p54_common *priv = dev->priv;
1814
1815 mutex_lock(&priv->conf_mutex);
1816 if (priv->cached_beacon)
1817 p54_tx_cancel(dev, priv->cached_beacon);
1195 priv->mode = NL80211_IFTYPE_MONITOR; 1818 priv->mode = NL80211_IFTYPE_MONITOR;
1196 memset(priv->mac_addr, 0, ETH_ALEN); 1819 memset(priv->mac_addr, 0, ETH_ALEN);
1197 p54_set_filter(dev, 0, NULL); 1820 memset(priv->bssid, 0, ETH_ALEN);
1821 p54_setup_mac(dev);
1822 mutex_unlock(&priv->conf_mutex);
1198} 1823}
1199 1824
1200static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) 1825static int p54_config(struct ieee80211_hw *dev, u32 changed)
1201{ 1826{
1202 int ret; 1827 int ret;
1203 struct p54_common *priv = dev->priv; 1828 struct p54_common *priv = dev->priv;
1829 struct ieee80211_conf *conf = &dev->conf;
1204 1830
1205 mutex_lock(&priv->conf_mutex); 1831 mutex_lock(&priv->conf_mutex);
1206 priv->rx_antenna = (conf->antenna_sel_rx == 0) ? 1832 if (changed & IEEE80211_CONF_CHANGE_POWER)
1207 2 : conf->antenna_sel_tx - 1; 1833 priv->output_power = conf->power_level << 2;
1208 priv->output_power = conf->power_level << 2; 1834 if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
1209 ret = p54_set_freq(dev, cpu_to_le16(conf->channel->center_freq)); 1835 ret = p54_setup_mac(dev);
1210 p54_set_vdcf(dev); 1836 if (ret)
1837 goto out;
1838 }
1839 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1840 ret = p54_scan(dev, P54_SCAN_EXIT, 0);
1841 if (ret)
1842 goto out;
1843 }
1844
1845out:
1211 mutex_unlock(&priv->conf_mutex); 1846 mutex_unlock(&priv->conf_mutex);
1212 return ret; 1847 return ret;
1213} 1848}
@@ -1217,13 +1852,36 @@ static int p54_config_interface(struct ieee80211_hw *dev,
1217 struct ieee80211_if_conf *conf) 1852 struct ieee80211_if_conf *conf)
1218{ 1853{
1219 struct p54_common *priv = dev->priv; 1854 struct p54_common *priv = dev->priv;
1855 int ret = 0;
1220 1856
1221 mutex_lock(&priv->conf_mutex); 1857 mutex_lock(&priv->conf_mutex);
1222 p54_set_filter(dev, 0, conf->bssid); 1858 if (conf->changed & IEEE80211_IFCC_BSSID) {
1223 p54_set_leds(dev, 1, !is_multicast_ether_addr(conf->bssid), 0); 1859 memcpy(priv->bssid, conf->bssid, ETH_ALEN);
1224 memcpy(priv->bssid, conf->bssid, ETH_ALEN); 1860 ret = p54_setup_mac(dev);
1861 if (ret)
1862 goto out;
1863 }
1864
1865 if (conf->changed & IEEE80211_IFCC_BEACON) {
1866 ret = p54_scan(dev, P54_SCAN_EXIT, 0);
1867 if (ret)
1868 goto out;
1869 ret = p54_setup_mac(dev);
1870 if (ret)
1871 goto out;
1872 ret = p54_beacon_update(dev, vif);
1873 if (ret)
1874 goto out;
1875 ret = p54_set_edcf(dev);
1876 if (ret)
1877 goto out;
1878 }
1879
1880 ret = p54_set_leds(dev, 1, !is_multicast_ether_addr(priv->bssid), 0);
1881
1882out:
1225 mutex_unlock(&priv->conf_mutex); 1883 mutex_unlock(&priv->conf_mutex);
1226 return 0; 1884 return ret;
1227} 1885}
1228 1886
1229static void p54_configure_filter(struct ieee80211_hw *dev, 1887static void p54_configure_filter(struct ieee80211_hw *dev,
@@ -1233,94 +1891,78 @@ static void p54_configure_filter(struct ieee80211_hw *dev,
1233{ 1891{
1234 struct p54_common *priv = dev->priv; 1892 struct p54_common *priv = dev->priv;
1235 1893
1236 *total_flags &= FIF_BCN_PRBRESP_PROMISC | 1894 *total_flags &= FIF_PROMISC_IN_BSS |
1237 FIF_PROMISC_IN_BSS | 1895 (*total_flags & FIF_PROMISC_IN_BSS) ?
1238 FIF_FCSFAIL; 1896 FIF_FCSFAIL : 0;
1239 1897
1240 priv->filter_flags = *total_flags; 1898 priv->filter_flags = *total_flags;
1241 1899
1242 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { 1900 if (changed_flags & FIF_PROMISC_IN_BSS)
1243 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) 1901 p54_setup_mac(dev);
1244 p54_set_filter(dev, le16_to_cpu(priv->filter_type),
1245 NULL);
1246 else
1247 p54_set_filter(dev, le16_to_cpu(priv->filter_type),
1248 priv->bssid);
1249 }
1250
1251 if (changed_flags & FIF_PROMISC_IN_BSS) {
1252 if (*total_flags & FIF_PROMISC_IN_BSS)
1253 p54_set_filter(dev, le16_to_cpu(priv->filter_type) |
1254 0x8, NULL);
1255 else
1256 p54_set_filter(dev, le16_to_cpu(priv->filter_type) &
1257 ~0x8, priv->bssid);
1258 }
1259} 1902}
1260 1903
1261static int p54_conf_tx(struct ieee80211_hw *dev, u16 queue, 1904static int p54_conf_tx(struct ieee80211_hw *dev, u16 queue,
1262 const struct ieee80211_tx_queue_params *params) 1905 const struct ieee80211_tx_queue_params *params)
1263{ 1906{
1264 struct p54_common *priv = dev->priv; 1907 struct p54_common *priv = dev->priv;
1265 struct p54_tx_control_vdcf *vdcf; 1908 int ret;
1266
1267 vdcf = (struct p54_tx_control_vdcf *)(((struct p54_control_hdr *)
1268 ((void *)priv->cached_vdcf + priv->tx_hdr_len))->data);
1269 1909
1910 mutex_lock(&priv->conf_mutex);
1270 if ((params) && !(queue > 4)) { 1911 if ((params) && !(queue > 4)) {
1271 P54_SET_QUEUE(vdcf->queue[queue], params->aifs, 1912 P54_SET_QUEUE(priv->qos_params[queue], params->aifs,
1272 params->cw_min, params->cw_max, params->txop); 1913 params->cw_min, params->cw_max, params->txop);
1914 ret = p54_set_edcf(dev);
1273 } else 1915 } else
1274 return -EINVAL; 1916 ret = -EINVAL;
1275 1917 mutex_unlock(&priv->conf_mutex);
1276 p54_set_vdcf(dev); 1918 return ret;
1277
1278 return 0;
1279} 1919}
1280 1920
1281static int p54_init_xbow_synth(struct ieee80211_hw *dev) 1921static int p54_init_xbow_synth(struct ieee80211_hw *dev)
1282{ 1922{
1283 struct p54_common *priv = dev->priv; 1923 struct p54_common *priv = dev->priv;
1284 struct p54_control_hdr *hdr; 1924 struct sk_buff *skb;
1285 struct p54_tx_control_xbow_synth *xbow; 1925 struct p54_xbow_synth *xbow;
1286 1926
1287 hdr = kzalloc(sizeof(*hdr) + sizeof(*xbow) + 1927 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*xbow) +
1288 priv->tx_hdr_len, GFP_KERNEL); 1928 sizeof(struct p54_hdr),
1289 if (!hdr) 1929 P54_CONTROL_TYPE_XBOW_SYNTH_CFG,
1930 GFP_KERNEL);
1931 if (!skb)
1290 return -ENOMEM; 1932 return -ENOMEM;
1291 1933
1292 hdr = (void *)hdr + priv->tx_hdr_len; 1934 xbow = (struct p54_xbow_synth *)skb_put(skb, sizeof(*xbow));
1293 hdr->magic1 = cpu_to_le16(0x8001);
1294 hdr->len = cpu_to_le16(sizeof(*xbow));
1295 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_XBOW_SYNTH_CFG);
1296 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*xbow));
1297
1298 xbow = (struct p54_tx_control_xbow_synth *) hdr->data;
1299 xbow->magic1 = cpu_to_le16(0x1); 1935 xbow->magic1 = cpu_to_le16(0x1);
1300 xbow->magic2 = cpu_to_le16(0x2); 1936 xbow->magic2 = cpu_to_le16(0x2);
1301 xbow->freq = cpu_to_le16(5390); 1937 xbow->freq = cpu_to_le16(5390);
1302 1938 memset(xbow->padding, 0, sizeof(xbow->padding));
1303 priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*xbow), 1); 1939 priv->tx(dev, skb);
1304
1305 return 0; 1940 return 0;
1306} 1941}
1307 1942
1308static void p54_statistics_timer(unsigned long data) 1943static void p54_work(struct work_struct *work)
1309{ 1944{
1310 struct ieee80211_hw *dev = (struct ieee80211_hw *) data; 1945 struct p54_common *priv = container_of(work, struct p54_common,
1311 struct p54_common *priv = dev->priv; 1946 work.work);
1312 struct p54_control_hdr *hdr; 1947 struct ieee80211_hw *dev = priv->hw;
1313 struct p54_statistics *stats; 1948 struct sk_buff *skb;
1314 1949
1315 BUG_ON(!priv->cached_stats); 1950 if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED))
1951 return ;
1316 1952
1317 hdr = (void *)priv->cached_stats + priv->tx_hdr_len; 1953 /*
1318 hdr->magic1 = cpu_to_le16(0x8000); 1954 * TODO: walk through tx_queue and do the following tasks
1319 hdr->len = cpu_to_le16(sizeof(*stats)); 1955 * 1. initiate bursts.
1320 hdr->type = cpu_to_le16(P54_CONTROL_TYPE_STAT_READBACK); 1956 * 2. cancel stuck frames / reset the device if necessary.
1321 p54_assign_address(dev, NULL, hdr, sizeof(*hdr) + sizeof(*stats)); 1957 */
1958
1959 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL, sizeof(struct p54_hdr) +
1960 sizeof(struct p54_statistics),
1961 P54_CONTROL_TYPE_STAT_READBACK, GFP_KERNEL);
1962 if (!skb)
1963 return ;
1322 1964
1323 priv->tx(dev, hdr, sizeof(*hdr) + sizeof(*stats), 0); 1965 priv->tx(dev, skb);
1324} 1966}
1325 1967
1326static int p54_get_stats(struct ieee80211_hw *dev, 1968static int p54_get_stats(struct ieee80211_hw *dev,
@@ -1328,17 +1970,7 @@ static int p54_get_stats(struct ieee80211_hw *dev,
1328{ 1970{
1329 struct p54_common *priv = dev->priv; 1971 struct p54_common *priv = dev->priv;
1330 1972
1331 del_timer(&priv->stats_timer);
1332 p54_statistics_timer((unsigned long)dev);
1333
1334 if (!wait_for_completion_interruptible_timeout(&priv->stats_comp, HZ)) {
1335 printk(KERN_ERR "%s: device does not respond!\n",
1336 wiphy_name(dev->wiphy));
1337 return -EBUSY;
1338 }
1339
1340 memcpy(stats, &priv->stats, sizeof(*stats)); 1973 memcpy(stats, &priv->stats, sizeof(*stats));
1341
1342 return 0; 1974 return 0;
1343} 1975}
1344 1976
@@ -1352,14 +1984,133 @@ static int p54_get_tx_stats(struct ieee80211_hw *dev,
1352 return 0; 1984 return 0;
1353} 1985}
1354 1986
1987static void p54_bss_info_changed(struct ieee80211_hw *dev,
1988 struct ieee80211_vif *vif,
1989 struct ieee80211_bss_conf *info,
1990 u32 changed)
1991{
1992 struct p54_common *priv = dev->priv;
1993
1994 if (changed & BSS_CHANGED_ERP_SLOT) {
1995 priv->use_short_slot = info->use_short_slot;
1996 p54_set_edcf(dev);
1997 }
1998 if (changed & BSS_CHANGED_BASIC_RATES) {
1999 if (dev->conf.channel->band == IEEE80211_BAND_5GHZ)
2000 priv->basic_rate_mask = (info->basic_rates << 4);
2001 else
2002 priv->basic_rate_mask = info->basic_rates;
2003 p54_setup_mac(dev);
2004 if (priv->fw_var >= 0x500)
2005 p54_scan(dev, P54_SCAN_EXIT, 0);
2006 }
2007 if (changed & BSS_CHANGED_ASSOC) {
2008 if (info->assoc) {
2009 priv->aid = info->aid;
2010 priv->wakeup_timer = info->beacon_int *
2011 info->dtim_period * 5;
2012 p54_setup_mac(dev);
2013 }
2014 }
2015
2016}
2017
2018static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
2019 const u8 *local_address, const u8 *address,
2020 struct ieee80211_key_conf *key)
2021{
2022 struct p54_common *priv = dev->priv;
2023 struct sk_buff *skb;
2024 struct p54_keycache *rxkey;
2025 u8 algo = 0;
2026
2027 if (modparam_nohwcrypt)
2028 return -EOPNOTSUPP;
2029
2030 if (cmd == DISABLE_KEY)
2031 algo = 0;
2032 else {
2033 switch (key->alg) {
2034 case ALG_TKIP:
2035 if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL |
2036 BR_DESC_PRIV_CAP_TKIP)))
2037 return -EOPNOTSUPP;
2038 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2039 algo = P54_CRYPTO_TKIPMICHAEL;
2040 break;
2041 case ALG_WEP:
2042 if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP))
2043 return -EOPNOTSUPP;
2044 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2045 algo = P54_CRYPTO_WEP;
2046 break;
2047 case ALG_CCMP:
2048 if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP))
2049 return -EOPNOTSUPP;
2050 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2051 algo = P54_CRYPTO_AESCCMP;
2052 break;
2053 default:
2054 return -EINVAL;
2055 }
2056 }
2057
2058 if (key->keyidx > priv->rx_keycache_size) {
2059 /*
2060 * The device supports the choosen algorithm, but the firmware
2061 * does not provide enough key slots to store all of them.
2062 * So, incoming frames have to be decoded by the mac80211 stack,
2063 * but we can still offload encryption for outgoing frames.
2064 */
2065
2066 return 0;
2067 }
2068
2069 mutex_lock(&priv->conf_mutex);
2070 skb = p54_alloc_skb(dev, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*rxkey) +
2071 sizeof(struct p54_hdr), P54_CONTROL_TYPE_RX_KEYCACHE,
2072 GFP_ATOMIC);
2073 if (!skb) {
2074 mutex_unlock(&priv->conf_mutex);
2075 return -ENOMEM;
2076 }
2077
2078 /* TODO: some devices have 4 more free slots for rx keys */
2079 rxkey = (struct p54_keycache *)skb_put(skb, sizeof(*rxkey));
2080 rxkey->entry = key->keyidx;
2081 rxkey->key_id = key->keyidx;
2082 rxkey->key_type = algo;
2083 if (address)
2084 memcpy(rxkey->mac, address, ETH_ALEN);
2085 else
2086 memset(rxkey->mac, ~0, ETH_ALEN);
2087 if (key->alg != ALG_TKIP) {
2088 rxkey->key_len = min((u8)16, key->keylen);
2089 memcpy(rxkey->key, key->key, rxkey->key_len);
2090 } else {
2091 rxkey->key_len = 24;
2092 memcpy(rxkey->key, key->key, 16);
2093 memcpy(&(rxkey->key[16]), &(key->key
2094 [NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]), 8);
2095 }
2096
2097 priv->tx(dev, skb);
2098 mutex_unlock(&priv->conf_mutex);
2099 return 0;
2100}
2101
1355static const struct ieee80211_ops p54_ops = { 2102static const struct ieee80211_ops p54_ops = {
1356 .tx = p54_tx, 2103 .tx = p54_tx,
1357 .start = p54_start, 2104 .start = p54_start,
1358 .stop = p54_stop, 2105 .stop = p54_stop,
1359 .add_interface = p54_add_interface, 2106 .add_interface = p54_add_interface,
1360 .remove_interface = p54_remove_interface, 2107 .remove_interface = p54_remove_interface,
2108 .set_tim = p54_set_tim,
2109 .sta_notify = p54_sta_notify,
2110 .set_key = p54_set_key,
1361 .config = p54_config, 2111 .config = p54_config,
1362 .config_interface = p54_config_interface, 2112 .config_interface = p54_config_interface,
2113 .bss_info_changed = p54_bss_info_changed,
1363 .configure_filter = p54_configure_filter, 2114 .configure_filter = p54_configure_filter,
1364 .conf_tx = p54_conf_tx, 2115 .conf_tx = p54_conf_tx,
1365 .get_stats = p54_get_stats, 2116 .get_stats = p54_get_stats,
@@ -1376,32 +2127,43 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
1376 return NULL; 2127 return NULL;
1377 2128
1378 priv = dev->priv; 2129 priv = dev->priv;
2130 priv->hw = dev;
1379 priv->mode = NL80211_IFTYPE_UNSPECIFIED; 2131 priv->mode = NL80211_IFTYPE_UNSPECIFIED;
2132 priv->basic_rate_mask = 0x15f;
1380 skb_queue_head_init(&priv->tx_queue); 2133 skb_queue_head_init(&priv->tx_queue);
1381 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */ 2134 dev->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1382 IEEE80211_HW_RX_INCLUDES_FCS |
1383 IEEE80211_HW_SIGNAL_DBM | 2135 IEEE80211_HW_SIGNAL_DBM |
1384 IEEE80211_HW_NOISE_DBM; 2136 IEEE80211_HW_NOISE_DBM;
1385 2137
1386 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 2138 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2139 BIT(NL80211_IFTYPE_ADHOC) |
2140 BIT(NL80211_IFTYPE_AP) |
2141 BIT(NL80211_IFTYPE_MESH_POINT);
1387 2142
1388 dev->channel_change_time = 1000; /* TODO: find actual value */ 2143 dev->channel_change_time = 1000; /* TODO: find actual value */
1389 2144 priv->tx_stats[0].limit = 1; /* Beacon queue */
1390 priv->tx_stats[0].limit = 1; 2145 priv->tx_stats[1].limit = 1; /* Probe queue for HW scan */
1391 priv->tx_stats[1].limit = 1; 2146 priv->tx_stats[2].limit = 3; /* queue for MLMEs */
1392 priv->tx_stats[2].limit = 1; 2147 priv->tx_stats[3].limit = 3; /* Broadcast / MC queue */
1393 priv->tx_stats[3].limit = 1; 2148 priv->tx_stats[4].limit = 5; /* Data */
1394 priv->tx_stats[4].limit = 5;
1395 dev->queues = 1; 2149 dev->queues = 1;
1396 priv->noise = -94; 2150 priv->noise = -94;
1397 dev->extra_tx_headroom = sizeof(struct p54_control_hdr) + 4 + 2151 /*
1398 sizeof(struct p54_tx_control_allocdata); 2152 * We support at most 8 tries no matter which rate they're at,
2153 * we cannot support max_rates * max_rate_tries as we set it
2154 * here, but setting it correctly to 4/2 or so would limit us
2155 * artificially if the RC algorithm wants just two rates, so
2156 * let's say 4/7, we'll redistribute it at TX time, see the
2157 * comments there.
2158 */
2159 dev->max_rates = 4;
2160 dev->max_rate_tries = 7;
2161 dev->extra_tx_headroom = sizeof(struct p54_hdr) + 4 +
2162 sizeof(struct p54_tx_data);
1399 2163
1400 mutex_init(&priv->conf_mutex); 2164 mutex_init(&priv->conf_mutex);
1401 init_completion(&priv->eeprom_comp); 2165 init_completion(&priv->eeprom_comp);
1402 init_completion(&priv->stats_comp); 2166 INIT_DELAYED_WORK(&priv->work, p54_work);
1403 setup_timer(&priv->stats_timer, p54_statistics_timer,
1404 (unsigned long)dev);
1405 2167
1406 return dev; 2168 return dev;
1407} 2169}
@@ -1410,11 +2172,9 @@ EXPORT_SYMBOL_GPL(p54_init_common);
1410void p54_free_common(struct ieee80211_hw *dev) 2172void p54_free_common(struct ieee80211_hw *dev)
1411{ 2173{
1412 struct p54_common *priv = dev->priv; 2174 struct p54_common *priv = dev->priv;
1413 kfree(priv->cached_stats);
1414 kfree(priv->iq_autocal); 2175 kfree(priv->iq_autocal);
1415 kfree(priv->output_limit); 2176 kfree(priv->output_limit);
1416 kfree(priv->curve_data); 2177 kfree(priv->curve_data);
1417 kfree(priv->cached_vdcf);
1418} 2178}
1419EXPORT_SYMBOL_GPL(p54_free_common); 2179EXPORT_SYMBOL_GPL(p54_free_common);
1420 2180
diff --git a/drivers/net/wireless/p54/p54common.h b/drivers/net/wireless/p54/p54common.h
index 2fa994cfcfed..f5729de83fe1 100644
--- a/drivers/net/wireless/p54/p54common.h
+++ b/drivers/net/wireless/p54/p54common.h
@@ -7,8 +7,12 @@
7 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> 7 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
8 * Copyright (c) 2007, Christian Lamparter <chunkeey@web.de> 8 * Copyright (c) 2007, Christian Lamparter <chunkeey@web.de>
9 * 9 *
10 * Based on the islsm (softmac prism54) driver, which is: 10 * Based on:
11 * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al. 11 * - the islsm (softmac prism54) driver, which is:
12 * Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
13 *
14 * - LMAC API interface header file for STLC4560 (lmac_longbow.h)
15 * Copyright (C) 2007 Conexant Systems, Inc.
12 * 16 *
13 * This program is free software; you can redistribute it and/or modify 17 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as 18 * it under the terms of the GNU General Public License version 2 as
@@ -19,9 +23,24 @@ struct bootrec {
19 __le32 code; 23 __le32 code;
20 __le32 len; 24 __le32 len;
21 u32 data[10]; 25 u32 data[10];
22 __le16 rx_mtu;
23} __attribute__((packed)); 26} __attribute__((packed));
24 27
28#define PDR_SYNTH_FRONTEND_MASK 0x0007
29#define PDR_SYNTH_IQ_CAL_MASK 0x0018
30#define PDR_SYNTH_IQ_CAL_PA_DETECTOR 0x0000
31#define PDR_SYNTH_IQ_CAL_DISABLED 0x0008
32#define PDR_SYNTH_IQ_CAL_ZIF 0x0010
33#define PDR_SYNTH_FAA_SWITCH_MASK 0x0020
34#define PDR_SYNTH_FAA_SWITCH_ENABLED 0x0001
35#define PDR_SYNTH_24_GHZ_MASK 0x0040
36#define PDR_SYNTH_24_GHZ_DISABLED 0x0040
37#define PDR_SYNTH_5_GHZ_MASK 0x0080
38#define PDR_SYNTH_5_GHZ_DISABLED 0x0080
39#define PDR_SYNTH_RX_DIV_MASK 0x0100
40#define PDR_SYNTH_RX_DIV_SUPPORTED 0x0100
41#define PDR_SYNTH_TX_DIV_MASK 0x0200
42#define PDR_SYNTH_TX_DIV_SUPPORTED 0x0200
43
25struct bootrec_exp_if { 44struct bootrec_exp_if {
26 __le16 role; 45 __le16 role;
27 __le16 if_id; 46 __le16 if_id;
@@ -30,6 +49,13 @@ struct bootrec_exp_if {
30 __le16 top_compat; 49 __le16 top_compat;
31} __attribute__((packed)); 50} __attribute__((packed));
32 51
52#define BR_DESC_PRIV_CAP_WEP BIT(0)
53#define BR_DESC_PRIV_CAP_TKIP BIT(1)
54#define BR_DESC_PRIV_CAP_MICHAEL BIT(2)
55#define BR_DESC_PRIV_CAP_CCX_CP BIT(3)
56#define BR_DESC_PRIV_CAP_CCX_MIC BIT(4)
57#define BR_DESC_PRIV_CAP_AESCCMP BIT(5)
58
33struct bootrec_desc { 59struct bootrec_desc {
34 __le16 modes; 60 __le16 modes;
35 __le16 flags; 61 __le16 flags;
@@ -37,8 +63,15 @@ struct bootrec_desc {
37 __le32 rx_end; 63 __le32 rx_end;
38 u8 headroom; 64 u8 headroom;
39 u8 tailroom; 65 u8 tailroom;
40 u8 unimportant[6]; 66 u8 tx_queues;
67 u8 tx_depth;
68 u8 privacy_caps;
69 u8 rx_keycache_size;
70 u8 time_size;
71 u8 padding;
41 u8 rates[16]; 72 u8 rates[16];
73 u8 padding2[4];
74 __le16 rx_mtu;
42} __attribute__((packed)); 75} __attribute__((packed));
43 76
44#define BR_CODE_MIN 0x80000000 77#define BR_CODE_MIN 0x80000000
@@ -51,6 +84,31 @@ struct bootrec_desc {
51#define BR_CODE_END_OF_BRA 0xFF0000FF 84#define BR_CODE_END_OF_BRA 0xFF0000FF
52#define LEGACY_BR_CODE_END_OF_BRA 0xFFFFFFFF 85#define LEGACY_BR_CODE_END_OF_BRA 0xFFFFFFFF
53 86
87#define P54_HDR_FLAG_DATA_ALIGN BIT(14)
88#define P54_HDR_FLAG_DATA_OUT_PROMISC BIT(0)
89#define P54_HDR_FLAG_DATA_OUT_TIMESTAMP BIT(1)
90#define P54_HDR_FLAG_DATA_OUT_SEQNR BIT(2)
91#define P54_HDR_FLAG_DATA_OUT_BIT3 BIT(3)
92#define P54_HDR_FLAG_DATA_OUT_BURST BIT(4)
93#define P54_HDR_FLAG_DATA_OUT_NOCANCEL BIT(5)
94#define P54_HDR_FLAG_DATA_OUT_CLEARTIM BIT(6)
95#define P54_HDR_FLAG_DATA_OUT_HITCHHIKE BIT(7)
96#define P54_HDR_FLAG_DATA_OUT_COMPRESS BIT(8)
97#define P54_HDR_FLAG_DATA_OUT_CONCAT BIT(9)
98#define P54_HDR_FLAG_DATA_OUT_PCS_ACCEPT BIT(10)
99#define P54_HDR_FLAG_DATA_OUT_WAITEOSP BIT(11)
100
101#define P54_HDR_FLAG_DATA_IN_FCS_GOOD BIT(0)
102#define P54_HDR_FLAG_DATA_IN_MATCH_MAC BIT(1)
103#define P54_HDR_FLAG_DATA_IN_MCBC BIT(2)
104#define P54_HDR_FLAG_DATA_IN_BEACON BIT(3)
105#define P54_HDR_FLAG_DATA_IN_MATCH_BSS BIT(4)
106#define P54_HDR_FLAG_DATA_IN_BCAST_BSS BIT(5)
107#define P54_HDR_FLAG_DATA_IN_DATA BIT(6)
108#define P54_HDR_FLAG_DATA_IN_TRUNCATED BIT(7)
109#define P54_HDR_FLAG_DATA_IN_BIT8 BIT(8)
110#define P54_HDR_FLAG_DATA_IN_TRANSPARENT BIT(9)
111
54/* PDA defines are Copyright (C) 2005 Nokia Corporation (taken from islsm_pda.h) */ 112/* PDA defines are Copyright (C) 2005 Nokia Corporation (taken from islsm_pda.h) */
55 113
56struct pda_entry { 114struct pda_entry {
@@ -117,6 +175,11 @@ struct pda_pa_curve_data {
117 u8 data[0]; 175 u8 data[0];
118} __attribute__ ((packed)); 176} __attribute__ ((packed));
119 177
178struct pda_rssi_cal_entry {
179 __le16 mul;
180 __le16 add;
181} __attribute__ ((packed));
182
120/* 183/*
121 * this defines the PDR codes used to build PDAs as defined in document 184 * this defines the PDR codes used to build PDAs as defined in document
122 * number 553155. The current implementation mirrors version 1.1 of the 185 * number 553155. The current implementation mirrors version 1.1 of the
@@ -165,6 +228,19 @@ struct pda_pa_curve_data {
165#define PDR_BASEBAND_REGISTERS 0x8000 228#define PDR_BASEBAND_REGISTERS 0x8000
166#define PDR_PER_CHANNEL_BASEBAND_REGISTERS 0x8001 229#define PDR_PER_CHANNEL_BASEBAND_REGISTERS 0x8001
167 230
231/* PDR definitions for default country & country list */
232#define PDR_COUNTRY_CERT_CODE 0x80
233#define PDR_COUNTRY_CERT_CODE_REAL 0x00
234#define PDR_COUNTRY_CERT_CODE_PSEUDO 0x80
235#define PDR_COUNTRY_CERT_BAND 0x40
236#define PDR_COUNTRY_CERT_BAND_2GHZ 0x00
237#define PDR_COUNTRY_CERT_BAND_5GHZ 0x40
238#define PDR_COUNTRY_CERT_IODOOR 0x30
239#define PDR_COUNTRY_CERT_IODOOR_BOTH 0x00
240#define PDR_COUNTRY_CERT_IODOOR_INDOOR 0x20
241#define PDR_COUNTRY_CERT_IODOOR_OUTDOOR 0x30
242#define PDR_COUNTRY_CERT_INDEX 0x0F
243
168/* stored in skb->cb */ 244/* stored in skb->cb */
169struct memrecord { 245struct memrecord {
170 u32 start_addr; 246 u32 start_addr;
@@ -172,41 +248,108 @@ struct memrecord {
172}; 248};
173 249
174struct p54_eeprom_lm86 { 250struct p54_eeprom_lm86 {
175 __le16 offset; 251 union {
176 __le16 len; 252 struct {
177 u8 data[0]; 253 __le16 offset;
254 __le16 len;
255 u8 data[0];
256 } v1;
257 struct {
258 __le32 offset;
259 __le16 len;
260 u8 magic2;
261 u8 pad;
262 u8 magic[4];
263 u8 data[0];
264 } v2;
265 } __attribute__ ((packed));
178} __attribute__ ((packed)); 266} __attribute__ ((packed));
179 267
180struct p54_rx_hdr { 268enum p54_rx_decrypt_status {
181 __le16 magic; 269 P54_DECRYPT_NONE = 0,
270 P54_DECRYPT_OK,
271 P54_DECRYPT_NOKEY,
272 P54_DECRYPT_NOMICHAEL,
273 P54_DECRYPT_NOCKIPMIC,
274 P54_DECRYPT_FAIL_WEP,
275 P54_DECRYPT_FAIL_TKIP,
276 P54_DECRYPT_FAIL_MICHAEL,
277 P54_DECRYPT_FAIL_CKIPKP,
278 P54_DECRYPT_FAIL_CKIPMIC,
279 P54_DECRYPT_FAIL_AESCCMP
280};
281
282struct p54_rx_data {
283 __le16 flags;
182 __le16 len; 284 __le16 len;
183 __le16 freq; 285 __le16 freq;
184 u8 antenna; 286 u8 antenna;
185 u8 rate; 287 u8 rate;
186 u8 rssi; 288 u8 rssi;
187 u8 quality; 289 u8 quality;
188 u16 unknown2; 290 u8 decrypt_status;
291 u8 rssi_raw;
189 __le32 tsf32; 292 __le32 tsf32;
190 __le32 unalloc0; 293 __le32 unalloc0;
191 u8 align[0]; 294 u8 align[0];
192} __attribute__ ((packed)); 295} __attribute__ ((packed));
193 296
194struct p54_frame_sent_hdr { 297enum p54_trap_type {
298 P54_TRAP_SCAN = 0,
299 P54_TRAP_TIMER,
300 P54_TRAP_BEACON_TX,
301 P54_TRAP_FAA_RADIO_ON,
302 P54_TRAP_FAA_RADIO_OFF,
303 P54_TRAP_RADAR,
304 P54_TRAP_NO_BEACON,
305 P54_TRAP_TBTT,
306 P54_TRAP_SCO_ENTER,
307 P54_TRAP_SCO_EXIT
308};
309
310struct p54_trap {
311 __le16 event;
312 __le16 frequency;
313} __attribute__ ((packed));
314
315enum p54_frame_sent_status {
316 P54_TX_OK = 0,
317 P54_TX_FAILED,
318 P54_TX_PSM,
319 P54_TX_PSM_CANCELLED = 4
320};
321
322struct p54_frame_sent {
195 u8 status; 323 u8 status;
196 u8 retries; 324 u8 tries;
197 __le16 ack_rssi; 325 u8 ack_rssi;
326 u8 quality;
198 __le16 seq; 327 __le16 seq;
199 u16 rate; 328 u8 antenna;
329 u8 padding;
200} __attribute__ ((packed)); 330} __attribute__ ((packed));
201 331
202struct p54_tx_control_allocdata { 332enum p54_tx_data_crypt {
333 P54_CRYPTO_NONE = 0,
334 P54_CRYPTO_WEP,
335 P54_CRYPTO_TKIP,
336 P54_CRYPTO_TKIPMICHAEL,
337 P54_CRYPTO_CCX_WEPMIC,
338 P54_CRYPTO_CCX_KPMIC,
339 P54_CRYPTO_CCX_KP,
340 P54_CRYPTO_AESCCMP
341};
342
343struct p54_tx_data {
203 u8 rateset[8]; 344 u8 rateset[8];
204 u8 unalloc0[2]; 345 u8 rts_rate_idx;
346 u8 crypt_offset;
205 u8 key_type; 347 u8 key_type;
206 u8 key_len; 348 u8 key_len;
207 u8 key[16]; 349 u8 key[16];
208 u8 hw_queue; 350 u8 hw_queue;
209 u8 unalloc1[9]; 351 u8 backlog;
352 __le16 durations[4];
210 u8 tx_antenna; 353 u8 tx_antenna;
211 u8 output_power; 354 u8 output_power;
212 u8 cts_rate; 355 u8 cts_rate;
@@ -214,8 +357,23 @@ struct p54_tx_control_allocdata {
214 u8 align[0]; 357 u8 align[0];
215} __attribute__ ((packed)); 358} __attribute__ ((packed));
216 359
217struct p54_tx_control_filter { 360/* unit is ms */
218 __le16 filter_type; 361#define P54_TX_FRAME_LIFETIME 2000
362#define P54_TX_TIMEOUT 4000
363#define P54_STATISTICS_UPDATE 5000
364
365#define P54_FILTER_TYPE_NONE 0
366#define P54_FILTER_TYPE_STATION BIT(0)
367#define P54_FILTER_TYPE_IBSS BIT(1)
368#define P54_FILTER_TYPE_AP BIT(2)
369#define P54_FILTER_TYPE_TRANSPARENT BIT(3)
370#define P54_FILTER_TYPE_PROMISCUOUS BIT(4)
371#define P54_FILTER_TYPE_HIBERNATE BIT(5)
372#define P54_FILTER_TYPE_NOACK BIT(6)
373#define P54_FILTER_TYPE_RX_DISABLED BIT(7)
374
375struct p54_setup_mac {
376 __le16 mac_mode;
219 u8 mac_addr[ETH_ALEN]; 377 u8 mac_addr[ETH_ALEN];
220 u8 bssid[ETH_ALEN]; 378 u8 bssid[ETH_ALEN];
221 u8 rx_antenna; 379 u8 rx_antenna;
@@ -235,17 +393,29 @@ struct p54_tx_control_filter {
235 __le16 max_rx; 393 __le16 max_rx;
236 __le16 rxhw; 394 __le16 rxhw;
237 __le16 timer; 395 __le16 timer;
238 __le16 unalloc0; 396 __le16 truncate;
239 __le32 unalloc1; 397 __le32 basic_rate_mask;
398 u8 sbss_offset;
399 u8 mcast_window;
400 u8 rx_rssi_threshold;
401 u8 rx_ed_threshold;
402 __le32 ref_clock;
403 __le16 lpf_bandwidth;
404 __le16 osc_start_delay;
240 } v2 __attribute__ ((packed)); 405 } v2 __attribute__ ((packed));
241 } __attribute__ ((packed)); 406 } __attribute__ ((packed));
242} __attribute__ ((packed)); 407} __attribute__ ((packed));
243 408
244#define P54_TX_CONTROL_FILTER_V1_LEN (sizeof(struct p54_tx_control_filter)) 409#define P54_SETUP_V1_LEN 40
245#define P54_TX_CONTROL_FILTER_V2_LEN (sizeof(struct p54_tx_control_filter)-8) 410#define P54_SETUP_V2_LEN (sizeof(struct p54_setup_mac))
246 411
247struct p54_tx_control_channel { 412#define P54_SCAN_EXIT BIT(0)
248 __le16 flags; 413#define P54_SCAN_TRAP BIT(1)
414#define P54_SCAN_ACTIVE BIT(2)
415#define P54_SCAN_FILTER BIT(3)
416
417struct p54_scan {
418 __le16 mode;
249 __le16 dwell; 419 __le16 dwell;
250 u8 padding1[20]; 420 u8 padding1[20];
251 struct pda_iq_autocal_entry iq_autocal; 421 struct pda_iq_autocal_entry iq_autocal;
@@ -261,45 +431,35 @@ struct p54_tx_control_channel {
261 u8 dup_16qam; 431 u8 dup_16qam;
262 u8 dup_64qam; 432 u8 dup_64qam;
263 union { 433 union {
264 struct { 434 struct pda_rssi_cal_entry v1_rssi;
265 __le16 rssical_mul;
266 __le16 rssical_add;
267 } v1 __attribute__ ((packed));
268 435
269 struct { 436 struct {
270 __le32 basic_rate_mask; 437 __le32 basic_rate_mask;
271 u8 rts_rates[8]; 438 u8 rts_rates[8];
272 __le16 rssical_mul; 439 struct pda_rssi_cal_entry rssi;
273 __le16 rssical_add;
274 } v2 __attribute__ ((packed)); 440 } v2 __attribute__ ((packed));
275 } __attribute__ ((packed)); 441 } __attribute__ ((packed));
276} __attribute__ ((packed)); 442} __attribute__ ((packed));
277 443
278#define P54_TX_CONTROL_CHANNEL_V1_LEN (sizeof(struct p54_tx_control_channel)-12) 444#define P54_SCAN_V1_LEN 0x70
279#define P54_TX_CONTROL_CHANNEL_V2_LEN (sizeof(struct p54_tx_control_channel)) 445#define P54_SCAN_V2_LEN 0x7c
280 446
281struct p54_tx_control_led { 447struct p54_led {
282 __le16 mode; 448 __le16 mode;
283 __le16 led_temporary; 449 __le16 led_temporary;
284 __le16 led_permanent; 450 __le16 led_permanent;
285 __le16 duration; 451 __le16 duration;
286} __attribute__ ((packed)); 452} __attribute__ ((packed));
287 453
288struct p54_tx_vdcf_queues { 454struct p54_edcf {
289 __le16 aifs; 455 u8 flags;
290 __le16 cwmin;
291 __le16 cwmax;
292 __le16 txop;
293} __attribute__ ((packed));
294
295struct p54_tx_control_vdcf {
296 u8 padding;
297 u8 slottime; 456 u8 slottime;
298 u8 magic1; 457 u8 sifs;
299 u8 magic2; 458 u8 eofpad;
300 struct p54_tx_vdcf_queues queue[8]; 459 struct p54_edcf_queue_param queue[8];
301 u8 pad2[4]; 460 u8 mapping[4];
302 __le16 frameburst; 461 __le16 frameburst;
462 __le16 round_trip_delay;
303} __attribute__ ((packed)); 463} __attribute__ ((packed));
304 464
305struct p54_statistics { 465struct p54_statistics {
@@ -312,14 +472,103 @@ struct p54_statistics {
312 __le32 tsf32; 472 __le32 tsf32;
313 __le32 airtime; 473 __le32 airtime;
314 __le32 noise; 474 __le32 noise;
315 __le32 unkn[10]; /* CCE / CCA / RADAR */ 475 __le32 sample_noise[8];
476 __le32 sample_cca;
477 __le32 sample_tx;
316} __attribute__ ((packed)); 478} __attribute__ ((packed));
317 479
318struct p54_tx_control_xbow_synth { 480struct p54_xbow_synth {
319 __le16 magic1; 481 __le16 magic1;
320 __le16 magic2; 482 __le16 magic2;
321 __le16 freq; 483 __le16 freq;
322 u32 padding[5]; 484 u32 padding[5];
323} __attribute__ ((packed)); 485} __attribute__ ((packed));
324 486
487struct p54_timer {
488 __le32 interval;
489} __attribute__ ((packed));
490
491struct p54_keycache {
492 u8 entry;
493 u8 key_id;
494 u8 mac[ETH_ALEN];
495 u8 padding[2];
496 u8 key_type;
497 u8 key_len;
498 u8 key[24];
499} __attribute__ ((packed));
500
501struct p54_burst {
502 u8 flags;
503 u8 queue;
504 u8 backlog;
505 u8 pad;
506 __le16 durations[32];
507} __attribute__ ((packed));
508
509struct p54_psm_interval {
510 __le16 interval;
511 __le16 periods;
512} __attribute__ ((packed));
513
514#define P54_PSM BIT(0)
515#define P54_PSM_DTIM BIT(1)
516#define P54_PSM_MCBC BIT(2)
517#define P54_PSM_CHECKSUM BIT(3)
518#define P54_PSM_SKIP_MORE_DATA BIT(4)
519#define P54_PSM_BEACON_TIMEOUT BIT(5)
520#define P54_PSM_HFOSLEEP BIT(6)
521#define P54_PSM_AUTOSWITCH_SLEEP BIT(7)
522#define P54_PSM_LPIT BIT(8)
523#define P54_PSM_BF_UCAST_SKIP BIT(9)
524#define P54_PSM_BF_MCAST_SKIP BIT(10)
525
526struct p54_psm {
527 __le16 mode;
528 __le16 aid;
529 struct p54_psm_interval intervals[4];
530 u8 beacon_rssi_skip_max;
531 u8 rssi_delta_threshold;
532 u8 nr;
533 u8 exclude[1];
534} __attribute__ ((packed));
535
536#define MC_FILTER_ADDRESS_NUM 4
537
538struct p54_group_address_table {
539 __le16 filter_enable;
540 __le16 num_address;
541 u8 mac_list[MC_FILTER_ADDRESS_NUM][ETH_ALEN];
542} __attribute__ ((packed));
543
544struct p54_txcancel {
545 __le32 req_id;
546} __attribute__ ((packed));
547
548struct p54_sta_unlock {
549 u8 addr[ETH_ALEN];
550 u16 padding;
551} __attribute__ ((packed));
552
553#define P54_TIM_CLEAR BIT(15)
554struct p54_tim {
555 u8 count;
556 u8 padding[3];
557 __le16 entry[8];
558} __attribute__ ((packed));
559
560struct p54_cce_quiet {
561 __le32 period;
562} __attribute__ ((packed));
563
564struct p54_bt_balancer {
565 __le16 prio_thresh;
566 __le16 acl_thresh;
567} __attribute__ ((packed));
568
569struct p54_arp_table {
570 __le16 filter_enable;
571 u8 ipv4_addr[4];
572} __attribute__ ((packed));
573
325#endif /* P54COMMON_H */ 574#endif /* P54COMMON_H */
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 88b3cad8b65e..aa367a0ddc49 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -28,6 +28,7 @@ MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
28MODULE_DESCRIPTION("Prism54 PCI wireless driver"); 28MODULE_DESCRIPTION("Prism54 PCI wireless driver");
29MODULE_LICENSE("GPL"); 29MODULE_LICENSE("GPL");
30MODULE_ALIAS("prism54pci"); 30MODULE_ALIAS("prism54pci");
31MODULE_FIRMWARE("isl3886pci");
31 32
32static struct pci_device_id p54p_table[] __devinitdata = { 33static struct pci_device_id p54p_table[] __devinitdata = {
33 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ 34 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
@@ -46,7 +47,6 @@ MODULE_DEVICE_TABLE(pci, p54p_table);
46static int p54p_upload_firmware(struct ieee80211_hw *dev) 47static int p54p_upload_firmware(struct ieee80211_hw *dev)
47{ 48{
48 struct p54p_priv *priv = dev->priv; 49 struct p54p_priv *priv = dev->priv;
49 const struct firmware *fw_entry = NULL;
50 __le32 reg; 50 __le32 reg;
51 int err; 51 int err;
52 __le32 *data; 52 __le32 *data;
@@ -72,21 +72,15 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev)
72 P54P_WRITE(ctrl_stat, reg); 72 P54P_WRITE(ctrl_stat, reg);
73 wmb(); 73 wmb();
74 74
75 err = request_firmware(&fw_entry, "isl3886", &priv->pdev->dev); 75 /* wait for the firmware to reset properly */
76 if (err) { 76 mdelay(10);
77 printk(KERN_ERR "%s (p54pci): cannot find firmware "
78 "(isl3886)\n", pci_name(priv->pdev));
79 return err;
80 }
81 77
82 err = p54_parse_firmware(dev, fw_entry); 78 err = p54_parse_firmware(dev, priv->firmware);
83 if (err) { 79 if (err)
84 release_firmware(fw_entry);
85 return err; 80 return err;
86 }
87 81
88 data = (__le32 *) fw_entry->data; 82 data = (__le32 *) priv->firmware->data;
89 remains = fw_entry->size; 83 remains = priv->firmware->size;
90 device_addr = ISL38XX_DEV_FIRMWARE_ADDR; 84 device_addr = ISL38XX_DEV_FIRMWARE_ADDR;
91 while (remains) { 85 while (remains) {
92 u32 i = 0; 86 u32 i = 0;
@@ -104,8 +98,6 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev)
104 P54P_READ(int_enable); 98 P54P_READ(int_enable);
105 } 99 }
106 100
107 release_firmware(fw_entry);
108
109 reg = P54P_READ(ctrl_stat); 101 reg = P54P_READ(ctrl_stat);
110 reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); 102 reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN);
111 reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); 103 reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
@@ -235,7 +227,9 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
235 227
236 while (i != idx) { 228 while (i != idx) {
237 desc = &ring[i]; 229 desc = &ring[i];
238 kfree(tx_buf[i]); 230 if (tx_buf[i])
231 if (FREE_AFTER_TX((struct sk_buff *) tx_buf[i]))
232 p54_free_skb(dev, tx_buf[i]);
239 tx_buf[i] = NULL; 233 tx_buf[i] = NULL;
240 234
241 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), 235 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
@@ -306,8 +300,7 @@ static irqreturn_t p54p_interrupt(int irq, void *dev_id)
306 return reg ? IRQ_HANDLED : IRQ_NONE; 300 return reg ? IRQ_HANDLED : IRQ_NONE;
307} 301}
308 302
309static void p54p_tx(struct ieee80211_hw *dev, struct p54_control_hdr *data, 303static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
310 size_t len, int free_on_tx)
311{ 304{
312 struct p54p_priv *priv = dev->priv; 305 struct p54p_priv *priv = dev->priv;
313 struct p54p_ring_control *ring_control = priv->ring_control; 306 struct p54p_ring_control *ring_control = priv->ring_control;
@@ -322,28 +315,21 @@ static void p54p_tx(struct ieee80211_hw *dev, struct p54_control_hdr *data,
322 idx = le32_to_cpu(ring_control->host_idx[1]); 315 idx = le32_to_cpu(ring_control->host_idx[1]);
323 i = idx % ARRAY_SIZE(ring_control->tx_data); 316 i = idx % ARRAY_SIZE(ring_control->tx_data);
324 317
325 mapping = pci_map_single(priv->pdev, data, len, PCI_DMA_TODEVICE); 318 priv->tx_buf_data[i] = skb;
319 mapping = pci_map_single(priv->pdev, skb->data, skb->len,
320 PCI_DMA_TODEVICE);
326 desc = &ring_control->tx_data[i]; 321 desc = &ring_control->tx_data[i];
327 desc->host_addr = cpu_to_le32(mapping); 322 desc->host_addr = cpu_to_le32(mapping);
328 desc->device_addr = data->req_id; 323 desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
329 desc->len = cpu_to_le16(len); 324 desc->len = cpu_to_le16(skb->len);
330 desc->flags = 0; 325 desc->flags = 0;
331 326
332 wmb(); 327 wmb();
333 ring_control->host_idx[1] = cpu_to_le32(idx + 1); 328 ring_control->host_idx[1] = cpu_to_le32(idx + 1);
334
335 if (free_on_tx)
336 priv->tx_buf_data[i] = data;
337
338 spin_unlock_irqrestore(&priv->lock, flags); 329 spin_unlock_irqrestore(&priv->lock, flags);
339 330
340 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); 331 P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
341 P54P_READ(dev_int); 332 P54P_READ(dev_int);
342
343 /* FIXME: unlikely to happen because the device usually runs out of
344 memory before we fill the ring up, but we can make it impossible */
345 if (idx - device_idx > ARRAY_SIZE(ring_control->tx_data) - 2)
346 printk(KERN_INFO "%s: tx overflow.\n", wiphy_name(dev->wiphy));
347} 333}
348 334
349static void p54p_stop(struct ieee80211_hw *dev) 335static void p54p_stop(struct ieee80211_hw *dev)
@@ -393,7 +379,7 @@ static void p54p_stop(struct ieee80211_hw *dev)
393 le16_to_cpu(desc->len), 379 le16_to_cpu(desc->len),
394 PCI_DMA_TODEVICE); 380 PCI_DMA_TODEVICE);
395 381
396 kfree(priv->tx_buf_data[i]); 382 p54_free_skb(dev, priv->tx_buf_data[i]);
397 priv->tx_buf_data[i] = NULL; 383 priv->tx_buf_data[i] = NULL;
398 } 384 }
399 385
@@ -405,7 +391,7 @@ static void p54p_stop(struct ieee80211_hw *dev)
405 le16_to_cpu(desc->len), 391 le16_to_cpu(desc->len),
406 PCI_DMA_TODEVICE); 392 PCI_DMA_TODEVICE);
407 393
408 kfree(priv->tx_buf_mgmt[i]); 394 p54_free_skb(dev, priv->tx_buf_mgmt[i]);
409 priv->tx_buf_mgmt[i] = NULL; 395 priv->tx_buf_mgmt[i] = NULL;
410 } 396 }
411 397
@@ -481,7 +467,6 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
481 struct ieee80211_hw *dev; 467 struct ieee80211_hw *dev;
482 unsigned long mem_addr, mem_len; 468 unsigned long mem_addr, mem_len;
483 int err; 469 int err;
484 DECLARE_MAC_BUF(mac);
485 470
486 err = pci_enable_device(pdev); 471 err = pci_enable_device(pdev);
487 if (err) { 472 if (err) {
@@ -495,15 +480,14 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
495 if (mem_len < sizeof(struct p54p_csr)) { 480 if (mem_len < sizeof(struct p54p_csr)) {
496 printk(KERN_ERR "%s (p54pci): Too short PCI resources\n", 481 printk(KERN_ERR "%s (p54pci): Too short PCI resources\n",
497 pci_name(pdev)); 482 pci_name(pdev));
498 pci_disable_device(pdev); 483 goto err_disable_dev;
499 return err;
500 } 484 }
501 485
502 err = pci_request_regions(pdev, "p54pci"); 486 err = pci_request_regions(pdev, "p54pci");
503 if (err) { 487 if (err) {
504 printk(KERN_ERR "%s (p54pci): Cannot obtain PCI resources\n", 488 printk(KERN_ERR "%s (p54pci): Cannot obtain PCI resources\n",
505 pci_name(pdev)); 489 pci_name(pdev));
506 return err; 490 goto err_disable_dev;
507 } 491 }
508 492
509 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || 493 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
@@ -556,6 +540,17 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
556 spin_lock_init(&priv->lock); 540 spin_lock_init(&priv->lock);
557 tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev); 541 tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);
558 542
543 err = request_firmware(&priv->firmware, "isl3886pci",
544 &priv->pdev->dev);
545 if (err) {
546 printk(KERN_ERR "%s (p54pci): cannot find firmware "
547 "(isl3886pci)\n", pci_name(priv->pdev));
548 err = request_firmware(&priv->firmware, "isl3886",
549 &priv->pdev->dev);
550 if (err)
551 goto err_free_common;
552 }
553
559 err = p54p_open(dev); 554 err = p54p_open(dev);
560 if (err) 555 if (err)
561 goto err_free_common; 556 goto err_free_common;
@@ -574,6 +569,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
574 return 0; 569 return 0;
575 570
576 err_free_common: 571 err_free_common:
572 release_firmware(priv->firmware);
577 p54_free_common(dev); 573 p54_free_common(dev);
578 pci_free_consistent(pdev, sizeof(*priv->ring_control), 574 pci_free_consistent(pdev, sizeof(*priv->ring_control),
579 priv->ring_control, priv->ring_control_dma); 575 priv->ring_control, priv->ring_control_dma);
@@ -587,6 +583,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
587 583
588 err_free_reg: 584 err_free_reg:
589 pci_release_regions(pdev); 585 pci_release_regions(pdev);
586 err_disable_dev:
590 pci_disable_device(pdev); 587 pci_disable_device(pdev);
591 return err; 588 return err;
592} 589}
@@ -601,6 +598,7 @@ static void __devexit p54p_remove(struct pci_dev *pdev)
601 598
602 ieee80211_unregister_hw(dev); 599 ieee80211_unregister_hw(dev);
603 priv = dev->priv; 600 priv = dev->priv;
601 release_firmware(priv->firmware);
604 pci_free_consistent(pdev, sizeof(*priv->ring_control), 602 pci_free_consistent(pdev, sizeof(*priv->ring_control),
605 priv->ring_control, priv->ring_control_dma); 603 priv->ring_control, priv->ring_control_dma);
606 p54_free_common(dev); 604 p54_free_common(dev);
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 4a6778070afc..fbb683953fb2 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -93,7 +93,7 @@ struct p54p_priv {
93 struct pci_dev *pdev; 93 struct pci_dev *pdev;
94 struct p54p_csr __iomem *map; 94 struct p54p_csr __iomem *map;
95 struct tasklet_struct rx_tasklet; 95 struct tasklet_struct rx_tasklet;
96 96 const struct firmware *firmware;
97 spinlock_t lock; 97 spinlock_t lock;
98 struct p54p_ring_control *ring_control; 98 struct p54p_ring_control *ring_control;
99 dma_addr_t ring_control_dma; 99 dma_addr_t ring_control_dma;
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 75d749bccb0d..c44a200059d2 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -28,6 +28,8 @@ MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
28MODULE_DESCRIPTION("Prism54 USB wireless driver"); 28MODULE_DESCRIPTION("Prism54 USB wireless driver");
29MODULE_LICENSE("GPL"); 29MODULE_LICENSE("GPL");
30MODULE_ALIAS("prism54usb"); 30MODULE_ALIAS("prism54usb");
31MODULE_FIRMWARE("isl3886usb");
32MODULE_FIRMWARE("isl3887usb");
31 33
32static struct usb_device_id p54u_table[] __devinitdata = { 34static struct usb_device_id p54u_table[] __devinitdata = {
33 /* Version 1 devices (pci chip + net2280) */ 35 /* Version 1 devices (pci chip + net2280) */
@@ -84,13 +86,13 @@ static void p54u_rx_cb(struct urb *urb)
84 struct ieee80211_hw *dev = info->dev; 86 struct ieee80211_hw *dev = info->dev;
85 struct p54u_priv *priv = dev->priv; 87 struct p54u_priv *priv = dev->priv;
86 88
89 skb_unlink(skb, &priv->rx_queue);
90
87 if (unlikely(urb->status)) { 91 if (unlikely(urb->status)) {
88 info->urb = NULL; 92 dev_kfree_skb_irq(skb);
89 usb_free_urb(urb);
90 return; 93 return;
91 } 94 }
92 95
93 skb_unlink(skb, &priv->rx_queue);
94 skb_put(skb, urb->actual_length); 96 skb_put(skb, urb->actual_length);
95 97
96 if (priv->hw_type == P54U_NET2280) 98 if (priv->hw_type == P54U_NET2280)
@@ -103,7 +105,6 @@ static void p54u_rx_cb(struct urb *urb)
103 if (p54_rx(dev, skb)) { 105 if (p54_rx(dev, skb)) {
104 skb = dev_alloc_skb(priv->common.rx_mtu + 32); 106 skb = dev_alloc_skb(priv->common.rx_mtu + 32);
105 if (unlikely(!skb)) { 107 if (unlikely(!skb)) {
106 usb_free_urb(urb);
107 /* TODO check rx queue length and refill *somewhere* */ 108 /* TODO check rx queue length and refill *somewhere* */
108 return; 109 return;
109 } 110 }
@@ -113,7 +114,6 @@ static void p54u_rx_cb(struct urb *urb)
113 info->dev = dev; 114 info->dev = dev;
114 urb->transfer_buffer = skb_tail_pointer(skb); 115 urb->transfer_buffer = skb_tail_pointer(skb);
115 urb->context = skb; 116 urb->context = skb;
116 skb_queue_tail(&priv->rx_queue, skb);
117 } else { 117 } else {
118 if (priv->hw_type == P54U_NET2280) 118 if (priv->hw_type == P54U_NET2280)
119 skb_push(skb, priv->common.tx_hdr_len); 119 skb_push(skb, priv->common.tx_hdr_len);
@@ -128,40 +128,56 @@ static void p54u_rx_cb(struct urb *urb)
128 WARN_ON(1); 128 WARN_ON(1);
129 urb->transfer_buffer = skb_tail_pointer(skb); 129 urb->transfer_buffer = skb_tail_pointer(skb);
130 } 130 }
131
132 skb_queue_tail(&priv->rx_queue, skb);
133 } 131 }
134 132 skb_queue_tail(&priv->rx_queue, skb);
135 usb_submit_urb(urb, GFP_ATOMIC); 133 usb_anchor_urb(urb, &priv->submitted);
134 if (usb_submit_urb(urb, GFP_ATOMIC)) {
135 skb_unlink(skb, &priv->rx_queue);
136 usb_unanchor_urb(urb);
137 dev_kfree_skb_irq(skb);
138 }
136} 139}
137 140
138static void p54u_tx_cb(struct urb *urb) 141static void p54u_tx_cb(struct urb *urb)
139{ 142{
140 usb_free_urb(urb); 143 struct sk_buff *skb = urb->context;
144 struct ieee80211_hw *dev = (struct ieee80211_hw *)
145 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
146 struct p54u_priv *priv = dev->priv;
147
148 skb_pull(skb, priv->common.tx_hdr_len);
149 if (FREE_AFTER_TX(skb))
150 p54_free_skb(dev, skb);
141} 151}
142 152
143static void p54u_tx_free_cb(struct urb *urb) 153static void p54u_tx_dummy_cb(struct urb *urb) { }
154
155static void p54u_free_urbs(struct ieee80211_hw *dev)
144{ 156{
145 kfree(urb->transfer_buffer); 157 struct p54u_priv *priv = dev->priv;
146 usb_free_urb(urb); 158 usb_kill_anchored_urbs(&priv->submitted);
147} 159}
148 160
149static int p54u_init_urbs(struct ieee80211_hw *dev) 161static int p54u_init_urbs(struct ieee80211_hw *dev)
150{ 162{
151 struct p54u_priv *priv = dev->priv; 163 struct p54u_priv *priv = dev->priv;
152 struct urb *entry; 164 struct urb *entry = NULL;
153 struct sk_buff *skb; 165 struct sk_buff *skb;
154 struct p54u_rx_info *info; 166 struct p54u_rx_info *info;
167 int ret = 0;
155 168
156 while (skb_queue_len(&priv->rx_queue) < 32) { 169 while (skb_queue_len(&priv->rx_queue) < 32) {
157 skb = __dev_alloc_skb(priv->common.rx_mtu + 32, GFP_KERNEL); 170 skb = __dev_alloc_skb(priv->common.rx_mtu + 32, GFP_KERNEL);
158 if (!skb) 171 if (!skb) {
159 break; 172 ret = -ENOMEM;
173 goto err;
174 }
160 entry = usb_alloc_urb(0, GFP_KERNEL); 175 entry = usb_alloc_urb(0, GFP_KERNEL);
161 if (!entry) { 176 if (!entry) {
162 kfree_skb(skb); 177 ret = -ENOMEM;
163 break; 178 goto err;
164 } 179 }
180
165 usb_fill_bulk_urb(entry, priv->udev, 181 usb_fill_bulk_urb(entry, priv->udev,
166 usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), 182 usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA),
167 skb_tail_pointer(skb), 183 skb_tail_pointer(skb),
@@ -170,33 +186,32 @@ static int p54u_init_urbs(struct ieee80211_hw *dev)
170 info->urb = entry; 186 info->urb = entry;
171 info->dev = dev; 187 info->dev = dev;
172 skb_queue_tail(&priv->rx_queue, skb); 188 skb_queue_tail(&priv->rx_queue, skb);
173 usb_submit_urb(entry, GFP_KERNEL); 189
190 usb_anchor_urb(entry, &priv->submitted);
191 ret = usb_submit_urb(entry, GFP_KERNEL);
192 if (ret) {
193 skb_unlink(skb, &priv->rx_queue);
194 usb_unanchor_urb(entry);
195 goto err;
196 }
197 usb_free_urb(entry);
198 entry = NULL;
174 } 199 }
175 200
176 return 0; 201 return 0;
177}
178
179static void p54u_free_urbs(struct ieee80211_hw *dev)
180{
181 struct p54u_priv *priv = dev->priv;
182 struct p54u_rx_info *info;
183 struct sk_buff *skb;
184 202
185 while ((skb = skb_dequeue(&priv->rx_queue))) { 203 err:
186 info = (struct p54u_rx_info *) skb->cb; 204 usb_free_urb(entry);
187 if (!info->urb) 205 kfree_skb(skb);
188 continue; 206 p54u_free_urbs(dev);
189 207 return ret;
190 usb_kill_urb(info->urb);
191 kfree_skb(skb);
192 }
193} 208}
194 209
195static void p54u_tx_3887(struct ieee80211_hw *dev, struct p54_control_hdr *data, 210static void p54u_tx_3887(struct ieee80211_hw *dev, struct sk_buff *skb)
196 size_t len, int free_on_tx)
197{ 211{
198 struct p54u_priv *priv = dev->priv; 212 struct p54u_priv *priv = dev->priv;
199 struct urb *addr_urb, *data_urb; 213 struct urb *addr_urb, *data_urb;
214 int err = 0;
200 215
201 addr_urb = usb_alloc_urb(0, GFP_ATOMIC); 216 addr_urb = usb_alloc_urb(0, GFP_ATOMIC);
202 if (!addr_urb) 217 if (!addr_urb)
@@ -209,59 +224,85 @@ static void p54u_tx_3887(struct ieee80211_hw *dev, struct p54_control_hdr *data,
209 } 224 }
210 225
211 usb_fill_bulk_urb(addr_urb, priv->udev, 226 usb_fill_bulk_urb(addr_urb, priv->udev,
212 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), &data->req_id, 227 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
213 sizeof(data->req_id), p54u_tx_cb, dev); 228 &((struct p54_hdr *)skb->data)->req_id, 4,
229 p54u_tx_dummy_cb, dev);
214 usb_fill_bulk_urb(data_urb, priv->udev, 230 usb_fill_bulk_urb(data_urb, priv->udev,
215 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), data, len, 231 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
216 free_on_tx ? p54u_tx_free_cb : p54u_tx_cb, dev); 232 skb->data, skb->len, p54u_tx_cb, skb);
233
234 usb_anchor_urb(addr_urb, &priv->submitted);
235 err = usb_submit_urb(addr_urb, GFP_ATOMIC);
236 if (err) {
237 usb_unanchor_urb(addr_urb);
238 goto out;
239 }
217 240
218 usb_submit_urb(addr_urb, GFP_ATOMIC); 241 usb_anchor_urb(addr_urb, &priv->submitted);
219 usb_submit_urb(data_urb, GFP_ATOMIC); 242 err = usb_submit_urb(data_urb, GFP_ATOMIC);
243 if (err)
244 usb_unanchor_urb(data_urb);
245
246 out:
247 usb_free_urb(addr_urb);
248 usb_free_urb(data_urb);
249
250 if (err)
251 p54_free_skb(dev, skb);
220} 252}
221 253
222static __le32 p54u_lm87_chksum(const u32 *data, size_t length) 254static __le32 p54u_lm87_chksum(const __le32 *data, size_t length)
223{ 255{
224 u32 chk = 0; 256 u32 chk = 0;
225 257
226 length >>= 2; 258 length >>= 2;
227 while (length--) { 259 while (length--) {
228 chk ^= *data++; 260 chk ^= le32_to_cpu(*data++);
229 chk = (chk >> 5) ^ (chk << 3); 261 chk = (chk >> 5) ^ (chk << 3);
230 } 262 }
231 263
232 return cpu_to_le32(chk); 264 return cpu_to_le32(chk);
233} 265}
234 266
235static void p54u_tx_lm87(struct ieee80211_hw *dev, 267static void p54u_tx_lm87(struct ieee80211_hw *dev, struct sk_buff *skb)
236 struct p54_control_hdr *data,
237 size_t len, int free_on_tx)
238{ 268{
239 struct p54u_priv *priv = dev->priv; 269 struct p54u_priv *priv = dev->priv;
240 struct urb *data_urb; 270 struct urb *data_urb;
241 struct lm87_tx_hdr *hdr = (void *)data - sizeof(*hdr); 271 struct lm87_tx_hdr *hdr;
272 __le32 checksum;
273 __le32 addr = ((struct p54_hdr *)skb->data)->req_id;
242 274
243 data_urb = usb_alloc_urb(0, GFP_ATOMIC); 275 data_urb = usb_alloc_urb(0, GFP_ATOMIC);
244 if (!data_urb) 276 if (!data_urb)
245 return; 277 return;
246 278
247 hdr->chksum = p54u_lm87_chksum((u32 *)data, len); 279 checksum = p54u_lm87_chksum((__le32 *)skb->data, skb->len);
248 hdr->device_addr = data->req_id; 280 hdr = (struct lm87_tx_hdr *)skb_push(skb, sizeof(*hdr));
281 hdr->chksum = checksum;
282 hdr->device_addr = addr;
249 283
250 usb_fill_bulk_urb(data_urb, priv->udev, 284 usb_fill_bulk_urb(data_urb, priv->udev,
251 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr, 285 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
252 len + sizeof(*hdr), free_on_tx ? p54u_tx_free_cb : p54u_tx_cb, 286 skb->data, skb->len, p54u_tx_cb, skb);
253 dev); 287
254 288 usb_anchor_urb(data_urb, &priv->submitted);
255 usb_submit_urb(data_urb, GFP_ATOMIC); 289 if (usb_submit_urb(data_urb, GFP_ATOMIC)) {
290 usb_unanchor_urb(data_urb);
291 skb_pull(skb, sizeof(*hdr));
292 p54_free_skb(dev, skb);
293 }
294 usb_free_urb(data_urb);
256} 295}
257 296
258static void p54u_tx_net2280(struct ieee80211_hw *dev, struct p54_control_hdr *data, 297static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb)
259 size_t len, int free_on_tx)
260{ 298{
261 struct p54u_priv *priv = dev->priv; 299 struct p54u_priv *priv = dev->priv;
262 struct urb *int_urb, *data_urb; 300 struct urb *int_urb, *data_urb;
263 struct net2280_tx_hdr *hdr; 301 struct net2280_tx_hdr *hdr;
264 struct net2280_reg_write *reg; 302 struct net2280_reg_write *reg;
303 int err = 0;
304 __le32 addr = ((struct p54_hdr *) skb->data)->req_id;
305 __le16 len = cpu_to_le16(skb->len);
265 306
266 reg = kmalloc(sizeof(*reg), GFP_ATOMIC); 307 reg = kmalloc(sizeof(*reg), GFP_ATOMIC);
267 if (!reg) 308 if (!reg)
@@ -284,21 +325,47 @@ static void p54u_tx_net2280(struct ieee80211_hw *dev, struct p54_control_hdr *da
284 reg->addr = cpu_to_le32(P54U_DEV_BASE); 325 reg->addr = cpu_to_le32(P54U_DEV_BASE);
285 reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA); 326 reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA);
286 327
287 len += sizeof(*data); 328 hdr = (void *)skb_push(skb, sizeof(*hdr));
288 hdr = (void *)data - sizeof(*hdr);
289 memset(hdr, 0, sizeof(*hdr)); 329 memset(hdr, 0, sizeof(*hdr));
290 hdr->device_addr = data->req_id; 330 hdr->len = len;
291 hdr->len = cpu_to_le16(len); 331 hdr->device_addr = addr;
292 332
293 usb_fill_bulk_urb(int_urb, priv->udev, 333 usb_fill_bulk_urb(int_urb, priv->udev,
294 usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg), 334 usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg),
295 p54u_tx_free_cb, dev); 335 p54u_tx_dummy_cb, dev);
296 usb_submit_urb(int_urb, GFP_ATOMIC); 336
337 /*
338 * This flag triggers a code path in the USB subsystem that will
339 * free what's inside the transfer_buffer after the callback routine
340 * has completed.
341 */
342 int_urb->transfer_flags |= URB_FREE_BUFFER;
297 343
298 usb_fill_bulk_urb(data_urb, priv->udev, 344 usb_fill_bulk_urb(data_urb, priv->udev,
299 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr, len + sizeof(*hdr), 345 usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA),
300 free_on_tx ? p54u_tx_free_cb : p54u_tx_cb, dev); 346 skb->data, skb->len, p54u_tx_cb, skb);
301 usb_submit_urb(data_urb, GFP_ATOMIC); 347
348 usb_anchor_urb(int_urb, &priv->submitted);
349 err = usb_submit_urb(int_urb, GFP_ATOMIC);
350 if (err) {
351 usb_unanchor_urb(int_urb);
352 goto out;
353 }
354
355 usb_anchor_urb(data_urb, &priv->submitted);
356 err = usb_submit_urb(data_urb, GFP_ATOMIC);
357 if (err) {
358 usb_unanchor_urb(data_urb);
359 goto out;
360 }
361 out:
362 usb_free_urb(int_urb);
363 usb_free_urb(data_urb);
364
365 if (err) {
366 skb_pull(skb, sizeof(*hdr));
367 p54_free_skb(dev, skb);
368 }
302} 369}
303 370
304static int p54u_write(struct p54u_priv *priv, 371static int p54u_write(struct p54u_priv *priv,
@@ -375,7 +442,8 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
375 442
376 tmp = buf = kmalloc(P54U_FW_BLOCK, GFP_KERNEL); 443 tmp = buf = kmalloc(P54U_FW_BLOCK, GFP_KERNEL);
377 if (!buf) { 444 if (!buf) {
378 printk(KERN_ERR "p54usb: cannot allocate firmware upload buffer!\n"); 445 dev_err(&priv->udev->dev, "(p54usb) cannot allocate firmware"
446 "upload buffer!\n");
379 err = -ENOMEM; 447 err = -ENOMEM;
380 goto err_bufalloc; 448 goto err_bufalloc;
381 } 449 }
@@ -383,14 +451,18 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
383 memcpy(buf, start_string, 4); 451 memcpy(buf, start_string, 4);
384 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 4); 452 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 4);
385 if (err) { 453 if (err) {
386 printk(KERN_ERR "p54usb: reset failed! (%d)\n", err); 454 dev_err(&priv->udev->dev, "(p54usb) reset failed! (%d)\n", err);
387 goto err_reset; 455 goto err_reset;
388 } 456 }
389 457
390 err = request_firmware(&fw_entry, "isl3887usb_bare", &priv->udev->dev); 458 err = request_firmware(&fw_entry, "isl3887usb", &priv->udev->dev);
391 if (err) { 459 if (err) {
392 printk(KERN_ERR "p54usb: cannot find firmware (isl3887usb_bare)!\n"); 460 dev_err(&priv->udev->dev, "p54usb: cannot find firmware "
393 goto err_req_fw_failed; 461 "(isl3887usb)\n");
462 err = request_firmware(&fw_entry, "isl3887usb_bare",
463 &priv->udev->dev);
464 if (err)
465 goto err_req_fw_failed;
394 } 466 }
395 467
396 err = p54_parse_firmware(dev, fw_entry); 468 err = p54_parse_firmware(dev, fw_entry);
@@ -441,7 +513,8 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
441 513
442 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_size); 514 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_size);
443 if (err) { 515 if (err) {
444 printk(KERN_ERR "p54usb: firmware upload failed!\n"); 516 dev_err(&priv->udev->dev, "(p54usb) firmware "
517 "upload failed!\n");
445 goto err_upload_failed; 518 goto err_upload_failed;
446 } 519 }
447 520
@@ -452,10 +525,9 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
452 *((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, fw_entry->data, fw_entry->size)); 525 *((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, fw_entry->data, fw_entry->size));
453 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32)); 526 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32));
454 if (err) { 527 if (err) {
455 printk(KERN_ERR "p54usb: firmware upload failed!\n"); 528 dev_err(&priv->udev->dev, "(p54usb) firmware upload failed!\n");
456 goto err_upload_failed; 529 goto err_upload_failed;
457 } 530 }
458
459 timeout = jiffies + msecs_to_jiffies(1000); 531 timeout = jiffies + msecs_to_jiffies(1000);
460 while (!(err = usb_bulk_msg(priv->udev, 532 while (!(err = usb_bulk_msg(priv->udev,
461 usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), buf, 128, &alen, 1000))) { 533 usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), buf, 128, &alen, 1000))) {
@@ -463,25 +535,27 @@ static int p54u_upload_firmware_3887(struct ieee80211_hw *dev)
463 break; 535 break;
464 536
465 if (alen > 5 && !memcmp(buf, "ERROR", 5)) { 537 if (alen > 5 && !memcmp(buf, "ERROR", 5)) {
466 printk(KERN_INFO "p54usb: firmware upload failed!\n");
467 err = -EINVAL; 538 err = -EINVAL;
468 break; 539 break;
469 } 540 }
470 541
471 if (time_after(jiffies, timeout)) { 542 if (time_after(jiffies, timeout)) {
472 printk(KERN_ERR "p54usb: firmware boot timed out!\n"); 543 dev_err(&priv->udev->dev, "(p54usb) firmware boot "
544 "timed out!\n");
473 err = -ETIMEDOUT; 545 err = -ETIMEDOUT;
474 break; 546 break;
475 } 547 }
476 } 548 }
477 if (err) 549 if (err) {
550 dev_err(&priv->udev->dev, "(p54usb) firmware upload failed!\n");
478 goto err_upload_failed; 551 goto err_upload_failed;
552 }
479 553
480 buf[0] = 'g'; 554 buf[0] = 'g';
481 buf[1] = '\r'; 555 buf[1] = '\r';
482 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 2); 556 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 2);
483 if (err) { 557 if (err) {
484 printk(KERN_ERR "p54usb: firmware boot failed!\n"); 558 dev_err(&priv->udev->dev, "(p54usb) firmware boot failed!\n");
485 goto err_upload_failed; 559 goto err_upload_failed;
486 } 560 }
487 561
@@ -521,15 +595,21 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
521 595
522 buf = kmalloc(512, GFP_KERNEL); 596 buf = kmalloc(512, GFP_KERNEL);
523 if (!buf) { 597 if (!buf) {
524 printk(KERN_ERR "p54usb: firmware buffer alloc failed!\n"); 598 dev_err(&priv->udev->dev, "(p54usb) firmware buffer "
599 "alloc failed!\n");
525 return -ENOMEM; 600 return -ENOMEM;
526 } 601 }
527 602
528 err = request_firmware(&fw_entry, "isl3890usb", &priv->udev->dev); 603 err = request_firmware(&fw_entry, "isl3886usb", &priv->udev->dev);
529 if (err) { 604 if (err) {
530 printk(KERN_ERR "p54usb: cannot find firmware (isl3890usb)!\n"); 605 dev_err(&priv->udev->dev, "(p54usb) cannot find firmware "
531 kfree(buf); 606 "(isl3886usb)\n");
532 return err; 607 err = request_firmware(&fw_entry, "isl3890usb",
608 &priv->udev->dev);
609 if (err) {
610 kfree(buf);
611 return err;
612 }
533 } 613 }
534 614
535 err = p54_parse_firmware(dev, fw_entry); 615 err = p54_parse_firmware(dev, fw_entry);
@@ -648,8 +728,8 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
648 728
649 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_len); 729 err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_len);
650 if (err) { 730 if (err) {
651 printk(KERN_ERR "p54usb: firmware block upload " 731 dev_err(&priv->udev->dev, "(p54usb) firmware block "
652 "failed\n"); 732 "upload failed\n");
653 goto fail; 733 goto fail;
654 } 734 }
655 735
@@ -682,8 +762,8 @@ static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev)
682 0x002C | (unsigned long)&devreg->direct_mem_win); 762 0x002C | (unsigned long)&devreg->direct_mem_win);
683 if (!(reg & cpu_to_le32(ISL38XX_DMA_STATUS_DONE)) || 763 if (!(reg & cpu_to_le32(ISL38XX_DMA_STATUS_DONE)) ||
684 !(reg & cpu_to_le32(ISL38XX_DMA_STATUS_READY))) { 764 !(reg & cpu_to_le32(ISL38XX_DMA_STATUS_READY))) {
685 printk(KERN_ERR "p54usb: firmware DMA transfer " 765 dev_err(&priv->udev->dev, "(p54usb) firmware DMA "
686 "failed\n"); 766 "transfer failed\n");
687 goto fail; 767 goto fail;
688 } 768 }
689 769
@@ -786,11 +866,11 @@ static int __devinit p54u_probe(struct usb_interface *intf,
786 struct p54u_priv *priv; 866 struct p54u_priv *priv;
787 int err; 867 int err;
788 unsigned int i, recognized_pipes; 868 unsigned int i, recognized_pipes;
789 DECLARE_MAC_BUF(mac);
790 869
791 dev = p54_init_common(sizeof(*priv)); 870 dev = p54_init_common(sizeof(*priv));
871
792 if (!dev) { 872 if (!dev) {
793 printk(KERN_ERR "p54usb: ieee80211 alloc failed\n"); 873 dev_err(&udev->dev, "(p54usb) ieee80211 alloc failed\n");
794 return -ENOMEM; 874 return -ENOMEM;
795 } 875 }
796 876
@@ -842,6 +922,7 @@ static int __devinit p54u_probe(struct usb_interface *intf,
842 goto err_free_dev; 922 goto err_free_dev;
843 923
844 skb_queue_head_init(&priv->rx_queue); 924 skb_queue_head_init(&priv->rx_queue);
925 init_usb_anchor(&priv->submitted);
845 926
846 p54u_open(dev); 927 p54u_open(dev);
847 err = p54_read_eeprom(dev); 928 err = p54_read_eeprom(dev);
@@ -851,7 +932,7 @@ static int __devinit p54u_probe(struct usb_interface *intf,
851 932
852 err = ieee80211_register_hw(dev); 933 err = ieee80211_register_hw(dev);
853 if (err) { 934 if (err) {
854 printk(KERN_ERR "p54usb: Cannot register netdevice\n"); 935 dev_err(&udev->dev, "(p54usb) Cannot register netdevice\n");
855 goto err_free_dev; 936 goto err_free_dev;
856 } 937 }
857 938
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index 5b8fe91379c3..54ee738bf2af 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -133,6 +133,7 @@ struct p54u_priv {
133 133
134 spinlock_t lock; 134 spinlock_t lock;
135 struct sk_buff_head rx_queue; 135 struct sk_buff_head rx_queue;
136 struct usb_anchor submitted;
136}; 137};
137 138
138#endif /* P54USB_H */ 139#endif /* P54USB_H */
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 16e68f4b654a..57a150a22de5 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2028,12 +2028,11 @@ static void
2028format_event(islpci_private *priv, char *dest, const char *str, 2028format_event(islpci_private *priv, char *dest, const char *str,
2029 const struct obj_mlme *mlme, u16 *length, int error) 2029 const struct obj_mlme *mlme, u16 *length, int error)
2030{ 2030{
2031 DECLARE_MAC_BUF(mac);
2032 int n = snprintf(dest, IW_CUSTOM_MAX, 2031 int n = snprintf(dest, IW_CUSTOM_MAX,
2033 "%s %s %s %s (%2.2X)", 2032 "%s %s %pM %s (%2.2X)",
2034 str, 2033 str,
2035 ((priv->iw_mode == IW_MODE_MASTER) ? "from" : "to"), 2034 ((priv->iw_mode == IW_MODE_MASTER) ? "from" : "to"),
2036 print_mac(mac, mlme->address), 2035 mlme->address,
2037 (error ? (mlme->code ? " : REJECTED " : " : ACCEPTED ") 2036 (error ? (mlme->code ? " : REJECTED " : " : ACCEPTED ")
2038 : ""), mlme->code); 2037 : ""), mlme->code);
2039 BUG_ON(n > IW_CUSTOM_MAX); 2038 BUG_ON(n > IW_CUSTOM_MAX);
@@ -2113,7 +2112,6 @@ prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
2113{ 2112{
2114 struct list_head *ptr; 2113 struct list_head *ptr;
2115 struct islpci_bss_wpa_ie *bss = NULL; 2114 struct islpci_bss_wpa_ie *bss = NULL;
2116 DECLARE_MAC_BUF(mac);
2117 2115
2118 if (wpa_ie_len > MAX_WPA_IE_LEN) 2116 if (wpa_ie_len > MAX_WPA_IE_LEN)
2119 wpa_ie_len = MAX_WPA_IE_LEN; 2117 wpa_ie_len = MAX_WPA_IE_LEN;
@@ -2154,7 +2152,7 @@ prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
2154 bss->last_update = jiffies; 2152 bss->last_update = jiffies;
2155 } else { 2153 } else {
2156 printk(KERN_DEBUG "Failed to add BSS WPA entry for " 2154 printk(KERN_DEBUG "Failed to add BSS WPA entry for "
2157 "%s\n", print_mac(mac, bssid)); 2155 "%pM\n", bssid);
2158 } 2156 }
2159 2157
2160 /* expire old entries from WPA list */ 2158 /* expire old entries from WPA list */
@@ -2219,7 +2217,6 @@ prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr,
2219{ 2217{
2220 struct ieee80211_beacon_phdr *hdr; 2218 struct ieee80211_beacon_phdr *hdr;
2221 u8 *pos, *end; 2219 u8 *pos, *end;
2222 DECLARE_MAC_BUF(mac);
2223 2220
2224 if (!priv->wpa) 2221 if (!priv->wpa)
2225 return; 2222 return;
@@ -2230,7 +2227,7 @@ prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr,
2230 while (pos < end) { 2227 while (pos < end) {
2231 if (pos + 2 + pos[1] > end) { 2228 if (pos + 2 + pos[1] > end) {
2232 printk(KERN_DEBUG "Parsing Beacon/ProbeResp failed " 2229 printk(KERN_DEBUG "Parsing Beacon/ProbeResp failed "
2233 "for %s\n", print_mac(mac, addr)); 2230 "for %pM\n", addr);
2234 return; 2231 return;
2235 } 2232 }
2236 if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 && 2233 if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 &&
@@ -2269,7 +2266,6 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
2269 size_t len = 0; /* u16, better? */ 2266 size_t len = 0; /* u16, better? */
2270 u8 *payload = NULL, *pos = NULL; 2267 u8 *payload = NULL, *pos = NULL;
2271 int ret; 2268 int ret;
2272 DECLARE_MAC_BUF(mac);
2273 2269
2274 /* I think all trapable objects are listed here. 2270 /* I think all trapable objects are listed here.
2275 * Some oids have a EX version. The difference is that they are emitted 2271 * Some oids have a EX version. The difference is that they are emitted
@@ -2358,8 +2354,8 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
2358 break; 2354 break;
2359 2355
2360 memcpy(&confirm->address, mlmeex->address, ETH_ALEN); 2356 memcpy(&confirm->address, mlmeex->address, ETH_ALEN);
2361 printk(KERN_DEBUG "Authenticate from: address:\t%s\n", 2357 printk(KERN_DEBUG "Authenticate from: address:\t%pM\n",
2362 print_mac(mac, mlmeex->address)); 2358 mlmeex->address);
2363 confirm->id = -1; /* or mlmeex->id ? */ 2359 confirm->id = -1; /* or mlmeex->id ? */
2364 confirm->state = 0; /* not used */ 2360 confirm->state = 0; /* not used */
2365 confirm->code = 0; 2361 confirm->code = 0;
@@ -2404,8 +2400,8 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
2404 wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie); 2400 wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie);
2405 2401
2406 if (!wpa_ie_len) { 2402 if (!wpa_ie_len) {
2407 printk(KERN_DEBUG "No WPA IE found from address:\t%s\n", 2403 printk(KERN_DEBUG "No WPA IE found from address:\t%pM\n",
2408 print_mac(mac, mlmeex->address)); 2404 mlmeex->address);
2409 kfree(confirm); 2405 kfree(confirm);
2410 break; 2406 break;
2411 } 2407 }
@@ -2441,8 +2437,8 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
2441 wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie); 2437 wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie);
2442 2438
2443 if (!wpa_ie_len) { 2439 if (!wpa_ie_len) {
2444 printk(KERN_DEBUG "No WPA IE found from address:\t%s\n", 2440 printk(KERN_DEBUG "No WPA IE found from address:\t%pM\n",
2445 print_mac(mac, mlmeex->address)); 2441 mlmeex->address);
2446 kfree(confirm); 2442 kfree(confirm);
2447 break; 2443 break;
2448 } 2444 }
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index af2e4f2405f2..9a72b1e3e163 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -93,7 +93,7 @@ static struct pci_driver prism54_driver = {
93 Module initialization functions 93 Module initialization functions
94******************************************************************************/ 94******************************************************************************/
95 95
96int 96static int
97prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id) 97prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
98{ 98{
99 struct net_device *ndev; 99 struct net_device *ndev;
@@ -216,7 +216,7 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
216static volatile int __in_cleanup_module = 0; 216static volatile int __in_cleanup_module = 0;
217 217
218/* this one removes one(!!) instance only */ 218/* this one removes one(!!) instance only */
219void 219static void
220prism54_remove(struct pci_dev *pdev) 220prism54_remove(struct pci_dev *pdev)
221{ 221{
222 struct net_device *ndev = pci_get_drvdata(pdev); 222 struct net_device *ndev = pci_get_drvdata(pdev);
@@ -259,7 +259,7 @@ prism54_remove(struct pci_dev *pdev)
259 pci_disable_device(pdev); 259 pci_disable_device(pdev);
260} 260}
261 261
262int 262static int
263prism54_suspend(struct pci_dev *pdev, pm_message_t state) 263prism54_suspend(struct pci_dev *pdev, pm_message_t state)
264{ 264{
265 struct net_device *ndev = pci_get_drvdata(pdev); 265 struct net_device *ndev = pci_get_drvdata(pdev);
@@ -282,7 +282,7 @@ prism54_suspend(struct pci_dev *pdev, pm_message_t state)
282 return 0; 282 return 0;
283} 283}
284 284
285int 285static int
286prism54_resume(struct pci_dev *pdev) 286prism54_resume(struct pci_dev *pdev)
287{ 287{
288 struct net_device *ndev = pci_get_drvdata(pdev); 288 struct net_device *ndev = pci_get_drvdata(pdev);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 1404a5717520..99ec7d622518 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -414,7 +414,6 @@ static int ray_config(struct pcmcia_device *link)
414 memreq_t mem; 414 memreq_t mem;
415 struct net_device *dev = (struct net_device *)link->priv; 415 struct net_device *dev = (struct net_device *)link->priv;
416 ray_dev_t *local = netdev_priv(dev); 416 ray_dev_t *local = netdev_priv(dev);
417 DECLARE_MAC_BUF(mac);
418 417
419 DEBUG(1, "ray_config(0x%p)\n", link); 418 DEBUG(1, "ray_config(0x%p)\n", link);
420 419
@@ -485,8 +484,8 @@ static int ray_config(struct pcmcia_device *link)
485 strcpy(local->node.dev_name, dev->name); 484 strcpy(local->node.dev_name, dev->name);
486 link->dev_node = &local->node; 485 link->dev_node = &local->node;
487 486
488 printk(KERN_INFO "%s: RayLink, irq %d, hw_addr %s\n", 487 printk(KERN_INFO "%s: RayLink, irq %d, hw_addr %pM\n",
489 dev->name, dev->irq, print_mac(mac, dev->dev_addr)); 488 dev->name, dev->irq, dev->dev_addr);
490 489
491 return 0; 490 return 0;
492 491
@@ -829,7 +828,7 @@ static int ray_resume(struct pcmcia_device *link)
829} 828}
830 829
831/*===========================================================================*/ 830/*===========================================================================*/
832int ray_dev_init(struct net_device *dev) 831static int ray_dev_init(struct net_device *dev)
833{ 832{
834#ifdef RAY_IMMEDIATE_INIT 833#ifdef RAY_IMMEDIATE_INIT
835 int i; 834 int i;
@@ -2285,7 +2284,6 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs, unsigned i
2285 2284
2286 skb->protocol = eth_type_trans(skb,dev); 2285 skb->protocol = eth_type_trans(skb,dev);
2287 netif_rx(skb); 2286 netif_rx(skb);
2288 dev->last_rx = jiffies;
2289 local->stats.rx_packets++; 2287 local->stats.rx_packets++;
2290 local->stats.rx_bytes += total_len; 2288 local->stats.rx_bytes += total_len;
2291 2289
@@ -2595,7 +2593,6 @@ static int ray_cs_proc_show(struct seq_file *m, void *v)
2595 UCHAR *p; 2593 UCHAR *p;
2596 struct freq_hop_element *pfh; 2594 struct freq_hop_element *pfh;
2597 UCHAR c[33]; 2595 UCHAR c[33];
2598 DECLARE_MAC_BUF(mac);
2599 2596
2600 link = this_device; 2597 link = this_device;
2601 if (!link) 2598 if (!link)
@@ -2623,8 +2620,7 @@ static int ray_cs_proc_show(struct seq_file *m, void *v)
2623 nettype[local->sparm.b5.a_network_type], c); 2620 nettype[local->sparm.b5.a_network_type], c);
2624 2621
2625 p = local->bss_id; 2622 p = local->bss_id;
2626 seq_printf(m, "BSSID = %s\n", 2623 seq_printf(m, "BSSID = %pM\n", p);
2627 print_mac(mac, p));
2628 2624
2629 seq_printf(m, "Country code = %d\n", 2625 seq_printf(m, "Country code = %d\n",
2630 local->sparm.b5.a_curr_country_code); 2626 local->sparm.b5.a_curr_country_code);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2b414899dfa0..607ce9f61b54 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -37,11 +37,11 @@
37#include <linux/usb.h> 37#include <linux/usb.h>
38#include <linux/usb/cdc.h> 38#include <linux/usb/cdc.h>
39#include <linux/wireless.h> 39#include <linux/wireless.h>
40#include <linux/ieee80211.h>
40#include <linux/if_arp.h> 41#include <linux/if_arp.h>
41#include <linux/ctype.h> 42#include <linux/ctype.h>
42#include <linux/spinlock.h> 43#include <linux/spinlock.h>
43#include <net/iw_handler.h> 44#include <net/iw_handler.h>
44#include <net/ieee80211.h>
45#include <linux/usb/usbnet.h> 45#include <linux/usb/usbnet.h>
46#include <linux/usb/rndis_host.h> 46#include <linux/usb/rndis_host.h>
47 47
@@ -1104,7 +1104,7 @@ static int rndis_iw_get_range(struct net_device *dev,
1104 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1104 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1105{ 1105{
1106 struct iw_range *range = (struct iw_range *)extra; 1106 struct iw_range *range = (struct iw_range *)extra;
1107 struct usbnet *usbdev = dev->priv; 1107 struct usbnet *usbdev = netdev_priv(dev);
1108 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1108 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1109 int len, ret, i, j, num, has_80211g_rates; 1109 int len, ret, i, j, num, has_80211g_rates;
1110 u8 rates[8]; 1110 u8 rates[8];
@@ -1210,7 +1210,7 @@ static int rndis_iw_get_range(struct net_device *dev,
1210static int rndis_iw_get_name(struct net_device *dev, 1210static int rndis_iw_get_name(struct net_device *dev,
1211 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1211 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1212{ 1212{
1213 struct usbnet *usbdev = dev->priv; 1213 struct usbnet *usbdev = netdev_priv(dev);
1214 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1214 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1215 1215
1216 strcpy(wrqu->name, priv->name); 1216 strcpy(wrqu->name, priv->name);
@@ -1223,7 +1223,7 @@ static int rndis_iw_set_essid(struct net_device *dev,
1223{ 1223{
1224 struct ndis_80211_ssid ssid; 1224 struct ndis_80211_ssid ssid;
1225 int length = wrqu->essid.length; 1225 int length = wrqu->essid.length;
1226 struct usbnet *usbdev = dev->priv; 1226 struct usbnet *usbdev = netdev_priv(dev);
1227 1227
1228 devdbg(usbdev, "SIOCSIWESSID: [flags:%d,len:%d] '%.32s'", 1228 devdbg(usbdev, "SIOCSIWESSID: [flags:%d,len:%d] '%.32s'",
1229 wrqu->essid.flags, wrqu->essid.length, essid); 1229 wrqu->essid.flags, wrqu->essid.length, essid);
@@ -1250,7 +1250,7 @@ static int rndis_iw_get_essid(struct net_device *dev,
1250 struct iw_request_info *info, union iwreq_data *wrqu, char *essid) 1250 struct iw_request_info *info, union iwreq_data *wrqu, char *essid)
1251{ 1251{
1252 struct ndis_80211_ssid ssid; 1252 struct ndis_80211_ssid ssid;
1253 struct usbnet *usbdev = dev->priv; 1253 struct usbnet *usbdev = netdev_priv(dev);
1254 int ret; 1254 int ret;
1255 1255
1256 ret = get_essid(usbdev, &ssid); 1256 ret = get_essid(usbdev, &ssid);
@@ -1273,15 +1273,14 @@ static int rndis_iw_get_essid(struct net_device *dev,
1273static int rndis_iw_get_bssid(struct net_device *dev, 1273static int rndis_iw_get_bssid(struct net_device *dev,
1274 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1274 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1275{ 1275{
1276 struct usbnet *usbdev = dev->priv; 1276 struct usbnet *usbdev = netdev_priv(dev);
1277 unsigned char bssid[ETH_ALEN]; 1277 unsigned char bssid[ETH_ALEN];
1278 int ret; 1278 int ret;
1279 DECLARE_MAC_BUF(mac);
1280 1279
1281 ret = get_bssid(usbdev, bssid); 1280 ret = get_bssid(usbdev, bssid);
1282 1281
1283 if (ret == 0) 1282 if (ret == 0)
1284 devdbg(usbdev, "SIOCGIWAP: %s", print_mac(mac, bssid)); 1283 devdbg(usbdev, "SIOCGIWAP: %pM", bssid);
1285 else 1284 else
1286 devdbg(usbdev, "SIOCGIWAP: <not associated>"); 1285 devdbg(usbdev, "SIOCGIWAP: <not associated>");
1287 1286
@@ -1295,12 +1294,11 @@ static int rndis_iw_get_bssid(struct net_device *dev,
1295static int rndis_iw_set_bssid(struct net_device *dev, 1294static int rndis_iw_set_bssid(struct net_device *dev,
1296 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1295 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1297{ 1296{
1298 struct usbnet *usbdev = dev->priv; 1297 struct usbnet *usbdev = netdev_priv(dev);
1299 u8 *bssid = (u8 *)wrqu->ap_addr.sa_data; 1298 u8 *bssid = (u8 *)wrqu->ap_addr.sa_data;
1300 DECLARE_MAC_BUF(mac);
1301 int ret; 1299 int ret;
1302 1300
1303 devdbg(usbdev, "SIOCSIWAP: %s", print_mac(mac, bssid)); 1301 devdbg(usbdev, "SIOCSIWAP: %pM", bssid);
1304 1302
1305 ret = rndis_set_oid(usbdev, OID_802_11_BSSID, bssid, ETH_ALEN); 1303 ret = rndis_set_oid(usbdev, OID_802_11_BSSID, bssid, ETH_ALEN);
1306 1304
@@ -1318,7 +1316,7 @@ static int rndis_iw_set_auth(struct net_device *dev,
1318 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1316 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1319{ 1317{
1320 struct iw_param *p = &wrqu->param; 1318 struct iw_param *p = &wrqu->param;
1321 struct usbnet *usbdev = dev->priv; 1319 struct usbnet *usbdev = netdev_priv(dev);
1322 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1320 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1323 int ret = -ENOTSUPP; 1321 int ret = -ENOTSUPP;
1324 1322
@@ -1399,7 +1397,7 @@ static int rndis_iw_get_auth(struct net_device *dev,
1399 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1397 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1400{ 1398{
1401 struct iw_param *p = &wrqu->param; 1399 struct iw_param *p = &wrqu->param;
1402 struct usbnet *usbdev = dev->priv; 1400 struct usbnet *usbdev = netdev_priv(dev);
1403 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1401 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1404 1402
1405 switch (p->flags & IW_AUTH_INDEX) { 1403 switch (p->flags & IW_AUTH_INDEX) {
@@ -1431,7 +1429,7 @@ static int rndis_iw_get_mode(struct net_device *dev,
1431 struct iw_request_info *info, 1429 struct iw_request_info *info,
1432 union iwreq_data *wrqu, char *extra) 1430 union iwreq_data *wrqu, char *extra)
1433{ 1431{
1434 struct usbnet *usbdev = dev->priv; 1432 struct usbnet *usbdev = netdev_priv(dev);
1435 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1433 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1436 1434
1437 switch (priv->infra_mode) { 1435 switch (priv->infra_mode) {
@@ -1454,7 +1452,7 @@ static int rndis_iw_get_mode(struct net_device *dev,
1454static int rndis_iw_set_mode(struct net_device *dev, 1452static int rndis_iw_set_mode(struct net_device *dev,
1455 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1453 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1456{ 1454{
1457 struct usbnet *usbdev = dev->priv; 1455 struct usbnet *usbdev = netdev_priv(dev);
1458 int mode; 1456 int mode;
1459 1457
1460 devdbg(usbdev, "SIOCSIWMODE: %08x", wrqu->mode); 1458 devdbg(usbdev, "SIOCSIWMODE: %08x", wrqu->mode);
@@ -1479,7 +1477,7 @@ static int rndis_iw_set_mode(struct net_device *dev,
1479static int rndis_iw_set_encode(struct net_device *dev, 1477static int rndis_iw_set_encode(struct net_device *dev,
1480 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1478 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1481{ 1479{
1482 struct usbnet *usbdev = dev->priv; 1480 struct usbnet *usbdev = netdev_priv(dev);
1483 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1481 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1484 int ret, index, key_len; 1482 int ret, index, key_len;
1485 u8 *key; 1483 u8 *key;
@@ -1542,7 +1540,7 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
1542 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1540 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1543{ 1541{
1544 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1542 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1545 struct usbnet *usbdev = dev->priv; 1543 struct usbnet *usbdev = netdev_priv(dev);
1546 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1544 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1547 struct ndis_80211_key ndis_key; 1545 struct ndis_80211_key ndis_key;
1548 int keyidx, ret; 1546 int keyidx, ret;
@@ -1627,7 +1625,7 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
1627static int rndis_iw_set_scan(struct net_device *dev, 1625static int rndis_iw_set_scan(struct net_device *dev,
1628 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1626 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1629{ 1627{
1630 struct usbnet *usbdev = dev->priv; 1628 struct usbnet *usbdev = netdev_priv(dev);
1631 union iwreq_data evt; 1629 union iwreq_data evt;
1632 int ret = -EINVAL; 1630 int ret = -EINVAL;
1633 __le32 tmp; 1631 __le32 tmp;
@@ -1652,19 +1650,18 @@ static char *rndis_translate_scan(struct net_device *dev,
1652 struct ndis_80211_bssid_ex *bssid) 1650 struct ndis_80211_bssid_ex *bssid)
1653{ 1651{
1654#ifdef DEBUG 1652#ifdef DEBUG
1655 struct usbnet *usbdev = dev->priv; 1653 struct usbnet *usbdev = netdev_priv(dev);
1656#endif 1654#endif
1657 struct ieee80211_info_element *ie; 1655 u8 *ie;
1658 char *current_val; 1656 char *current_val;
1659 int bssid_len, ie_len, i; 1657 int bssid_len, ie_len, i;
1660 u32 beacon, atim; 1658 u32 beacon, atim;
1661 struct iw_event iwe; 1659 struct iw_event iwe;
1662 unsigned char sbuf[32]; 1660 unsigned char sbuf[32];
1663 DECLARE_MAC_BUF(mac);
1664 1661
1665 bssid_len = le32_to_cpu(bssid->length); 1662 bssid_len = le32_to_cpu(bssid->length);
1666 1663
1667 devdbg(usbdev, "BSSID %s", print_mac(mac, bssid->mac)); 1664 devdbg(usbdev, "BSSID %pM", bssid->mac);
1668 iwe.cmd = SIOCGIWAP; 1665 iwe.cmd = SIOCGIWAP;
1669 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 1666 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
1670 memcpy(iwe.u.ap_addr.sa_data, bssid->mac, ETH_ALEN); 1667 memcpy(iwe.u.ap_addr.sa_data, bssid->mac, ETH_ALEN);
@@ -1753,20 +1750,20 @@ static char *rndis_translate_scan(struct net_device *dev,
1753 ie_len = min(bssid_len - (int)sizeof(*bssid), 1750 ie_len = min(bssid_len - (int)sizeof(*bssid),
1754 (int)le32_to_cpu(bssid->ie_length)); 1751 (int)le32_to_cpu(bssid->ie_length));
1755 ie_len -= sizeof(struct ndis_80211_fixed_ies); 1752 ie_len -= sizeof(struct ndis_80211_fixed_ies);
1756 while (ie_len >= sizeof(*ie) && sizeof(*ie) + ie->len <= ie_len) { 1753 while (ie_len >= 2 && 2 + ie[1] <= ie_len) {
1757 if ((ie->id == MFIE_TYPE_GENERIC && ie->len >= 4 && 1754 if ((ie[0] == WLAN_EID_GENERIC && ie[1] >= 4 &&
1758 memcmp(ie->data, "\x00\x50\xf2\x01", 4) == 0) || 1755 memcmp(ie + 2, "\x00\x50\xf2\x01", 4) == 0) ||
1759 ie->id == MFIE_TYPE_RSN) { 1756 ie[0] == WLAN_EID_RSN) {
1760 devdbg(usbdev, "IE: WPA%d", 1757 devdbg(usbdev, "IE: WPA%d",
1761 (ie->id == MFIE_TYPE_RSN) ? 2 : 1); 1758 (ie[0] == WLAN_EID_RSN) ? 2 : 1);
1762 iwe.cmd = IWEVGENIE; 1759 iwe.cmd = IWEVGENIE;
1763 iwe.u.data.length = min(ie->len + 2, MAX_WPA_IE_LEN); 1760 /* arbitrary cut-off at 64 */
1764 cev = iwe_stream_add_point(info, cev, end_buf, &iwe, 1761 iwe.u.data.length = min(ie[1] + 2, 64);
1765 (u8 *)ie); 1762 cev = iwe_stream_add_point(info, cev, end_buf, &iwe, ie);
1766 } 1763 }
1767 1764
1768 ie_len -= sizeof(*ie) + ie->len; 1765 ie_len -= 2 + ie[1];
1769 ie = (struct ieee80211_info_element *)&ie->data[ie->len]; 1766 ie += 2 + ie[1];
1770 } 1767 }
1771 1768
1772 return cev; 1769 return cev;
@@ -1776,7 +1773,7 @@ static char *rndis_translate_scan(struct net_device *dev,
1776static int rndis_iw_get_scan(struct net_device *dev, 1773static int rndis_iw_get_scan(struct net_device *dev,
1777 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1774 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1778{ 1775{
1779 struct usbnet *usbdev = dev->priv; 1776 struct usbnet *usbdev = netdev_priv(dev);
1780 void *buf = NULL; 1777 void *buf = NULL;
1781 char *cev = extra; 1778 char *cev = extra;
1782 struct ndis_80211_bssid_list_ex *bssid_list; 1779 struct ndis_80211_bssid_list_ex *bssid_list;
@@ -1822,7 +1819,7 @@ out:
1822static int rndis_iw_set_genie(struct net_device *dev, 1819static int rndis_iw_set_genie(struct net_device *dev,
1823 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1820 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1824{ 1821{
1825 struct usbnet *usbdev = dev->priv; 1822 struct usbnet *usbdev = netdev_priv(dev);
1826 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1823 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1827 int ret = 0; 1824 int ret = 0;
1828 1825
@@ -1856,7 +1853,7 @@ static int rndis_iw_set_genie(struct net_device *dev,
1856static int rndis_iw_get_genie(struct net_device *dev, 1853static int rndis_iw_get_genie(struct net_device *dev,
1857 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1854 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1858{ 1855{
1859 struct usbnet *usbdev = dev->priv; 1856 struct usbnet *usbdev = netdev_priv(dev);
1860 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1857 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1861 1858
1862 devdbg(usbdev, "SIOCGIWGENIE"); 1859 devdbg(usbdev, "SIOCGIWGENIE");
@@ -1879,7 +1876,7 @@ static int rndis_iw_get_genie(struct net_device *dev,
1879static int rndis_iw_set_rts(struct net_device *dev, 1876static int rndis_iw_set_rts(struct net_device *dev,
1880 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1877 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1881{ 1878{
1882 struct usbnet *usbdev = dev->priv; 1879 struct usbnet *usbdev = netdev_priv(dev);
1883 __le32 tmp; 1880 __le32 tmp;
1884 devdbg(usbdev, "SIOCSIWRTS"); 1881 devdbg(usbdev, "SIOCSIWRTS");
1885 1882
@@ -1892,7 +1889,7 @@ static int rndis_iw_set_rts(struct net_device *dev,
1892static int rndis_iw_get_rts(struct net_device *dev, 1889static int rndis_iw_get_rts(struct net_device *dev,
1893 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1890 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1894{ 1891{
1895 struct usbnet *usbdev = dev->priv; 1892 struct usbnet *usbdev = netdev_priv(dev);
1896 __le32 tmp; 1893 __le32 tmp;
1897 int len, ret; 1894 int len, ret;
1898 1895
@@ -1913,7 +1910,7 @@ static int rndis_iw_get_rts(struct net_device *dev,
1913static int rndis_iw_set_frag(struct net_device *dev, 1910static int rndis_iw_set_frag(struct net_device *dev,
1914 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1911 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1915{ 1912{
1916 struct usbnet *usbdev = dev->priv; 1913 struct usbnet *usbdev = netdev_priv(dev);
1917 __le32 tmp; 1914 __le32 tmp;
1918 1915
1919 devdbg(usbdev, "SIOCSIWFRAG"); 1916 devdbg(usbdev, "SIOCSIWFRAG");
@@ -1927,7 +1924,7 @@ static int rndis_iw_set_frag(struct net_device *dev,
1927static int rndis_iw_get_frag(struct net_device *dev, 1924static int rndis_iw_get_frag(struct net_device *dev,
1928 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1925 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1929{ 1926{
1930 struct usbnet *usbdev = dev->priv; 1927 struct usbnet *usbdev = netdev_priv(dev);
1931 __le32 tmp; 1928 __le32 tmp;
1932 int len, ret; 1929 int len, ret;
1933 1930
@@ -1947,7 +1944,7 @@ static int rndis_iw_get_frag(struct net_device *dev,
1947static int rndis_iw_set_nick(struct net_device *dev, 1944static int rndis_iw_set_nick(struct net_device *dev,
1948 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1945 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1949{ 1946{
1950 struct usbnet *usbdev = dev->priv; 1947 struct usbnet *usbdev = netdev_priv(dev);
1951 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1948 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1952 1949
1953 devdbg(usbdev, "SIOCSIWNICK"); 1950 devdbg(usbdev, "SIOCSIWNICK");
@@ -1964,7 +1961,7 @@ static int rndis_iw_set_nick(struct net_device *dev,
1964static int rndis_iw_get_nick(struct net_device *dev, 1961static int rndis_iw_get_nick(struct net_device *dev,
1965 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1962 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1966{ 1963{
1967 struct usbnet *usbdev = dev->priv; 1964 struct usbnet *usbdev = netdev_priv(dev);
1968 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1965 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1969 1966
1970 wrqu->data.flags = 1; 1967 wrqu->data.flags = 1;
@@ -1980,7 +1977,7 @@ static int rndis_iw_get_nick(struct net_device *dev,
1980static int rndis_iw_set_freq(struct net_device *dev, 1977static int rndis_iw_set_freq(struct net_device *dev,
1981 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1978 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1982{ 1979{
1983 struct usbnet *usbdev = dev->priv; 1980 struct usbnet *usbdev = netdev_priv(dev);
1984 struct ndis_80211_conf config; 1981 struct ndis_80211_conf config;
1985 unsigned int dsconfig; 1982 unsigned int dsconfig;
1986 int len, ret; 1983 int len, ret;
@@ -2011,7 +2008,7 @@ static int rndis_iw_set_freq(struct net_device *dev,
2011static int rndis_iw_get_freq(struct net_device *dev, 2008static int rndis_iw_get_freq(struct net_device *dev,
2012 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 2009 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
2013{ 2010{
2014 struct usbnet *usbdev = dev->priv; 2011 struct usbnet *usbdev = netdev_priv(dev);
2015 struct ndis_80211_conf config; 2012 struct ndis_80211_conf config;
2016 int len, ret; 2013 int len, ret;
2017 2014
@@ -2028,7 +2025,7 @@ static int rndis_iw_get_freq(struct net_device *dev,
2028static int rndis_iw_get_txpower(struct net_device *dev, 2025static int rndis_iw_get_txpower(struct net_device *dev,
2029 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 2026 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
2030{ 2027{
2031 struct usbnet *usbdev = dev->priv; 2028 struct usbnet *usbdev = netdev_priv(dev);
2032 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 2029 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
2033 __le32 tx_power; 2030 __le32 tx_power;
2034 int ret = 0, len; 2031 int ret = 0, len;
@@ -2062,7 +2059,7 @@ static int rndis_iw_get_txpower(struct net_device *dev,
2062static int rndis_iw_set_txpower(struct net_device *dev, 2059static int rndis_iw_set_txpower(struct net_device *dev,
2063 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 2060 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
2064{ 2061{
2065 struct usbnet *usbdev = dev->priv; 2062 struct usbnet *usbdev = netdev_priv(dev);
2066 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 2063 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
2067 __le32 tx_power = 0; 2064 __le32 tx_power = 0;
2068 int ret = 0; 2065 int ret = 0;
@@ -2114,7 +2111,7 @@ static int rndis_iw_set_txpower(struct net_device *dev,
2114static int rndis_iw_get_rate(struct net_device *dev, 2111static int rndis_iw_get_rate(struct net_device *dev,
2115 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 2112 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
2116{ 2113{
2117 struct usbnet *usbdev = dev->priv; 2114 struct usbnet *usbdev = netdev_priv(dev);
2118 __le32 tmp; 2115 __le32 tmp;
2119 int ret, len; 2116 int ret, len;
2120 2117
@@ -2132,7 +2129,7 @@ static int rndis_iw_get_rate(struct net_device *dev,
2132static int rndis_iw_set_mlme(struct net_device *dev, 2129static int rndis_iw_set_mlme(struct net_device *dev,
2133 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 2130 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
2134{ 2131{
2135 struct usbnet *usbdev = dev->priv; 2132 struct usbnet *usbdev = netdev_priv(dev);
2136 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 2133 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
2137 struct iw_mlme *mlme = (struct iw_mlme *)extra; 2134 struct iw_mlme *mlme = (struct iw_mlme *)extra;
2138 unsigned char bssid[ETH_ALEN]; 2135 unsigned char bssid[ETH_ALEN];
@@ -2157,7 +2154,7 @@ static int rndis_iw_set_mlme(struct net_device *dev,
2157 2154
2158static struct iw_statistics *rndis_get_wireless_stats(struct net_device *dev) 2155static struct iw_statistics *rndis_get_wireless_stats(struct net_device *dev)
2159{ 2156{
2160 struct usbnet *usbdev = dev->priv; 2157 struct usbnet *usbdev = netdev_priv(dev);
2161 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 2158 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
2162 unsigned long flags; 2159 unsigned long flags;
2163 2160
@@ -2287,7 +2284,7 @@ get_bssid:
2287 2284
2288static void rndis_wext_set_multicast_list(struct net_device *dev) 2285static void rndis_wext_set_multicast_list(struct net_device *dev)
2289{ 2286{
2290 struct usbnet *usbdev = dev->priv; 2287 struct usbnet *usbdev = netdev_priv(dev);
2291 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 2288 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
2292 2289
2293 if (test_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending)) 2290 if (test_bit(WORK_SET_MULTICAST_LIST, &priv->work_pending))
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 95511ac22470..178b313293b4 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -57,6 +57,7 @@ config RT2500USB
57 tristate "Ralink rt2500 (USB) support" 57 tristate "Ralink rt2500 (USB) support"
58 depends on USB 58 depends on USB
59 select RT2X00_LIB_USB 59 select RT2X00_LIB_USB
60 select RT2X00_LIB_CRYPTO
60 ---help--- 61 ---help---
61 This adds support for rt2500 wireless chipset family. 62 This adds support for rt2500 wireless chipset family.
62 Supported chips: RT2571 & RT2572. 63 Supported chips: RT2571 & RT2572.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 08cb9eec16a6..6a977679124d 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -49,45 +49,33 @@
49 * the access attempt is considered to have failed, 49 * the access attempt is considered to have failed,
50 * and we will print an error. 50 * and we will print an error.
51 */ 51 */
52static u32 rt2400pci_bbp_check(struct rt2x00_dev *rt2x00dev) 52#define WAIT_FOR_BBP(__dev, __reg) \
53{ 53 rt2x00pci_regbusy_read((__dev), BBPCSR, BBPCSR_BUSY, (__reg))
54 u32 reg; 54#define WAIT_FOR_RF(__dev, __reg) \
55 unsigned int i; 55 rt2x00pci_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg))
56
57 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
58 rt2x00pci_register_read(rt2x00dev, BBPCSR, &reg);
59 if (!rt2x00_get_field32(reg, BBPCSR_BUSY))
60 break;
61 udelay(REGISTER_BUSY_DELAY);
62 }
63
64 return reg;
65}
66 56
67static void rt2400pci_bbp_write(struct rt2x00_dev *rt2x00dev, 57static void rt2400pci_bbp_write(struct rt2x00_dev *rt2x00dev,
68 const unsigned int word, const u8 value) 58 const unsigned int word, const u8 value)
69{ 59{
70 u32 reg; 60 u32 reg;
71 61
72 /* 62 mutex_lock(&rt2x00dev->csr_mutex);
73 * Wait until the BBP becomes ready.
74 */
75 reg = rt2400pci_bbp_check(rt2x00dev);
76 if (rt2x00_get_field32(reg, BBPCSR_BUSY)) {
77 ERROR(rt2x00dev, "BBPCSR register busy. Write failed.\n");
78 return;
79 }
80 63
81 /* 64 /*
82 * Write the data into the BBP. 65 * Wait until the BBP becomes available, afterwards we
66 * can safely write the new data into the register.
83 */ 67 */
84 reg = 0; 68 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
85 rt2x00_set_field32(&reg, BBPCSR_VALUE, value); 69 reg = 0;
86 rt2x00_set_field32(&reg, BBPCSR_REGNUM, word); 70 rt2x00_set_field32(&reg, BBPCSR_VALUE, value);
87 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); 71 rt2x00_set_field32(&reg, BBPCSR_REGNUM, word);
88 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1); 72 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
73 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1);
74
75 rt2x00pci_register_write(rt2x00dev, BBPCSR, reg);
76 }
89 77
90 rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); 78 mutex_unlock(&rt2x00dev->csr_mutex);
91} 79}
92 80
93static void rt2400pci_bbp_read(struct rt2x00_dev *rt2x00dev, 81static void rt2400pci_bbp_read(struct rt2x00_dev *rt2x00dev,
@@ -95,66 +83,58 @@ static void rt2400pci_bbp_read(struct rt2x00_dev *rt2x00dev,
95{ 83{
96 u32 reg; 84 u32 reg;
97 85
98 /* 86 mutex_lock(&rt2x00dev->csr_mutex);
99 * Wait until the BBP becomes ready.
100 */
101 reg = rt2400pci_bbp_check(rt2x00dev);
102 if (rt2x00_get_field32(reg, BBPCSR_BUSY)) {
103 ERROR(rt2x00dev, "BBPCSR register busy. Read failed.\n");
104 return;
105 }
106 87
107 /* 88 /*
108 * Write the request into the BBP. 89 * Wait until the BBP becomes available, afterwards we
90 * can safely write the read request into the register.
91 * After the data has been written, we wait until hardware
92 * returns the correct value, if at any time the register
93 * doesn't become available in time, reg will be 0xffffffff
94 * which means we return 0xff to the caller.
109 */ 95 */
110 reg = 0; 96 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
111 rt2x00_set_field32(&reg, BBPCSR_REGNUM, word); 97 reg = 0;
112 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); 98 rt2x00_set_field32(&reg, BBPCSR_REGNUM, word);
113 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0); 99 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
100 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0);
114 101
115 rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); 102 rt2x00pci_register_write(rt2x00dev, BBPCSR, reg);
116 103
117 /* 104 WAIT_FOR_BBP(rt2x00dev, &reg);
118 * Wait until the BBP becomes ready.
119 */
120 reg = rt2400pci_bbp_check(rt2x00dev);
121 if (rt2x00_get_field32(reg, BBPCSR_BUSY)) {
122 ERROR(rt2x00dev, "BBPCSR register busy. Read failed.\n");
123 *value = 0xff;
124 return;
125 } 105 }
126 106
127 *value = rt2x00_get_field32(reg, BBPCSR_VALUE); 107 *value = rt2x00_get_field32(reg, BBPCSR_VALUE);
108
109 mutex_unlock(&rt2x00dev->csr_mutex);
128} 110}
129 111
130static void rt2400pci_rf_write(struct rt2x00_dev *rt2x00dev, 112static void rt2400pci_rf_write(struct rt2x00_dev *rt2x00dev,
131 const unsigned int word, const u32 value) 113 const unsigned int word, const u32 value)
132{ 114{
133 u32 reg; 115 u32 reg;
134 unsigned int i;
135 116
136 if (!word) 117 if (!word)
137 return; 118 return;
138 119
139 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 120 mutex_lock(&rt2x00dev->csr_mutex);
140 rt2x00pci_register_read(rt2x00dev, RFCSR, &reg);
141 if (!rt2x00_get_field32(reg, RFCSR_BUSY))
142 goto rf_write;
143 udelay(REGISTER_BUSY_DELAY);
144 }
145
146 ERROR(rt2x00dev, "RFCSR register busy. Write failed.\n");
147 return;
148 121
149rf_write: 122 /*
150 reg = 0; 123 * Wait until the RF becomes available, afterwards we
151 rt2x00_set_field32(&reg, RFCSR_VALUE, value); 124 * can safely write the new data into the register.
152 rt2x00_set_field32(&reg, RFCSR_NUMBER_OF_BITS, 20); 125 */
153 rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0); 126 if (WAIT_FOR_RF(rt2x00dev, &reg)) {
154 rt2x00_set_field32(&reg, RFCSR_BUSY, 1); 127 reg = 0;
128 rt2x00_set_field32(&reg, RFCSR_VALUE, value);
129 rt2x00_set_field32(&reg, RFCSR_NUMBER_OF_BITS, 20);
130 rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0);
131 rt2x00_set_field32(&reg, RFCSR_BUSY, 1);
132
133 rt2x00pci_register_write(rt2x00dev, RFCSR, reg);
134 rt2x00_rf_write(rt2x00dev, word, value);
135 }
155 136
156 rt2x00pci_register_write(rt2x00dev, RFCSR, reg); 137 mutex_unlock(&rt2x00dev->csr_mutex);
157 rt2x00_rf_write(rt2x00dev, word, value);
158} 138}
159 139
160static void rt2400pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 140static void rt2400pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -188,43 +168,34 @@ static void rt2400pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
188} 168}
189 169
190#ifdef CONFIG_RT2X00_LIB_DEBUGFS 170#ifdef CONFIG_RT2X00_LIB_DEBUGFS
191#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u32)) )
192
193static void rt2400pci_read_csr(struct rt2x00_dev *rt2x00dev,
194 const unsigned int word, u32 *data)
195{
196 rt2x00pci_register_read(rt2x00dev, CSR_OFFSET(word), data);
197}
198
199static void rt2400pci_write_csr(struct rt2x00_dev *rt2x00dev,
200 const unsigned int word, u32 data)
201{
202 rt2x00pci_register_write(rt2x00dev, CSR_OFFSET(word), data);
203}
204
205static const struct rt2x00debug rt2400pci_rt2x00debug = { 171static const struct rt2x00debug rt2400pci_rt2x00debug = {
206 .owner = THIS_MODULE, 172 .owner = THIS_MODULE,
207 .csr = { 173 .csr = {
208 .read = rt2400pci_read_csr, 174 .read = rt2x00pci_register_read,
209 .write = rt2400pci_write_csr, 175 .write = rt2x00pci_register_write,
176 .flags = RT2X00DEBUGFS_OFFSET,
177 .word_base = CSR_REG_BASE,
210 .word_size = sizeof(u32), 178 .word_size = sizeof(u32),
211 .word_count = CSR_REG_SIZE / sizeof(u32), 179 .word_count = CSR_REG_SIZE / sizeof(u32),
212 }, 180 },
213 .eeprom = { 181 .eeprom = {
214 .read = rt2x00_eeprom_read, 182 .read = rt2x00_eeprom_read,
215 .write = rt2x00_eeprom_write, 183 .write = rt2x00_eeprom_write,
184 .word_base = EEPROM_BASE,
216 .word_size = sizeof(u16), 185 .word_size = sizeof(u16),
217 .word_count = EEPROM_SIZE / sizeof(u16), 186 .word_count = EEPROM_SIZE / sizeof(u16),
218 }, 187 },
219 .bbp = { 188 .bbp = {
220 .read = rt2400pci_bbp_read, 189 .read = rt2400pci_bbp_read,
221 .write = rt2400pci_bbp_write, 190 .write = rt2400pci_bbp_write,
191 .word_base = BBP_BASE,
222 .word_size = sizeof(u8), 192 .word_size = sizeof(u8),
223 .word_count = BBP_SIZE / sizeof(u8), 193 .word_count = BBP_SIZE / sizeof(u8),
224 }, 194 },
225 .rf = { 195 .rf = {
226 .read = rt2x00_rf_read, 196 .read = rt2x00_rf_read,
227 .write = rt2400pci_rf_write, 197 .write = rt2400pci_rf_write,
198 .word_base = RF_BASE,
228 .word_size = sizeof(u32), 199 .word_size = sizeof(u32),
229 .word_count = RF_SIZE / sizeof(u32), 200 .word_count = RF_SIZE / sizeof(u32),
230 }, 201 },
@@ -331,7 +302,7 @@ static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
331 /* 302 /*
332 * Enable beacon config 303 * Enable beacon config
333 */ 304 */
334 bcn_preload = PREAMBLE + get_duration(IEEE80211_HEADER, 20); 305 bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20);
335 rt2x00pci_register_read(rt2x00dev, BCNCSR1, &reg); 306 rt2x00pci_register_read(rt2x00dev, BCNCSR1, &reg);
336 rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload); 307 rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload);
337 rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg); 308 rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg);
@@ -376,32 +347,94 @@ static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev,
376 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); 347 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg);
377 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00); 348 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00);
378 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); 349 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04);
379 rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 10)); 350 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 10));
380 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); 351 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg);
381 352
382 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg); 353 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg);
383 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask); 354 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask);
384 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04); 355 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04);
385 rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 20)); 356 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 20));
386 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); 357 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg);
387 358
388 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg); 359 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg);
389 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask); 360 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask);
390 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04); 361 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04);
391 rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 55)); 362 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 55));
392 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); 363 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg);
393 364
394 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg); 365 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg);
395 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask); 366 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask);
396 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84); 367 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84);
397 rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 110)); 368 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 110));
398 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); 369 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg);
370
371 rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates);
372
373 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
374 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time);
375 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
376
377 rt2x00pci_register_read(rt2x00dev, CSR18, &reg);
378 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs);
379 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs);
380 rt2x00pci_register_write(rt2x00dev, CSR18, reg);
381
382 rt2x00pci_register_read(rt2x00dev, CSR19, &reg);
383 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs);
384 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs);
385 rt2x00pci_register_write(rt2x00dev, CSR19, reg);
399} 386}
400 387
401static void rt2400pci_config_phymode(struct rt2x00_dev *rt2x00dev, 388static void rt2400pci_config_ant(struct rt2x00_dev *rt2x00dev,
402 const int basic_rate_mask) 389 struct antenna_setup *ant)
403{ 390{
404 rt2x00pci_register_write(rt2x00dev, ARCSR1, basic_rate_mask); 391 u8 r1;
392 u8 r4;
393
394 /*
395 * We should never come here because rt2x00lib is supposed
396 * to catch this and send us the correct antenna explicitely.
397 */
398 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
399 ant->tx == ANTENNA_SW_DIVERSITY);
400
401 rt2400pci_bbp_read(rt2x00dev, 4, &r4);
402 rt2400pci_bbp_read(rt2x00dev, 1, &r1);
403
404 /*
405 * Configure the TX antenna.
406 */
407 switch (ant->tx) {
408 case ANTENNA_HW_DIVERSITY:
409 rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 1);
410 break;
411 case ANTENNA_A:
412 rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 0);
413 break;
414 case ANTENNA_B:
415 default:
416 rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 2);
417 break;
418 }
419
420 /*
421 * Configure the RX antenna.
422 */
423 switch (ant->rx) {
424 case ANTENNA_HW_DIVERSITY:
425 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1);
426 break;
427 case ANTENNA_A:
428 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 0);
429 break;
430 case ANTENNA_B:
431 default:
432 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2);
433 break;
434 }
435
436 rt2400pci_bbp_write(rt2x00dev, 4, r4);
437 rt2400pci_bbp_write(rt2x00dev, 1, r1);
405} 438}
406 439
407static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev, 440static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev,
@@ -460,56 +493,17 @@ static void rt2400pci_config_txpower(struct rt2x00_dev *rt2x00dev, int txpower)
460 rt2400pci_bbp_write(rt2x00dev, 3, TXPOWER_TO_DEV(txpower)); 493 rt2400pci_bbp_write(rt2x00dev, 3, TXPOWER_TO_DEV(txpower));
461} 494}
462 495
463static void rt2400pci_config_antenna(struct rt2x00_dev *rt2x00dev, 496static void rt2400pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
464 struct antenna_setup *ant) 497 struct rt2x00lib_conf *libconf)
465{ 498{
466 u8 r1; 499 u32 reg;
467 u8 r4;
468
469 /*
470 * We should never come here because rt2x00lib is supposed
471 * to catch this and send us the correct antenna explicitely.
472 */
473 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
474 ant->tx == ANTENNA_SW_DIVERSITY);
475
476 rt2400pci_bbp_read(rt2x00dev, 4, &r4);
477 rt2400pci_bbp_read(rt2x00dev, 1, &r1);
478
479 /*
480 * Configure the TX antenna.
481 */
482 switch (ant->tx) {
483 case ANTENNA_HW_DIVERSITY:
484 rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 1);
485 break;
486 case ANTENNA_A:
487 rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 0);
488 break;
489 case ANTENNA_B:
490 default:
491 rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 2);
492 break;
493 }
494
495 /*
496 * Configure the RX antenna.
497 */
498 switch (ant->rx) {
499 case ANTENNA_HW_DIVERSITY:
500 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1);
501 break;
502 case ANTENNA_A:
503 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 0);
504 break;
505 case ANTENNA_B:
506 default:
507 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2);
508 break;
509 }
510 500
511 rt2400pci_bbp_write(rt2x00dev, 4, r4); 501 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
512 rt2400pci_bbp_write(rt2x00dev, 1, r1); 502 rt2x00_set_field32(&reg, CSR11_LONG_RETRY,
503 libconf->conf->long_frame_max_tx_count);
504 rt2x00_set_field32(&reg, CSR11_SHORT_RETRY,
505 libconf->conf->short_frame_max_tx_count);
506 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
513} 507}
514 508
515static void rt2400pci_config_duration(struct rt2x00_dev *rt2x00dev, 509static void rt2400pci_config_duration(struct rt2x00_dev *rt2x00dev,
@@ -517,20 +511,6 @@ static void rt2400pci_config_duration(struct rt2x00_dev *rt2x00dev,
517{ 511{
518 u32 reg; 512 u32 reg;
519 513
520 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
521 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, libconf->slot_time);
522 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
523
524 rt2x00pci_register_read(rt2x00dev, CSR18, &reg);
525 rt2x00_set_field32(&reg, CSR18_SIFS, libconf->sifs);
526 rt2x00_set_field32(&reg, CSR18_PIFS, libconf->pifs);
527 rt2x00pci_register_write(rt2x00dev, CSR18, reg);
528
529 rt2x00pci_register_read(rt2x00dev, CSR19, &reg);
530 rt2x00_set_field32(&reg, CSR19_DIFS, libconf->difs);
531 rt2x00_set_field32(&reg, CSR19_EIFS, libconf->eifs);
532 rt2x00pci_register_write(rt2x00dev, CSR19, reg);
533
534 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg); 514 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
535 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); 515 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
536 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1); 516 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
@@ -548,16 +528,14 @@ static void rt2400pci_config(struct rt2x00_dev *rt2x00dev,
548 struct rt2x00lib_conf *libconf, 528 struct rt2x00lib_conf *libconf,
549 const unsigned int flags) 529 const unsigned int flags)
550{ 530{
551 if (flags & CONFIG_UPDATE_PHYMODE) 531 if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
552 rt2400pci_config_phymode(rt2x00dev, libconf->basic_rates);
553 if (flags & CONFIG_UPDATE_CHANNEL)
554 rt2400pci_config_channel(rt2x00dev, &libconf->rf); 532 rt2400pci_config_channel(rt2x00dev, &libconf->rf);
555 if (flags & CONFIG_UPDATE_TXPOWER) 533 if (flags & IEEE80211_CONF_CHANGE_POWER)
556 rt2400pci_config_txpower(rt2x00dev, 534 rt2400pci_config_txpower(rt2x00dev,
557 libconf->conf->power_level); 535 libconf->conf->power_level);
558 if (flags & CONFIG_UPDATE_ANTENNA) 536 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
559 rt2400pci_config_antenna(rt2x00dev, &libconf->ant); 537 rt2400pci_config_retry_limit(rt2x00dev, libconf);
560 if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) 538 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
561 rt2400pci_config_duration(rt2x00dev, libconf); 539 rt2400pci_config_duration(rt2x00dev, libconf);
562} 540}
563 541
@@ -628,36 +606,47 @@ static void rt2400pci_link_tuner(struct rt2x00_dev *rt2x00dev)
628/* 606/*
629 * Initialization functions. 607 * Initialization functions.
630 */ 608 */
631static void rt2400pci_init_rxentry(struct rt2x00_dev *rt2x00dev, 609static bool rt2400pci_get_entry_state(struct queue_entry *entry)
632 struct queue_entry *entry)
633{ 610{
634 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 611 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
635 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
636 u32 word; 612 u32 word;
637 613
638 rt2x00_desc_read(entry_priv->desc, 2, &word); 614 if (entry->queue->qid == QID_RX) {
639 rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, entry->skb->len); 615 rt2x00_desc_read(entry_priv->desc, 0, &word);
640 rt2x00_desc_write(entry_priv->desc, 2, word);
641 616
642 rt2x00_desc_read(entry_priv->desc, 1, &word); 617 return rt2x00_get_field32(word, RXD_W0_OWNER_NIC);
643 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); 618 } else {
644 rt2x00_desc_write(entry_priv->desc, 1, word); 619 rt2x00_desc_read(entry_priv->desc, 0, &word);
645 620
646 rt2x00_desc_read(entry_priv->desc, 0, &word); 621 return (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
647 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); 622 rt2x00_get_field32(word, TXD_W0_VALID));
648 rt2x00_desc_write(entry_priv->desc, 0, word); 623 }
649} 624}
650 625
651static void rt2400pci_init_txentry(struct rt2x00_dev *rt2x00dev, 626static void rt2400pci_clear_entry(struct queue_entry *entry)
652 struct queue_entry *entry)
653{ 627{
654 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 628 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
629 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
655 u32 word; 630 u32 word;
656 631
657 rt2x00_desc_read(entry_priv->desc, 0, &word); 632 if (entry->queue->qid == QID_RX) {
658 rt2x00_set_field32(&word, TXD_W0_VALID, 0); 633 rt2x00_desc_read(entry_priv->desc, 2, &word);
659 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); 634 rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, entry->skb->len);
660 rt2x00_desc_write(entry_priv->desc, 0, word); 635 rt2x00_desc_write(entry_priv->desc, 2, word);
636
637 rt2x00_desc_read(entry_priv->desc, 1, &word);
638 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
639 rt2x00_desc_write(entry_priv->desc, 1, word);
640
641 rt2x00_desc_read(entry_priv->desc, 0, &word);
642 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
643 rt2x00_desc_write(entry_priv->desc, 0, word);
644 } else {
645 rt2x00_desc_read(entry_priv->desc, 0, &word);
646 rt2x00_set_field32(&word, TXD_W0_VALID, 0);
647 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
648 rt2x00_desc_write(entry_priv->desc, 0, word);
649 }
661} 650}
662 651
663static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev) 652static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
@@ -1313,10 +1302,8 @@ static int rt2400pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1313 */ 1302 */
1314 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 1303 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
1315 if (!is_valid_ether_addr(mac)) { 1304 if (!is_valid_ether_addr(mac)) {
1316 DECLARE_MAC_BUF(macbuf);
1317
1318 random_ether_addr(mac); 1305 random_ether_addr(mac);
1319 EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac)); 1306 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
1320 } 1307 }
1321 1308
1322 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); 1309 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
@@ -1504,20 +1491,6 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1504/* 1491/*
1505 * IEEE80211 stack callback functions. 1492 * IEEE80211 stack callback functions.
1506 */ 1493 */
1507static int rt2400pci_set_retry_limit(struct ieee80211_hw *hw,
1508 u32 short_retry, u32 long_retry)
1509{
1510 struct rt2x00_dev *rt2x00dev = hw->priv;
1511 u32 reg;
1512
1513 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
1514 rt2x00_set_field32(&reg, CSR11_LONG_RETRY, long_retry);
1515 rt2x00_set_field32(&reg, CSR11_SHORT_RETRY, short_retry);
1516 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
1517
1518 return 0;
1519}
1520
1521static int rt2400pci_conf_tx(struct ieee80211_hw *hw, u16 queue, 1494static int rt2400pci_conf_tx(struct ieee80211_hw *hw, u16 queue,
1522 const struct ieee80211_tx_queue_params *params) 1495 const struct ieee80211_tx_queue_params *params)
1523{ 1496{
@@ -1576,7 +1549,6 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
1576 .config_interface = rt2x00mac_config_interface, 1549 .config_interface = rt2x00mac_config_interface,
1577 .configure_filter = rt2x00mac_configure_filter, 1550 .configure_filter = rt2x00mac_configure_filter,
1578 .get_stats = rt2x00mac_get_stats, 1551 .get_stats = rt2x00mac_get_stats,
1579 .set_retry_limit = rt2400pci_set_retry_limit,
1580 .bss_info_changed = rt2x00mac_bss_info_changed, 1552 .bss_info_changed = rt2x00mac_bss_info_changed,
1581 .conf_tx = rt2400pci_conf_tx, 1553 .conf_tx = rt2400pci_conf_tx,
1582 .get_tx_stats = rt2x00mac_get_tx_stats, 1554 .get_tx_stats = rt2x00mac_get_tx_stats,
@@ -1589,8 +1561,8 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1589 .probe_hw = rt2400pci_probe_hw, 1561 .probe_hw = rt2400pci_probe_hw,
1590 .initialize = rt2x00pci_initialize, 1562 .initialize = rt2x00pci_initialize,
1591 .uninitialize = rt2x00pci_uninitialize, 1563 .uninitialize = rt2x00pci_uninitialize,
1592 .init_rxentry = rt2400pci_init_rxentry, 1564 .get_entry_state = rt2400pci_get_entry_state,
1593 .init_txentry = rt2400pci_init_txentry, 1565 .clear_entry = rt2400pci_clear_entry,
1594 .set_device_state = rt2400pci_set_device_state, 1566 .set_device_state = rt2400pci_set_device_state,
1595 .rfkill_poll = rt2400pci_rfkill_poll, 1567 .rfkill_poll = rt2400pci_rfkill_poll,
1596 .link_stats = rt2400pci_link_stats, 1568 .link_stats = rt2400pci_link_stats,
@@ -1604,6 +1576,7 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1604 .config_filter = rt2400pci_config_filter, 1576 .config_filter = rt2400pci_config_filter,
1605 .config_intf = rt2400pci_config_intf, 1577 .config_intf = rt2400pci_config_intf,
1606 .config_erp = rt2400pci_config_erp, 1578 .config_erp = rt2400pci_config_erp,
1579 .config_ant = rt2400pci_config_ant,
1607 .config = rt2400pci_config, 1580 .config = rt2400pci_config,
1608}; 1581};
1609 1582
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index bbff381ce396..9aefda4ab3c2 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -46,7 +46,9 @@
46#define CSR_REG_SIZE 0x014c 46#define CSR_REG_SIZE 0x014c
47#define EEPROM_BASE 0x0000 47#define EEPROM_BASE 0x0000
48#define EEPROM_SIZE 0x0100 48#define EEPROM_SIZE 0x0100
49#define BBP_BASE 0x0000
49#define BBP_SIZE 0x0020 50#define BBP_SIZE 0x0020
51#define RF_BASE 0x0000
50#define RF_SIZE 0x0010 52#define RF_SIZE 0x0010
51 53
52/* 54/*
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index ef42cc04a2d7..d3bc218ec85c 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -49,45 +49,33 @@
49 * the access attempt is considered to have failed, 49 * the access attempt is considered to have failed,
50 * and we will print an error. 50 * and we will print an error.
51 */ 51 */
52static u32 rt2500pci_bbp_check(struct rt2x00_dev *rt2x00dev) 52#define WAIT_FOR_BBP(__dev, __reg) \
53{ 53 rt2x00pci_regbusy_read((__dev), BBPCSR, BBPCSR_BUSY, (__reg))
54 u32 reg; 54#define WAIT_FOR_RF(__dev, __reg) \
55 unsigned int i; 55 rt2x00pci_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg))
56
57 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
58 rt2x00pci_register_read(rt2x00dev, BBPCSR, &reg);
59 if (!rt2x00_get_field32(reg, BBPCSR_BUSY))
60 break;
61 udelay(REGISTER_BUSY_DELAY);
62 }
63
64 return reg;
65}
66 56
67static void rt2500pci_bbp_write(struct rt2x00_dev *rt2x00dev, 57static void rt2500pci_bbp_write(struct rt2x00_dev *rt2x00dev,
68 const unsigned int word, const u8 value) 58 const unsigned int word, const u8 value)
69{ 59{
70 u32 reg; 60 u32 reg;
71 61
72 /* 62 mutex_lock(&rt2x00dev->csr_mutex);
73 * Wait until the BBP becomes ready.
74 */
75 reg = rt2500pci_bbp_check(rt2x00dev);
76 if (rt2x00_get_field32(reg, BBPCSR_BUSY)) {
77 ERROR(rt2x00dev, "BBPCSR register busy. Write failed.\n");
78 return;
79 }
80 63
81 /* 64 /*
82 * Write the data into the BBP. 65 * Wait until the BBP becomes available, afterwards we
66 * can safely write the new data into the register.
83 */ 67 */
84 reg = 0; 68 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
85 rt2x00_set_field32(&reg, BBPCSR_VALUE, value); 69 reg = 0;
86 rt2x00_set_field32(&reg, BBPCSR_REGNUM, word); 70 rt2x00_set_field32(&reg, BBPCSR_VALUE, value);
87 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); 71 rt2x00_set_field32(&reg, BBPCSR_REGNUM, word);
88 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1); 72 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
73 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1);
74
75 rt2x00pci_register_write(rt2x00dev, BBPCSR, reg);
76 }
89 77
90 rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); 78 mutex_unlock(&rt2x00dev->csr_mutex);
91} 79}
92 80
93static void rt2500pci_bbp_read(struct rt2x00_dev *rt2x00dev, 81static void rt2500pci_bbp_read(struct rt2x00_dev *rt2x00dev,
@@ -95,66 +83,58 @@ static void rt2500pci_bbp_read(struct rt2x00_dev *rt2x00dev,
95{ 83{
96 u32 reg; 84 u32 reg;
97 85
98 /* 86 mutex_lock(&rt2x00dev->csr_mutex);
99 * Wait until the BBP becomes ready.
100 */
101 reg = rt2500pci_bbp_check(rt2x00dev);
102 if (rt2x00_get_field32(reg, BBPCSR_BUSY)) {
103 ERROR(rt2x00dev, "BBPCSR register busy. Read failed.\n");
104 return;
105 }
106 87
107 /* 88 /*
108 * Write the request into the BBP. 89 * Wait until the BBP becomes available, afterwards we
90 * can safely write the read request into the register.
91 * After the data has been written, we wait until hardware
92 * returns the correct value, if at any time the register
93 * doesn't become available in time, reg will be 0xffffffff
94 * which means we return 0xff to the caller.
109 */ 95 */
110 reg = 0; 96 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
111 rt2x00_set_field32(&reg, BBPCSR_REGNUM, word); 97 reg = 0;
112 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); 98 rt2x00_set_field32(&reg, BBPCSR_REGNUM, word);
113 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0); 99 rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
100 rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0);
114 101
115 rt2x00pci_register_write(rt2x00dev, BBPCSR, reg); 102 rt2x00pci_register_write(rt2x00dev, BBPCSR, reg);
116 103
117 /* 104 WAIT_FOR_BBP(rt2x00dev, &reg);
118 * Wait until the BBP becomes ready.
119 */
120 reg = rt2500pci_bbp_check(rt2x00dev);
121 if (rt2x00_get_field32(reg, BBPCSR_BUSY)) {
122 ERROR(rt2x00dev, "BBPCSR register busy. Read failed.\n");
123 *value = 0xff;
124 return;
125 } 105 }
126 106
127 *value = rt2x00_get_field32(reg, BBPCSR_VALUE); 107 *value = rt2x00_get_field32(reg, BBPCSR_VALUE);
108
109 mutex_unlock(&rt2x00dev->csr_mutex);
128} 110}
129 111
130static void rt2500pci_rf_write(struct rt2x00_dev *rt2x00dev, 112static void rt2500pci_rf_write(struct rt2x00_dev *rt2x00dev,
131 const unsigned int word, const u32 value) 113 const unsigned int word, const u32 value)
132{ 114{
133 u32 reg; 115 u32 reg;
134 unsigned int i;
135 116
136 if (!word) 117 if (!word)
137 return; 118 return;
138 119
139 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 120 mutex_lock(&rt2x00dev->csr_mutex);
140 rt2x00pci_register_read(rt2x00dev, RFCSR, &reg);
141 if (!rt2x00_get_field32(reg, RFCSR_BUSY))
142 goto rf_write;
143 udelay(REGISTER_BUSY_DELAY);
144 }
145
146 ERROR(rt2x00dev, "RFCSR register busy. Write failed.\n");
147 return;
148 121
149rf_write: 122 /*
150 reg = 0; 123 * Wait until the RF becomes available, afterwards we
151 rt2x00_set_field32(&reg, RFCSR_VALUE, value); 124 * can safely write the new data into the register.
152 rt2x00_set_field32(&reg, RFCSR_NUMBER_OF_BITS, 20); 125 */
153 rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0); 126 if (WAIT_FOR_RF(rt2x00dev, &reg)) {
154 rt2x00_set_field32(&reg, RFCSR_BUSY, 1); 127 reg = 0;
128 rt2x00_set_field32(&reg, RFCSR_VALUE, value);
129 rt2x00_set_field32(&reg, RFCSR_NUMBER_OF_BITS, 20);
130 rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0);
131 rt2x00_set_field32(&reg, RFCSR_BUSY, 1);
132
133 rt2x00pci_register_write(rt2x00dev, RFCSR, reg);
134 rt2x00_rf_write(rt2x00dev, word, value);
135 }
155 136
156 rt2x00pci_register_write(rt2x00dev, RFCSR, reg); 137 mutex_unlock(&rt2x00dev->csr_mutex);
157 rt2x00_rf_write(rt2x00dev, word, value);
158} 138}
159 139
160static void rt2500pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 140static void rt2500pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -188,43 +168,34 @@ static void rt2500pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
188} 168}
189 169
190#ifdef CONFIG_RT2X00_LIB_DEBUGFS 170#ifdef CONFIG_RT2X00_LIB_DEBUGFS
191#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u32)) )
192
193static void rt2500pci_read_csr(struct rt2x00_dev *rt2x00dev,
194 const unsigned int word, u32 *data)
195{
196 rt2x00pci_register_read(rt2x00dev, CSR_OFFSET(word), data);
197}
198
199static void rt2500pci_write_csr(struct rt2x00_dev *rt2x00dev,
200 const unsigned int word, u32 data)
201{
202 rt2x00pci_register_write(rt2x00dev, CSR_OFFSET(word), data);
203}
204
205static const struct rt2x00debug rt2500pci_rt2x00debug = { 171static const struct rt2x00debug rt2500pci_rt2x00debug = {
206 .owner = THIS_MODULE, 172 .owner = THIS_MODULE,
207 .csr = { 173 .csr = {
208 .read = rt2500pci_read_csr, 174 .read = rt2x00pci_register_read,
209 .write = rt2500pci_write_csr, 175 .write = rt2x00pci_register_write,
176 .flags = RT2X00DEBUGFS_OFFSET,
177 .word_base = CSR_REG_BASE,
210 .word_size = sizeof(u32), 178 .word_size = sizeof(u32),
211 .word_count = CSR_REG_SIZE / sizeof(u32), 179 .word_count = CSR_REG_SIZE / sizeof(u32),
212 }, 180 },
213 .eeprom = { 181 .eeprom = {
214 .read = rt2x00_eeprom_read, 182 .read = rt2x00_eeprom_read,
215 .write = rt2x00_eeprom_write, 183 .write = rt2x00_eeprom_write,
184 .word_base = EEPROM_BASE,
216 .word_size = sizeof(u16), 185 .word_size = sizeof(u16),
217 .word_count = EEPROM_SIZE / sizeof(u16), 186 .word_count = EEPROM_SIZE / sizeof(u16),
218 }, 187 },
219 .bbp = { 188 .bbp = {
220 .read = rt2500pci_bbp_read, 189 .read = rt2500pci_bbp_read,
221 .write = rt2500pci_bbp_write, 190 .write = rt2500pci_bbp_write,
191 .word_base = BBP_BASE,
222 .word_size = sizeof(u8), 192 .word_size = sizeof(u8),
223 .word_count = BBP_SIZE / sizeof(u8), 193 .word_count = BBP_SIZE / sizeof(u8),
224 }, 194 },
225 .rf = { 195 .rf = {
226 .read = rt2x00_rf_read, 196 .read = rt2x00_rf_read,
227 .write = rt2500pci_rf_write, 197 .write = rt2500pci_rf_write,
198 .word_base = RF_BASE,
228 .word_size = sizeof(u32), 199 .word_size = sizeof(u32),
229 .word_count = RF_SIZE / sizeof(u32), 200 .word_count = RF_SIZE / sizeof(u32),
230 }, 201 },
@@ -336,7 +307,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
336 /* 307 /*
337 * Enable beacon config 308 * Enable beacon config
338 */ 309 */
339 bcn_preload = PREAMBLE + get_duration(IEEE80211_HEADER, 20); 310 bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20);
340 rt2x00pci_register_read(rt2x00dev, BCNCSR1, &reg); 311 rt2x00pci_register_read(rt2x00dev, BCNCSR1, &reg);
341 rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload); 312 rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload);
342 rt2x00_set_field32(&reg, BCNCSR1_BEACON_CWMIN, queue->cw_min); 313 rt2x00_set_field32(&reg, BCNCSR1_BEACON_CWMIN, queue->cw_min);
@@ -382,32 +353,114 @@ static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev,
382 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); 353 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg);
383 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00); 354 rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00);
384 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); 355 rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04);
385 rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 10)); 356 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 10));
386 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg); 357 rt2x00pci_register_write(rt2x00dev, ARCSR2, reg);
387 358
388 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg); 359 rt2x00pci_register_read(rt2x00dev, ARCSR3, &reg);
389 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask); 360 rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask);
390 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04); 361 rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04);
391 rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 20)); 362 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 20));
392 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg); 363 rt2x00pci_register_write(rt2x00dev, ARCSR3, reg);
393 364
394 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg); 365 rt2x00pci_register_read(rt2x00dev, ARCSR4, &reg);
395 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask); 366 rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask);
396 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04); 367 rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04);
397 rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 55)); 368 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 55));
398 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg); 369 rt2x00pci_register_write(rt2x00dev, ARCSR4, reg);
399 370
400 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg); 371 rt2x00pci_register_read(rt2x00dev, ARCSR5, &reg);
401 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask); 372 rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask);
402 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84); 373 rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84);
403 rt2x00_set_field32(&reg, ARCSR2_LENGTH, get_duration(ACK_SIZE, 110)); 374 rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 110));
404 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg); 375 rt2x00pci_register_write(rt2x00dev, ARCSR5, reg);
376
377 rt2x00pci_register_write(rt2x00dev, ARCSR1, erp->basic_rates);
378
379 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
380 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time);
381 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
382
383 rt2x00pci_register_read(rt2x00dev, CSR18, &reg);
384 rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs);
385 rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs);
386 rt2x00pci_register_write(rt2x00dev, CSR18, reg);
387
388 rt2x00pci_register_read(rt2x00dev, CSR19, &reg);
389 rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs);
390 rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs);
391 rt2x00pci_register_write(rt2x00dev, CSR19, reg);
405} 392}
406 393
407static void rt2500pci_config_phymode(struct rt2x00_dev *rt2x00dev, 394static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
408 const int basic_rate_mask) 395 struct antenna_setup *ant)
409{ 396{
410 rt2x00pci_register_write(rt2x00dev, ARCSR1, basic_rate_mask); 397 u32 reg;
398 u8 r14;
399 u8 r2;
400
401 /*
402 * We should never come here because rt2x00lib is supposed
403 * to catch this and send us the correct antenna explicitely.
404 */
405 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
406 ant->tx == ANTENNA_SW_DIVERSITY);
407
408 rt2x00pci_register_read(rt2x00dev, BBPCSR1, &reg);
409 rt2500pci_bbp_read(rt2x00dev, 14, &r14);
410 rt2500pci_bbp_read(rt2x00dev, 2, &r2);
411
412 /*
413 * Configure the TX antenna.
414 */
415 switch (ant->tx) {
416 case ANTENNA_A:
417 rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0);
418 rt2x00_set_field32(&reg, BBPCSR1_CCK, 0);
419 rt2x00_set_field32(&reg, BBPCSR1_OFDM, 0);
420 break;
421 case ANTENNA_B:
422 default:
423 rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2);
424 rt2x00_set_field32(&reg, BBPCSR1_CCK, 2);
425 rt2x00_set_field32(&reg, BBPCSR1_OFDM, 2);
426 break;
427 }
428
429 /*
430 * Configure the RX antenna.
431 */
432 switch (ant->rx) {
433 case ANTENNA_A:
434 rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0);
435 break;
436 case ANTENNA_B:
437 default:
438 rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2);
439 break;
440 }
441
442 /*
443 * RT2525E and RT5222 need to flip TX I/Q
444 */
445 if (rt2x00_rf(&rt2x00dev->chip, RF2525E) ||
446 rt2x00_rf(&rt2x00dev->chip, RF5222)) {
447 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
448 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1);
449 rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1);
450
451 /*
452 * RT2525E does not need RX I/Q Flip.
453 */
454 if (rt2x00_rf(&rt2x00dev->chip, RF2525E))
455 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
456 } else {
457 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0);
458 rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 0);
459 }
460
461 rt2x00pci_register_write(rt2x00dev, BBPCSR1, reg);
462 rt2500pci_bbp_write(rt2x00dev, 14, r14);
463 rt2500pci_bbp_write(rt2x00dev, 2, r2);
411} 464}
412 465
413static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev, 466static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
@@ -489,76 +542,17 @@ static void rt2500pci_config_txpower(struct rt2x00_dev *rt2x00dev,
489 rt2500pci_rf_write(rt2x00dev, 3, rf3); 542 rt2500pci_rf_write(rt2x00dev, 3, rf3);
490} 543}
491 544
492static void rt2500pci_config_antenna(struct rt2x00_dev *rt2x00dev, 545static void rt2500pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
493 struct antenna_setup *ant) 546 struct rt2x00lib_conf *libconf)
494{ 547{
495 u32 reg; 548 u32 reg;
496 u8 r14;
497 u8 r2;
498
499 /*
500 * We should never come here because rt2x00lib is supposed
501 * to catch this and send us the correct antenna explicitely.
502 */
503 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
504 ant->tx == ANTENNA_SW_DIVERSITY);
505
506 rt2x00pci_register_read(rt2x00dev, BBPCSR1, &reg);
507 rt2500pci_bbp_read(rt2x00dev, 14, &r14);
508 rt2500pci_bbp_read(rt2x00dev, 2, &r2);
509
510 /*
511 * Configure the TX antenna.
512 */
513 switch (ant->tx) {
514 case ANTENNA_A:
515 rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0);
516 rt2x00_set_field32(&reg, BBPCSR1_CCK, 0);
517 rt2x00_set_field32(&reg, BBPCSR1_OFDM, 0);
518 break;
519 case ANTENNA_B:
520 default:
521 rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2);
522 rt2x00_set_field32(&reg, BBPCSR1_CCK, 2);
523 rt2x00_set_field32(&reg, BBPCSR1_OFDM, 2);
524 break;
525 }
526
527 /*
528 * Configure the RX antenna.
529 */
530 switch (ant->rx) {
531 case ANTENNA_A:
532 rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0);
533 break;
534 case ANTENNA_B:
535 default:
536 rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2);
537 break;
538 }
539
540 /*
541 * RT2525E and RT5222 need to flip TX I/Q
542 */
543 if (rt2x00_rf(&rt2x00dev->chip, RF2525E) ||
544 rt2x00_rf(&rt2x00dev->chip, RF5222)) {
545 rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
546 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1);
547 rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1);
548 549
549 /* 550 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
550 * RT2525E does not need RX I/Q Flip. 551 rt2x00_set_field32(&reg, CSR11_LONG_RETRY,
551 */ 552 libconf->conf->long_frame_max_tx_count);
552 if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) 553 rt2x00_set_field32(&reg, CSR11_SHORT_RETRY,
553 rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); 554 libconf->conf->short_frame_max_tx_count);
554 } else { 555 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
555 rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0);
556 rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 0);
557 }
558
559 rt2x00pci_register_write(rt2x00dev, BBPCSR1, reg);
560 rt2500pci_bbp_write(rt2x00dev, 14, r14);
561 rt2500pci_bbp_write(rt2x00dev, 2, r2);
562} 556}
563 557
564static void rt2500pci_config_duration(struct rt2x00_dev *rt2x00dev, 558static void rt2500pci_config_duration(struct rt2x00_dev *rt2x00dev,
@@ -566,20 +560,6 @@ static void rt2500pci_config_duration(struct rt2x00_dev *rt2x00dev,
566{ 560{
567 u32 reg; 561 u32 reg;
568 562
569 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
570 rt2x00_set_field32(&reg, CSR11_SLOT_TIME, libconf->slot_time);
571 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
572
573 rt2x00pci_register_read(rt2x00dev, CSR18, &reg);
574 rt2x00_set_field32(&reg, CSR18_SIFS, libconf->sifs);
575 rt2x00_set_field32(&reg, CSR18_PIFS, libconf->pifs);
576 rt2x00pci_register_write(rt2x00dev, CSR18, reg);
577
578 rt2x00pci_register_read(rt2x00dev, CSR19, &reg);
579 rt2x00_set_field32(&reg, CSR19_DIFS, libconf->difs);
580 rt2x00_set_field32(&reg, CSR19_EIFS, libconf->eifs);
581 rt2x00pci_register_write(rt2x00dev, CSR19, reg);
582
583 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg); 563 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
584 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); 564 rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER);
585 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1); 565 rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
@@ -597,17 +577,16 @@ static void rt2500pci_config(struct rt2x00_dev *rt2x00dev,
597 struct rt2x00lib_conf *libconf, 577 struct rt2x00lib_conf *libconf,
598 const unsigned int flags) 578 const unsigned int flags)
599{ 579{
600 if (flags & CONFIG_UPDATE_PHYMODE) 580 if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
601 rt2500pci_config_phymode(rt2x00dev, libconf->basic_rates);
602 if (flags & CONFIG_UPDATE_CHANNEL)
603 rt2500pci_config_channel(rt2x00dev, &libconf->rf, 581 rt2500pci_config_channel(rt2x00dev, &libconf->rf,
604 libconf->conf->power_level); 582 libconf->conf->power_level);
605 if ((flags & CONFIG_UPDATE_TXPOWER) && !(flags & CONFIG_UPDATE_CHANNEL)) 583 if ((flags & IEEE80211_CONF_CHANGE_POWER) &&
584 !(flags & IEEE80211_CONF_CHANGE_CHANNEL))
606 rt2500pci_config_txpower(rt2x00dev, 585 rt2500pci_config_txpower(rt2x00dev,
607 libconf->conf->power_level); 586 libconf->conf->power_level);
608 if (flags & CONFIG_UPDATE_ANTENNA) 587 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
609 rt2500pci_config_antenna(rt2x00dev, &libconf->ant); 588 rt2500pci_config_retry_limit(rt2x00dev, libconf);
610 if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) 589 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
611 rt2500pci_config_duration(rt2x00dev, libconf); 590 rt2500pci_config_duration(rt2x00dev, libconf);
612} 591}
613 592
@@ -723,32 +702,43 @@ dynamic_cca_tune:
723/* 702/*
724 * Initialization functions. 703 * Initialization functions.
725 */ 704 */
726static void rt2500pci_init_rxentry(struct rt2x00_dev *rt2x00dev, 705static bool rt2500pci_get_entry_state(struct queue_entry *entry)
727 struct queue_entry *entry)
728{ 706{
729 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 707 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
730 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
731 u32 word; 708 u32 word;
732 709
733 rt2x00_desc_read(entry_priv->desc, 1, &word); 710 if (entry->queue->qid == QID_RX) {
734 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); 711 rt2x00_desc_read(entry_priv->desc, 0, &word);
735 rt2x00_desc_write(entry_priv->desc, 1, word); 712
713 return rt2x00_get_field32(word, RXD_W0_OWNER_NIC);
714 } else {
715 rt2x00_desc_read(entry_priv->desc, 0, &word);
736 716
737 rt2x00_desc_read(entry_priv->desc, 0, &word); 717 return (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
738 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); 718 rt2x00_get_field32(word, TXD_W0_VALID));
739 rt2x00_desc_write(entry_priv->desc, 0, word); 719 }
740} 720}
741 721
742static void rt2500pci_init_txentry(struct rt2x00_dev *rt2x00dev, 722static void rt2500pci_clear_entry(struct queue_entry *entry)
743 struct queue_entry *entry)
744{ 723{
745 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 724 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
725 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
746 u32 word; 726 u32 word;
747 727
748 rt2x00_desc_read(entry_priv->desc, 0, &word); 728 if (entry->queue->qid == QID_RX) {
749 rt2x00_set_field32(&word, TXD_W0_VALID, 0); 729 rt2x00_desc_read(entry_priv->desc, 1, &word);
750 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); 730 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
751 rt2x00_desc_write(entry_priv->desc, 0, word); 731 rt2x00_desc_write(entry_priv->desc, 1, word);
732
733 rt2x00_desc_read(entry_priv->desc, 0, &word);
734 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
735 rt2x00_desc_write(entry_priv->desc, 0, word);
736 } else {
737 rt2x00_desc_read(entry_priv->desc, 0, &word);
738 rt2x00_set_field32(&word, TXD_W0_VALID, 0);
739 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
740 rt2x00_desc_write(entry_priv->desc, 0, word);
741 }
752} 742}
753 743
754static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev) 744static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
@@ -1451,11 +1441,8 @@ static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1451 */ 1441 */
1452 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 1442 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
1453 if (!is_valid_ether_addr(mac)) { 1443 if (!is_valid_ether_addr(mac)) {
1454 DECLARE_MAC_BUF(macbuf);
1455
1456 random_ether_addr(mac); 1444 random_ether_addr(mac);
1457 EEPROM(rt2x00dev, "MAC: %s\n", 1445 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
1458 print_mac(macbuf, mac));
1459 } 1446 }
1460 1447
1461 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); 1448 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
@@ -1830,20 +1817,6 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1830/* 1817/*
1831 * IEEE80211 stack callback functions. 1818 * IEEE80211 stack callback functions.
1832 */ 1819 */
1833static int rt2500pci_set_retry_limit(struct ieee80211_hw *hw,
1834 u32 short_retry, u32 long_retry)
1835{
1836 struct rt2x00_dev *rt2x00dev = hw->priv;
1837 u32 reg;
1838
1839 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
1840 rt2x00_set_field32(&reg, CSR11_LONG_RETRY, long_retry);
1841 rt2x00_set_field32(&reg, CSR11_SHORT_RETRY, short_retry);
1842 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
1843
1844 return 0;
1845}
1846
1847static u64 rt2500pci_get_tsf(struct ieee80211_hw *hw) 1820static u64 rt2500pci_get_tsf(struct ieee80211_hw *hw)
1848{ 1821{
1849 struct rt2x00_dev *rt2x00dev = hw->priv; 1822 struct rt2x00_dev *rt2x00dev = hw->priv;
@@ -1877,7 +1850,6 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
1877 .config_interface = rt2x00mac_config_interface, 1850 .config_interface = rt2x00mac_config_interface,
1878 .configure_filter = rt2x00mac_configure_filter, 1851 .configure_filter = rt2x00mac_configure_filter,
1879 .get_stats = rt2x00mac_get_stats, 1852 .get_stats = rt2x00mac_get_stats,
1880 .set_retry_limit = rt2500pci_set_retry_limit,
1881 .bss_info_changed = rt2x00mac_bss_info_changed, 1853 .bss_info_changed = rt2x00mac_bss_info_changed,
1882 .conf_tx = rt2x00mac_conf_tx, 1854 .conf_tx = rt2x00mac_conf_tx,
1883 .get_tx_stats = rt2x00mac_get_tx_stats, 1855 .get_tx_stats = rt2x00mac_get_tx_stats,
@@ -1890,8 +1862,8 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
1890 .probe_hw = rt2500pci_probe_hw, 1862 .probe_hw = rt2500pci_probe_hw,
1891 .initialize = rt2x00pci_initialize, 1863 .initialize = rt2x00pci_initialize,
1892 .uninitialize = rt2x00pci_uninitialize, 1864 .uninitialize = rt2x00pci_uninitialize,
1893 .init_rxentry = rt2500pci_init_rxentry, 1865 .get_entry_state = rt2500pci_get_entry_state,
1894 .init_txentry = rt2500pci_init_txentry, 1866 .clear_entry = rt2500pci_clear_entry,
1895 .set_device_state = rt2500pci_set_device_state, 1867 .set_device_state = rt2500pci_set_device_state,
1896 .rfkill_poll = rt2500pci_rfkill_poll, 1868 .rfkill_poll = rt2500pci_rfkill_poll,
1897 .link_stats = rt2500pci_link_stats, 1869 .link_stats = rt2500pci_link_stats,
@@ -1905,6 +1877,7 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
1905 .config_filter = rt2500pci_config_filter, 1877 .config_filter = rt2500pci_config_filter,
1906 .config_intf = rt2500pci_config_intf, 1878 .config_intf = rt2500pci_config_intf,
1907 .config_erp = rt2500pci_config_erp, 1879 .config_erp = rt2500pci_config_erp,
1880 .config_ant = rt2500pci_config_ant,
1908 .config = rt2500pci_config, 1881 .config = rt2500pci_config,
1909}; 1882};
1910 1883
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 8c26bef6cf49..e135247f7f89 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -57,7 +57,9 @@
57#define CSR_REG_SIZE 0x0174 57#define CSR_REG_SIZE 0x0174
58#define EEPROM_BASE 0x0000 58#define EEPROM_BASE 0x0000
59#define EEPROM_SIZE 0x0200 59#define EEPROM_SIZE 0x0200
60#define BBP_BASE 0x0000
60#define BBP_SIZE 0x0040 61#define BBP_SIZE 0x0040
62#define RF_BASE 0x0000
61#define RF_SIZE 0x0014 63#define RF_SIZE 0x0014
62 64
63/* 65/*
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index d3bf7bba611a..30028e2422fc 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -36,6 +36,13 @@
36#include "rt2500usb.h" 36#include "rt2500usb.h"
37 37
38/* 38/*
39 * Allow hardware encryption to be disabled.
40 */
41static int modparam_nohwcrypt = 1;
42module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
43MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
44
45/*
39 * Register access. 46 * Register access.
40 * All access to the CSR registers will go through the methods 47 * All access to the CSR registers will go through the methods
41 * rt2500usb_register_read and rt2500usb_register_write. 48 * rt2500usb_register_read and rt2500usb_register_write.
@@ -47,7 +54,7 @@
47 * between each attampt. When the busy bit is still set at that time, 54 * between each attampt. When the busy bit is still set at that time,
48 * the access attempt is considered to have failed, 55 * the access attempt is considered to have failed,
49 * and we will print an error. 56 * and we will print an error.
50 * If the usb_cache_mutex is already held then the _lock variants must 57 * If the csr_mutex is already held then the _lock variants must
51 * be used instead. 58 * be used instead.
52 */ 59 */
53static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, 60static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
@@ -57,7 +64,7 @@ static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
57 __le16 reg; 64 __le16 reg;
58 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, 65 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
59 USB_VENDOR_REQUEST_IN, offset, 66 USB_VENDOR_REQUEST_IN, offset,
60 &reg, sizeof(u16), REGISTER_TIMEOUT); 67 &reg, sizeof(reg), REGISTER_TIMEOUT);
61 *value = le16_to_cpu(reg); 68 *value = le16_to_cpu(reg);
62} 69}
63 70
@@ -68,7 +75,7 @@ static inline void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
68 __le16 reg; 75 __le16 reg;
69 rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ, 76 rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
70 USB_VENDOR_REQUEST_IN, offset, 77 USB_VENDOR_REQUEST_IN, offset,
71 &reg, sizeof(u16), REGISTER_TIMEOUT); 78 &reg, sizeof(reg), REGISTER_TIMEOUT);
72 *value = le16_to_cpu(reg); 79 *value = le16_to_cpu(reg);
73} 80}
74 81
@@ -89,7 +96,7 @@ static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
89 __le16 reg = cpu_to_le16(value); 96 __le16 reg = cpu_to_le16(value);
90 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, 97 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
91 USB_VENDOR_REQUEST_OUT, offset, 98 USB_VENDOR_REQUEST_OUT, offset,
92 &reg, sizeof(u16), REGISTER_TIMEOUT); 99 &reg, sizeof(reg), REGISTER_TIMEOUT);
93} 100}
94 101
95static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev, 102static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
@@ -99,7 +106,7 @@ static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
99 __le16 reg = cpu_to_le16(value); 106 __le16 reg = cpu_to_le16(value);
100 rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_WRITE, 107 rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_WRITE,
101 USB_VENDOR_REQUEST_OUT, offset, 108 USB_VENDOR_REQUEST_OUT, offset,
102 &reg, sizeof(u16), REGISTER_TIMEOUT); 109 &reg, sizeof(reg), REGISTER_TIMEOUT);
103} 110}
104 111
105static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, 112static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
@@ -112,53 +119,53 @@ static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
112 REGISTER_TIMEOUT16(length)); 119 REGISTER_TIMEOUT16(length));
113} 120}
114 121
115static u16 rt2500usb_bbp_check(struct rt2x00_dev *rt2x00dev) 122static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
123 const unsigned int offset,
124 struct rt2x00_field16 field,
125 u16 *reg)
116{ 126{
117 u16 reg;
118 unsigned int i; 127 unsigned int i;
119 128
120 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 129 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
121 rt2500usb_register_read_lock(rt2x00dev, PHY_CSR8, &reg); 130 rt2500usb_register_read_lock(rt2x00dev, offset, reg);
122 if (!rt2x00_get_field16(reg, PHY_CSR8_BUSY)) 131 if (!rt2x00_get_field16(*reg, field))
123 break; 132 return 1;
124 udelay(REGISTER_BUSY_DELAY); 133 udelay(REGISTER_BUSY_DELAY);
125 } 134 }
126 135
127 return reg; 136 ERROR(rt2x00dev, "Indirect register access failed: "
137 "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
138 *reg = ~0;
139
140 return 0;
128} 141}
129 142
143#define WAIT_FOR_BBP(__dev, __reg) \
144 rt2500usb_regbusy_read((__dev), PHY_CSR8, PHY_CSR8_BUSY, (__reg))
145#define WAIT_FOR_RF(__dev, __reg) \
146 rt2500usb_regbusy_read((__dev), PHY_CSR10, PHY_CSR10_RF_BUSY, (__reg))
147
130static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev, 148static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev,
131 const unsigned int word, const u8 value) 149 const unsigned int word, const u8 value)
132{ 150{
133 u16 reg; 151 u16 reg;
134 152
135 mutex_lock(&rt2x00dev->usb_cache_mutex); 153 mutex_lock(&rt2x00dev->csr_mutex);
136 154
137 /* 155 /*
138 * Wait until the BBP becomes ready. 156 * Wait until the BBP becomes available, afterwards we
157 * can safely write the new data into the register.
139 */ 158 */
140 reg = rt2500usb_bbp_check(rt2x00dev); 159 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
141 if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) 160 reg = 0;
142 goto exit_fail; 161 rt2x00_set_field16(&reg, PHY_CSR7_DATA, value);
143 162 rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word);
144 /* 163 rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 0);
145 * Write the data into the BBP.
146 */
147 reg = 0;
148 rt2x00_set_field16(&reg, PHY_CSR7_DATA, value);
149 rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word);
150 rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 0);
151
152 rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg);
153
154 mutex_unlock(&rt2x00dev->usb_cache_mutex);
155
156 return;
157 164
158exit_fail: 165 rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg);
159 mutex_unlock(&rt2x00dev->usb_cache_mutex); 166 }
160 167
161 ERROR(rt2x00dev, "PHY_CSR8 register busy. Write failed.\n"); 168 mutex_unlock(&rt2x00dev->csr_mutex);
162} 169}
163 170
164static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev, 171static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
@@ -166,122 +173,107 @@ static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
166{ 173{
167 u16 reg; 174 u16 reg;
168 175
169 mutex_lock(&rt2x00dev->usb_cache_mutex); 176 mutex_lock(&rt2x00dev->csr_mutex);
170 177
171 /* 178 /*
172 * Wait until the BBP becomes ready. 179 * Wait until the BBP becomes available, afterwards we
180 * can safely write the read request into the register.
181 * After the data has been written, we wait until hardware
182 * returns the correct value, if at any time the register
183 * doesn't become available in time, reg will be 0xffffffff
184 * which means we return 0xff to the caller.
173 */ 185 */
174 reg = rt2500usb_bbp_check(rt2x00dev); 186 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
175 if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) 187 reg = 0;
176 goto exit_fail; 188 rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word);
177 189 rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 1);
178 /*
179 * Write the request into the BBP.
180 */
181 reg = 0;
182 rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word);
183 rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 1);
184 190
185 rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg); 191 rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg);
186 192
187 /* 193 if (WAIT_FOR_BBP(rt2x00dev, &reg))
188 * Wait until the BBP becomes ready. 194 rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7, &reg);
189 */ 195 }
190 reg = rt2500usb_bbp_check(rt2x00dev);
191 if (rt2x00_get_field16(reg, PHY_CSR8_BUSY))
192 goto exit_fail;
193 196
194 rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7, &reg);
195 *value = rt2x00_get_field16(reg, PHY_CSR7_DATA); 197 *value = rt2x00_get_field16(reg, PHY_CSR7_DATA);
196 198
197 mutex_unlock(&rt2x00dev->usb_cache_mutex); 199 mutex_unlock(&rt2x00dev->csr_mutex);
198
199 return;
200
201exit_fail:
202 mutex_unlock(&rt2x00dev->usb_cache_mutex);
203
204 ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n");
205 *value = 0xff;
206} 200}
207 201
208static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev, 202static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev,
209 const unsigned int word, const u32 value) 203 const unsigned int word, const u32 value)
210{ 204{
211 u16 reg; 205 u16 reg;
212 unsigned int i;
213 206
214 if (!word) 207 if (!word)
215 return; 208 return;
216 209
217 mutex_lock(&rt2x00dev->usb_cache_mutex); 210 mutex_lock(&rt2x00dev->csr_mutex);
218
219 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
220 rt2500usb_register_read_lock(rt2x00dev, PHY_CSR10, &reg);
221 if (!rt2x00_get_field16(reg, PHY_CSR10_RF_BUSY))
222 goto rf_write;
223 udelay(REGISTER_BUSY_DELAY);
224 }
225 211
226 mutex_unlock(&rt2x00dev->usb_cache_mutex); 212 /*
227 ERROR(rt2x00dev, "PHY_CSR10 register busy. Write failed.\n"); 213 * Wait until the RF becomes available, afterwards we
228 return; 214 * can safely write the new data into the register.
229 215 */
230rf_write: 216 if (WAIT_FOR_RF(rt2x00dev, &reg)) {
231 reg = 0; 217 reg = 0;
232 rt2x00_set_field16(&reg, PHY_CSR9_RF_VALUE, value); 218 rt2x00_set_field16(&reg, PHY_CSR9_RF_VALUE, value);
233 rt2500usb_register_write_lock(rt2x00dev, PHY_CSR9, reg); 219 rt2500usb_register_write_lock(rt2x00dev, PHY_CSR9, reg);
234 220
235 reg = 0; 221 reg = 0;
236 rt2x00_set_field16(&reg, PHY_CSR10_RF_VALUE, value >> 16); 222 rt2x00_set_field16(&reg, PHY_CSR10_RF_VALUE, value >> 16);
237 rt2x00_set_field16(&reg, PHY_CSR10_RF_NUMBER_OF_BITS, 20); 223 rt2x00_set_field16(&reg, PHY_CSR10_RF_NUMBER_OF_BITS, 20);
238 rt2x00_set_field16(&reg, PHY_CSR10_RF_IF_SELECT, 0); 224 rt2x00_set_field16(&reg, PHY_CSR10_RF_IF_SELECT, 0);
239 rt2x00_set_field16(&reg, PHY_CSR10_RF_BUSY, 1); 225 rt2x00_set_field16(&reg, PHY_CSR10_RF_BUSY, 1);
240 226
241 rt2500usb_register_write_lock(rt2x00dev, PHY_CSR10, reg); 227 rt2500usb_register_write_lock(rt2x00dev, PHY_CSR10, reg);
242 rt2x00_rf_write(rt2x00dev, word, value); 228 rt2x00_rf_write(rt2x00dev, word, value);
229 }
243 230
244 mutex_unlock(&rt2x00dev->usb_cache_mutex); 231 mutex_unlock(&rt2x00dev->csr_mutex);
245} 232}
246 233
247#ifdef CONFIG_RT2X00_LIB_DEBUGFS 234#ifdef CONFIG_RT2X00_LIB_DEBUGFS
248#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u16)) ) 235static void _rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
249 236 const unsigned int offset,
250static void rt2500usb_read_csr(struct rt2x00_dev *rt2x00dev, 237 u32 *value)
251 const unsigned int word, u32 *data)
252{ 238{
253 rt2500usb_register_read(rt2x00dev, CSR_OFFSET(word), (u16 *) data); 239 rt2500usb_register_read(rt2x00dev, offset, (u16 *)value);
254} 240}
255 241
256static void rt2500usb_write_csr(struct rt2x00_dev *rt2x00dev, 242static void _rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
257 const unsigned int word, u32 data) 243 const unsigned int offset,
244 u32 value)
258{ 245{
259 rt2500usb_register_write(rt2x00dev, CSR_OFFSET(word), data); 246 rt2500usb_register_write(rt2x00dev, offset, value);
260} 247}
261 248
262static const struct rt2x00debug rt2500usb_rt2x00debug = { 249static const struct rt2x00debug rt2500usb_rt2x00debug = {
263 .owner = THIS_MODULE, 250 .owner = THIS_MODULE,
264 .csr = { 251 .csr = {
265 .read = rt2500usb_read_csr, 252 .read = _rt2500usb_register_read,
266 .write = rt2500usb_write_csr, 253 .write = _rt2500usb_register_write,
254 .flags = RT2X00DEBUGFS_OFFSET,
255 .word_base = CSR_REG_BASE,
267 .word_size = sizeof(u16), 256 .word_size = sizeof(u16),
268 .word_count = CSR_REG_SIZE / sizeof(u16), 257 .word_count = CSR_REG_SIZE / sizeof(u16),
269 }, 258 },
270 .eeprom = { 259 .eeprom = {
271 .read = rt2x00_eeprom_read, 260 .read = rt2x00_eeprom_read,
272 .write = rt2x00_eeprom_write, 261 .write = rt2x00_eeprom_write,
262 .word_base = EEPROM_BASE,
273 .word_size = sizeof(u16), 263 .word_size = sizeof(u16),
274 .word_count = EEPROM_SIZE / sizeof(u16), 264 .word_count = EEPROM_SIZE / sizeof(u16),
275 }, 265 },
276 .bbp = { 266 .bbp = {
277 .read = rt2500usb_bbp_read, 267 .read = rt2500usb_bbp_read,
278 .write = rt2500usb_bbp_write, 268 .write = rt2500usb_bbp_write,
269 .word_base = BBP_BASE,
279 .word_size = sizeof(u8), 270 .word_size = sizeof(u8),
280 .word_count = BBP_SIZE / sizeof(u8), 271 .word_count = BBP_SIZE / sizeof(u8),
281 }, 272 },
282 .rf = { 273 .rf = {
283 .read = rt2x00_rf_read, 274 .read = rt2x00_rf_read,
284 .write = rt2500usb_rf_write, 275 .write = rt2500usb_rf_write,
276 .word_base = RF_BASE,
285 .word_size = sizeof(u32), 277 .word_size = sizeof(u32),
286 .word_count = RF_SIZE / sizeof(u32), 278 .word_count = RF_SIZE / sizeof(u32),
287 }, 279 },
@@ -338,6 +330,82 @@ static void rt2500usb_init_led(struct rt2x00_dev *rt2x00dev,
338/* 330/*
339 * Configuration handlers. 331 * Configuration handlers.
340 */ 332 */
333
334/*
335 * rt2500usb does not differentiate between shared and pairwise
336 * keys, so we should use the same function for both key types.
337 */
338static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev,
339 struct rt2x00lib_crypto *crypto,
340 struct ieee80211_key_conf *key)
341{
342 int timeout;
343 u32 mask;
344 u16 reg;
345
346 if (crypto->cmd == SET_KEY) {
347 /*
348 * Pairwise key will always be entry 0, but this
349 * could collide with a shared key on the same
350 * position...
351 */
352 mask = TXRX_CSR0_KEY_ID.bit_mask;
353
354 rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
355 reg &= mask;
356
357 if (reg && reg == mask)
358 return -ENOSPC;
359
360 reg = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID);
361
362 key->hw_key_idx += reg ? ffz(reg) : 0;
363
364 /*
365 * The encryption key doesn't fit within the CSR cache,
366 * this means we should allocate it seperately and use
367 * rt2x00usb_vendor_request() to send the key to the hardware.
368 */
369 reg = KEY_ENTRY(key->hw_key_idx);
370 timeout = REGISTER_TIMEOUT32(sizeof(crypto->key));
371 rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
372 USB_VENDOR_REQUEST_OUT, reg,
373 crypto->key,
374 sizeof(crypto->key),
375 timeout);
376
377 /*
378 * The driver does not support the IV/EIV generation
379 * in hardware. However it doesn't support the IV/EIV
380 * inside the ieee80211 frame either, but requires it
381 * to be provided seperately for the descriptor.
382 * rt2x00lib will cut the IV/EIV data out of all frames
383 * given to us by mac80211, but we must tell mac80211
384 * to generate the IV/EIV data.
385 */
386 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
387 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
388 }
389
390 /*
391 * TXRX_CSR0_KEY_ID contains only single-bit fields to indicate
392 * a particular key is valid.
393 */
394 rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
395 rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, crypto->cipher);
396 rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER);
397
398 mask = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID);
399 if (crypto->cmd == SET_KEY)
400 mask |= 1 << key->hw_key_idx;
401 else if (crypto->cmd == DISABLE_KEY)
402 mask &= ~(1 << key->hw_key_idx);
403 rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, mask);
404 rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg);
405
406 return 0;
407}
408
341static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev, 409static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev,
342 const unsigned int filter_flags) 410 const unsigned int filter_flags)
343{ 411{
@@ -380,7 +448,7 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
380 /* 448 /*
381 * Enable beacon config 449 * Enable beacon config
382 */ 450 */
383 bcn_preload = PREAMBLE + get_duration(IEEE80211_HEADER, 20); 451 bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20);
384 rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg); 452 rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg);
385 rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6); 453 rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6);
386 rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW, 454 rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW,
@@ -423,57 +491,16 @@ static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev,
423 rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE, 491 rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE,
424 !!erp->short_preamble); 492 !!erp->short_preamble);
425 rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg); 493 rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg);
426}
427
428static void rt2500usb_config_phymode(struct rt2x00_dev *rt2x00dev,
429 const int basic_rate_mask)
430{
431 rt2500usb_register_write(rt2x00dev, TXRX_CSR11, basic_rate_mask);
432}
433
434static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev,
435 struct rf_channel *rf, const int txpower)
436{
437 /*
438 * Set TXpower.
439 */
440 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
441
442 /*
443 * For RT2525E we should first set the channel to half band higher.
444 */
445 if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
446 static const u32 vals[] = {
447 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2,
448 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba,
449 0x000008ba, 0x000008be, 0x000008b7, 0x00000902,
450 0x00000902, 0x00000906
451 };
452
453 rt2500usb_rf_write(rt2x00dev, 2, vals[rf->channel - 1]);
454 if (rf->rf4)
455 rt2500usb_rf_write(rt2x00dev, 4, rf->rf4);
456 }
457
458 rt2500usb_rf_write(rt2x00dev, 1, rf->rf1);
459 rt2500usb_rf_write(rt2x00dev, 2, rf->rf2);
460 rt2500usb_rf_write(rt2x00dev, 3, rf->rf3);
461 if (rf->rf4)
462 rt2500usb_rf_write(rt2x00dev, 4, rf->rf4);
463}
464 494
465static void rt2500usb_config_txpower(struct rt2x00_dev *rt2x00dev, 495 rt2500usb_register_write(rt2x00dev, TXRX_CSR11, erp->basic_rates);
466 const int txpower)
467{
468 u32 rf3;
469 496
470 rt2x00_rf_read(rt2x00dev, 3, &rf3); 497 rt2500usb_register_write(rt2x00dev, MAC_CSR10, erp->slot_time);
471 rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); 498 rt2500usb_register_write(rt2x00dev, MAC_CSR11, erp->sifs);
472 rt2500usb_rf_write(rt2x00dev, 3, rf3); 499 rt2500usb_register_write(rt2x00dev, MAC_CSR12, erp->eifs);
473} 500}
474 501
475static void rt2500usb_config_antenna(struct rt2x00_dev *rt2x00dev, 502static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
476 struct antenna_setup *ant) 503 struct antenna_setup *ant)
477{ 504{
478 u8 r2; 505 u8 r2;
479 u8 r14; 506 u8 r14;
@@ -555,15 +582,52 @@ static void rt2500usb_config_antenna(struct rt2x00_dev *rt2x00dev,
555 rt2500usb_register_write(rt2x00dev, PHY_CSR6, csr6); 582 rt2500usb_register_write(rt2x00dev, PHY_CSR6, csr6);
556} 583}
557 584
585static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev,
586 struct rf_channel *rf, const int txpower)
587{
588 /*
589 * Set TXpower.
590 */
591 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
592
593 /*
594 * For RT2525E we should first set the channel to half band higher.
595 */
596 if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
597 static const u32 vals[] = {
598 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2,
599 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba,
600 0x000008ba, 0x000008be, 0x000008b7, 0x00000902,
601 0x00000902, 0x00000906
602 };
603
604 rt2500usb_rf_write(rt2x00dev, 2, vals[rf->channel - 1]);
605 if (rf->rf4)
606 rt2500usb_rf_write(rt2x00dev, 4, rf->rf4);
607 }
608
609 rt2500usb_rf_write(rt2x00dev, 1, rf->rf1);
610 rt2500usb_rf_write(rt2x00dev, 2, rf->rf2);
611 rt2500usb_rf_write(rt2x00dev, 3, rf->rf3);
612 if (rf->rf4)
613 rt2500usb_rf_write(rt2x00dev, 4, rf->rf4);
614}
615
616static void rt2500usb_config_txpower(struct rt2x00_dev *rt2x00dev,
617 const int txpower)
618{
619 u32 rf3;
620
621 rt2x00_rf_read(rt2x00dev, 3, &rf3);
622 rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
623 rt2500usb_rf_write(rt2x00dev, 3, rf3);
624}
625
558static void rt2500usb_config_duration(struct rt2x00_dev *rt2x00dev, 626static void rt2500usb_config_duration(struct rt2x00_dev *rt2x00dev,
559 struct rt2x00lib_conf *libconf) 627 struct rt2x00lib_conf *libconf)
560{ 628{
561 u16 reg; 629 u16 reg;
562 630
563 rt2500usb_register_write(rt2x00dev, MAC_CSR10, libconf->slot_time);
564 rt2500usb_register_write(rt2x00dev, MAC_CSR11, libconf->sifs);
565 rt2500usb_register_write(rt2x00dev, MAC_CSR12, libconf->eifs);
566
567 rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg); 631 rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg);
568 rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL, 632 rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL,
569 libconf->conf->beacon_int * 4); 633 libconf->conf->beacon_int * 4);
@@ -574,17 +638,14 @@ static void rt2500usb_config(struct rt2x00_dev *rt2x00dev,
574 struct rt2x00lib_conf *libconf, 638 struct rt2x00lib_conf *libconf,
575 const unsigned int flags) 639 const unsigned int flags)
576{ 640{
577 if (flags & CONFIG_UPDATE_PHYMODE) 641 if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
578 rt2500usb_config_phymode(rt2x00dev, libconf->basic_rates);
579 if (flags & CONFIG_UPDATE_CHANNEL)
580 rt2500usb_config_channel(rt2x00dev, &libconf->rf, 642 rt2500usb_config_channel(rt2x00dev, &libconf->rf,
581 libconf->conf->power_level); 643 libconf->conf->power_level);
582 if ((flags & CONFIG_UPDATE_TXPOWER) && !(flags & CONFIG_UPDATE_CHANNEL)) 644 if ((flags & IEEE80211_CONF_CHANGE_POWER) &&
645 !(flags & IEEE80211_CONF_CHANGE_CHANNEL))
583 rt2500usb_config_txpower(rt2x00dev, 646 rt2500usb_config_txpower(rt2x00dev,
584 libconf->conf->power_level); 647 libconf->conf->power_level);
585 if (flags & CONFIG_UPDATE_ANTENNA) 648 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
586 rt2500usb_config_antenna(rt2x00dev, &libconf->ant);
587 if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT))
588 rt2500usb_config_duration(rt2x00dev, libconf); 649 rt2500usb_config_duration(rt2x00dev, libconf);
589} 650}
590 651
@@ -866,7 +927,7 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
866 927
867 rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg); 928 rt2500usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
868 rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); 929 rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER);
869 rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, 0xff); 930 rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, 0);
870 rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); 931 rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg);
871 932
872 rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg); 933 rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
@@ -1088,7 +1149,7 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1088 * Start writing the descriptor words. 1149 * Start writing the descriptor words.
1089 */ 1150 */
1090 rt2x00_desc_read(txd, 1, &word); 1151 rt2x00_desc_read(txd, 1, &word);
1091 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1152 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
1092 rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs); 1153 rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs);
1093 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1154 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1094 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1155 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
@@ -1101,6 +1162,11 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1101 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); 1162 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
1102 rt2x00_desc_write(txd, 2, word); 1163 rt2x00_desc_write(txd, 2, word);
1103 1164
1165 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
1166 _rt2x00_desc_write(txd, 3, skbdesc->iv[0]);
1167 _rt2x00_desc_write(txd, 4, skbdesc->iv[1]);
1168 }
1169
1104 rt2x00_desc_read(txd, 0, &word); 1170 rt2x00_desc_read(txd, 0, &word);
1105 rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit); 1171 rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit);
1106 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, 1172 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
@@ -1115,7 +1181,8 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1115 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); 1181 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
1116 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1182 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1117 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); 1183 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
1118 rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE); 1184 rt2x00_set_field32(&word, TXD_W0_CIPHER, txdesc->cipher);
1185 rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
1119 rt2x00_desc_write(txd, 0, word); 1186 rt2x00_desc_write(txd, 0, word);
1120} 1187}
1121 1188
@@ -1130,7 +1197,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1130 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 1197 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
1131 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; 1198 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
1132 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1199 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1133 int pipe = usb_sndbulkpipe(usb_dev, 1); 1200 int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint);
1134 int length; 1201 int length;
1135 u16 reg; 1202 u16 reg;
1136 1203
@@ -1156,7 +1223,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1156 * length of the data to usb_fill_bulk_urb. Pass the skb 1223 * length of the data to usb_fill_bulk_urb. Pass the skb
1157 * to the driver to determine what the length should be. 1224 * to the driver to determine what the length should be.
1158 */ 1225 */
1159 length = rt2x00dev->ops->lib->get_tx_data_len(rt2x00dev, entry->skb); 1226 length = rt2x00dev->ops->lib->get_tx_data_len(entry);
1160 1227
1161 usb_fill_bulk_urb(bcn_priv->urb, usb_dev, pipe, 1228 usb_fill_bulk_urb(bcn_priv->urb, usb_dev, pipe,
1162 entry->skb->data, length, rt2500usb_beacondone, 1229 entry->skb->data, length, rt2500usb_beacondone,
@@ -1178,8 +1245,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1178 usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC); 1245 usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC);
1179} 1246}
1180 1247
1181static int rt2500usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev, 1248static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
1182 struct sk_buff *skb)
1183{ 1249{
1184 int length; 1250 int length;
1185 1251
@@ -1187,8 +1253,8 @@ static int rt2500usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev,
1187 * The length _must_ be a multiple of 2, 1253 * The length _must_ be a multiple of 2,
1188 * but it must _not_ be a multiple of the USB packet size. 1254 * but it must _not_ be a multiple of the USB packet size.
1189 */ 1255 */
1190 length = roundup(skb->len, 2); 1256 length = roundup(entry->skb->len, 2);
1191 length += (2 * !(length % rt2x00dev->usb_maxpacket)); 1257 length += (2 * !(length % entry->queue->usb_maxpacket));
1192 1258
1193 return length; 1259 return length;
1194} 1260}
@@ -1227,6 +1293,7 @@ static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1227static void rt2500usb_fill_rxdone(struct queue_entry *entry, 1293static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1228 struct rxdone_entry_desc *rxdesc) 1294 struct rxdone_entry_desc *rxdesc)
1229{ 1295{
1296 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1230 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 1297 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
1231 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1298 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1232 __le32 *rxd = 1299 __le32 *rxd =
@@ -1254,6 +1321,33 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1254 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) 1321 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
1255 rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC; 1322 rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;
1256 1323
1324 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
1325 rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER);
1326 if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR))
1327 rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY;
1328 }
1329
1330 if (rxdesc->cipher != CIPHER_NONE) {
1331 _rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
1332 _rt2x00_desc_read(rxd, 3, &rxdesc->iv[1]);
1333 rxdesc->dev_flags |= RXDONE_CRYPTO_IV;
1334
1335 /* ICV is located at the end of frame */
1336
1337 /*
1338 * Hardware has stripped IV/EIV data from 802.11 frame during
1339 * decryption. It has provided the data seperately but rt2x00lib
1340 * should decide if it should be reinserted.
1341 */
1342 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
1343 if (rxdesc->cipher != CIPHER_TKIP)
1344 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
1345 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
1346 rxdesc->flags |= RX_FLAG_DECRYPTED;
1347 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
1348 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
1349 }
1350
1257 /* 1351 /*
1258 * Obtain the status about this packet. 1352 * Obtain the status about this packet.
1259 * When frame was received with an OFDM bitrate, 1353 * When frame was received with an OFDM bitrate,
@@ -1261,8 +1355,8 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1261 * a CCK bitrate the signal is the rate in 100kbit/s. 1355 * a CCK bitrate the signal is the rate in 100kbit/s.
1262 */ 1356 */
1263 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); 1357 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
1264 rxdesc->rssi = rt2x00_get_field32(word1, RXD_W1_RSSI) - 1358 rxdesc->rssi =
1265 entry->queue->rt2x00dev->rssi_offset; 1359 rt2x00_get_field32(word1, RXD_W1_RSSI) - rt2x00dev->rssi_offset;
1266 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1360 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1267 1361
1268 if (rt2x00_get_field32(word0, RXD_W0_OFDM)) 1362 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
@@ -1319,10 +1413,8 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1319 */ 1413 */
1320 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 1414 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
1321 if (!is_valid_ether_addr(mac)) { 1415 if (!is_valid_ether_addr(mac)) {
1322 DECLARE_MAC_BUF(macbuf);
1323
1324 random_ether_addr(mac); 1416 random_ether_addr(mac);
1325 EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac)); 1417 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
1326 } 1418 }
1327 1419
1328 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); 1420 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
@@ -1752,6 +1844,10 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1752 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 1844 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
1753 __set_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags); 1845 __set_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
1754 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags); 1846 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags);
1847 if (!modparam_nohwcrypt) {
1848 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
1849 __set_bit(CONFIG_CRYPTO_COPY_IV, &rt2x00dev->flags);
1850 }
1755 __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags); 1851 __set_bit(CONFIG_DISABLE_LINK_TUNING, &rt2x00dev->flags);
1756 1852
1757 /* 1853 /*
@@ -1771,6 +1867,7 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
1771 .config = rt2x00mac_config, 1867 .config = rt2x00mac_config,
1772 .config_interface = rt2x00mac_config_interface, 1868 .config_interface = rt2x00mac_config_interface,
1773 .configure_filter = rt2x00mac_configure_filter, 1869 .configure_filter = rt2x00mac_configure_filter,
1870 .set_key = rt2x00mac_set_key,
1774 .get_stats = rt2x00mac_get_stats, 1871 .get_stats = rt2x00mac_get_stats,
1775 .bss_info_changed = rt2x00mac_bss_info_changed, 1872 .bss_info_changed = rt2x00mac_bss_info_changed,
1776 .conf_tx = rt2x00mac_conf_tx, 1873 .conf_tx = rt2x00mac_conf_tx,
@@ -1781,8 +1878,7 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1781 .probe_hw = rt2500usb_probe_hw, 1878 .probe_hw = rt2500usb_probe_hw,
1782 .initialize = rt2x00usb_initialize, 1879 .initialize = rt2x00usb_initialize,
1783 .uninitialize = rt2x00usb_uninitialize, 1880 .uninitialize = rt2x00usb_uninitialize,
1784 .init_rxentry = rt2x00usb_init_rxentry, 1881 .clear_entry = rt2x00usb_clear_entry,
1785 .init_txentry = rt2x00usb_init_txentry,
1786 .set_device_state = rt2500usb_set_device_state, 1882 .set_device_state = rt2500usb_set_device_state,
1787 .link_stats = rt2500usb_link_stats, 1883 .link_stats = rt2500usb_link_stats,
1788 .reset_tuner = rt2500usb_reset_tuner, 1884 .reset_tuner = rt2500usb_reset_tuner,
@@ -1793,9 +1889,12 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1793 .get_tx_data_len = rt2500usb_get_tx_data_len, 1889 .get_tx_data_len = rt2500usb_get_tx_data_len,
1794 .kick_tx_queue = rt2500usb_kick_tx_queue, 1890 .kick_tx_queue = rt2500usb_kick_tx_queue,
1795 .fill_rxdone = rt2500usb_fill_rxdone, 1891 .fill_rxdone = rt2500usb_fill_rxdone,
1892 .config_shared_key = rt2500usb_config_key,
1893 .config_pairwise_key = rt2500usb_config_key,
1796 .config_filter = rt2500usb_config_filter, 1894 .config_filter = rt2500usb_config_filter,
1797 .config_intf = rt2500usb_config_intf, 1895 .config_intf = rt2500usb_config_intf,
1798 .config_erp = rt2500usb_config_erp, 1896 .config_erp = rt2500usb_config_erp,
1897 .config_ant = rt2500usb_config_ant,
1799 .config = rt2500usb_config, 1898 .config = rt2500usb_config,
1800}; 1899};
1801 1900
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index 89e5ed24e4f7..4347dfdabcd4 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -57,7 +57,9 @@
57#define CSR_REG_SIZE 0x0100 57#define CSR_REG_SIZE 0x0100
58#define EEPROM_BASE 0x0000 58#define EEPROM_BASE 0x0000
59#define EEPROM_SIZE 0x006a 59#define EEPROM_SIZE 0x006a
60#define BBP_BASE 0x0000
60#define BBP_SIZE 0x0060 61#define BBP_SIZE 0x0060
62#define RF_BASE 0x0000
61#define RF_SIZE 0x0014 63#define RF_SIZE 0x0014
62 64
63/* 65/*
@@ -445,6 +447,9 @@
445#define SEC_CSR30 0x04bc 447#define SEC_CSR30 0x04bc
446#define SEC_CSR31 0x04be 448#define SEC_CSR31 0x04be
447 449
450#define KEY_ENTRY(__idx) \
451 ( SEC_CSR0 + ((__idx) * 16) )
452
448/* 453/*
449 * PHY control registers. 454 * PHY control registers.
450 */ 455 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 1359a3768404..39ecf3b82ca1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -44,7 +44,7 @@
44/* 44/*
45 * Module information. 45 * Module information.
46 */ 46 */
47#define DRV_VERSION "2.2.1" 47#define DRV_VERSION "2.2.3"
48#define DRV_PROJECT "http://rt2x00.serialmonkey.com" 48#define DRV_PROJECT "http://rt2x00.serialmonkey.com"
49 49
50/* 50/*
@@ -92,6 +92,16 @@
92 DEBUG_PRINTK(__dev, KERN_DEBUG, "EEPROM recovery", __msg, ##__args) 92 DEBUG_PRINTK(__dev, KERN_DEBUG, "EEPROM recovery", __msg, ##__args)
93 93
94/* 94/*
95 * Duration calculations
96 * The rate variable passed is: 100kbs.
97 * To convert from bytes to bits we multiply size with 8,
98 * then the size is multiplied with 10 to make the
99 * real rate -> rate argument correction.
100 */
101#define GET_DURATION(__size, __rate) (((__size) * 8 * 10) / (__rate))
102#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate))
103
104/*
95 * Standard timing and size defines. 105 * Standard timing and size defines.
96 * These values should follow the ieee80211 specifications. 106 * These values should follow the ieee80211 specifications.
97 */ 107 */
@@ -109,9 +119,9 @@
109#define DIFS ( PIFS + SLOT_TIME ) 119#define DIFS ( PIFS + SLOT_TIME )
110#define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME ) 120#define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME )
111#define EIFS ( SIFS + DIFS + \ 121#define EIFS ( SIFS + DIFS + \
112 (8 * (IEEE80211_HEADER + ACK_SIZE)) ) 122 GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
113#define SHORT_EIFS ( SIFS + SHORT_DIFS + \ 123#define SHORT_EIFS ( SIFS + SHORT_DIFS + \
114 (8 * (IEEE80211_HEADER + ACK_SIZE)) ) 124 GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
115 125
116/* 126/*
117 * Chipset identification 127 * Chipset identification
@@ -348,13 +358,6 @@ struct rt2x00_intf {
348 spinlock_t lock; 358 spinlock_t lock;
349 359
350 /* 360 /*
351 * BSS configuration. Copied from the structure
352 * passed to us through the bss_info_changed()
353 * callback funtion.
354 */
355 struct ieee80211_bss_conf conf;
356
357 /*
358 * MAC of the device. 361 * MAC of the device.
359 */ 362 */
360 u8 mac[ETH_ALEN]; 363 u8 mac[ETH_ALEN];
@@ -433,18 +436,6 @@ struct rt2x00lib_conf {
433 436
434 struct rf_channel rf; 437 struct rf_channel rf;
435 struct channel_info channel; 438 struct channel_info channel;
436
437 struct antenna_setup ant;
438
439 enum ieee80211_band band;
440
441 u32 basic_rates;
442 u32 slot_time;
443
444 short sifs;
445 short pifs;
446 short difs;
447 short eifs;
448}; 439};
449 440
450/* 441/*
@@ -456,6 +447,15 @@ struct rt2x00lib_erp {
456 447
457 int ack_timeout; 448 int ack_timeout;
458 int ack_consume_time; 449 int ack_consume_time;
450
451 u64 basic_rates;
452
453 int slot_time;
454
455 short sifs;
456 short pifs;
457 short difs;
458 short eifs;
459}; 459};
460 460
461/* 461/*
@@ -533,10 +533,8 @@ struct rt2x00lib_ops {
533 /* 533 /*
534 * queue initialization handlers 534 * queue initialization handlers
535 */ 535 */
536 void (*init_rxentry) (struct rt2x00_dev *rt2x00dev, 536 bool (*get_entry_state) (struct queue_entry *entry);
537 struct queue_entry *entry); 537 void (*clear_entry) (struct queue_entry *entry);
538 void (*init_txentry) (struct rt2x00_dev *rt2x00dev,
539 struct queue_entry *entry);
540 538
541 /* 539 /*
542 * Radio control handlers. 540 * Radio control handlers.
@@ -557,8 +555,7 @@ struct rt2x00lib_ops {
557 struct txentry_desc *txdesc); 555 struct txentry_desc *txdesc);
558 int (*write_tx_data) (struct queue_entry *entry); 556 int (*write_tx_data) (struct queue_entry *entry);
559 void (*write_beacon) (struct queue_entry *entry); 557 void (*write_beacon) (struct queue_entry *entry);
560 int (*get_tx_data_len) (struct rt2x00_dev *rt2x00dev, 558 int (*get_tx_data_len) (struct queue_entry *entry);
561 struct sk_buff *skb);
562 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev, 559 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev,
563 const enum data_queue_qid queue); 560 const enum data_queue_qid queue);
564 561
@@ -589,16 +586,11 @@ struct rt2x00lib_ops {
589 586
590 void (*config_erp) (struct rt2x00_dev *rt2x00dev, 587 void (*config_erp) (struct rt2x00_dev *rt2x00dev,
591 struct rt2x00lib_erp *erp); 588 struct rt2x00lib_erp *erp);
589 void (*config_ant) (struct rt2x00_dev *rt2x00dev,
590 struct antenna_setup *ant);
592 void (*config) (struct rt2x00_dev *rt2x00dev, 591 void (*config) (struct rt2x00_dev *rt2x00dev,
593 struct rt2x00lib_conf *libconf, 592 struct rt2x00lib_conf *libconf,
594 const unsigned int flags); 593 const unsigned int changed_flags);
595#define CONFIG_UPDATE_PHYMODE ( 1 << 1 )
596#define CONFIG_UPDATE_CHANNEL ( 1 << 2 )
597#define CONFIG_UPDATE_TXPOWER ( 1 << 3 )
598#define CONFIG_UPDATE_ANTENNA ( 1 << 4 )
599#define CONFIG_UPDATE_SLOT_TIME ( 1 << 5 )
600#define CONFIG_UPDATE_BEACON_INT ( 1 << 6 )
601#define CONFIG_UPDATE_ALL 0xffff
602}; 594};
603 595
604/* 596/*
@@ -661,6 +653,7 @@ enum rt2x00_flags {
661 CONFIG_EXTERNAL_LNA_BG, 653 CONFIG_EXTERNAL_LNA_BG,
662 CONFIG_DOUBLE_ANTENNA, 654 CONFIG_DOUBLE_ANTENNA,
663 CONFIG_DISABLE_LINK_TUNING, 655 CONFIG_DISABLE_LINK_TUNING,
656 CONFIG_CRYPTO_COPY_IV,
664}; 657};
665 658
666/* 659/*
@@ -738,8 +731,7 @@ struct rt2x00_dev {
738 731
739 /* 732 /*
740 * This is the default TX/RX antenna setup as indicated 733 * This is the default TX/RX antenna setup as indicated
741 * by the device's EEPROM. When mac80211 sets its 734 * by the device's EEPROM.
742 * antenna value to 0 we should be using these values.
743 */ 735 */
744 struct antenna_setup default_ant; 736 struct antenna_setup default_ant;
745 737
@@ -754,16 +746,15 @@ struct rt2x00_dev {
754 } csr; 746 } csr;
755 747
756 /* 748 /*
757 * Mutex to protect register accesses on USB devices. 749 * Mutex to protect register accesses.
758 * There are 2 reasons this is needed, one is to ensure 750 * For PCI and USB devices it protects against concurrent indirect
759 * use of the csr_cache (for USB devices) by one thread 751 * register access (BBP, RF, MCU) since accessing those
760 * isn't corrupted by another thread trying to access it. 752 * registers require multiple calls to the CSR registers.
761 * The other is that access to BBP and RF registers 753 * For USB devices it also protects the csr_cache since that
762 * require multiple BUS transactions and if another thread 754 * field is used for normal CSR access and it cannot support
763 * attempted to access one of those registers at the same 755 * multiple callers simultaneously.
764 * time one of the writes could silently fail.
765 */ 756 */
766 struct mutex usb_cache_mutex; 757 struct mutex csr_mutex;
767 758
768 /* 759 /*
769 * Current packet filter configuration for the device. 760 * Current packet filter configuration for the device.
@@ -808,14 +799,15 @@ struct rt2x00_dev {
808 short lna_gain; 799 short lna_gain;
809 800
810 /* 801 /*
811 * USB Max frame size (for rt2500usb & rt73usb). 802 * Current TX power value.
812 */ 803 */
813 u16 usb_maxpacket; 804 u16 tx_power;
814 805
815 /* 806 /*
816 * Current TX power value. 807 * Current retry values.
817 */ 808 */
818 u16 tx_power; 809 u8 short_retry;
810 u8 long_retry;
819 811
820 /* 812 /*
821 * Rssi <-> Dbm offset 813 * Rssi <-> Dbm offset
@@ -938,23 +930,6 @@ static inline u16 rt2x00_check_rev(const struct rt2x00_chip *chipset,
938 !!(chipset->rev & 0x0000f)); 930 !!(chipset->rev & 0x0000f));
939} 931}
940 932
941/*
942 * Duration calculations
943 * The rate variable passed is: 100kbs.
944 * To convert from bytes to bits we multiply size with 8,
945 * then the size is multiplied with 10 to make the
946 * real rate -> rate argument correction.
947 */
948static inline u16 get_duration(const unsigned int size, const u8 rate)
949{
950 return ((size * 8 * 10) / rate);
951}
952
953static inline u16 get_duration_res(const unsigned int size, const u8 rate)
954{
955 return ((size * 8 * 10) % rate);
956}
957
958/** 933/**
959 * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes. 934 * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
960 * @rt2x00dev: Pointer to &struct rt2x00_dev. 935 * @rt2x00dev: Pointer to &struct rt2x00_dev.
@@ -997,7 +972,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
997 struct ieee80211_if_init_conf *conf); 972 struct ieee80211_if_init_conf *conf);
998void rt2x00mac_remove_interface(struct ieee80211_hw *hw, 973void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
999 struct ieee80211_if_init_conf *conf); 974 struct ieee80211_if_init_conf *conf);
1000int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf); 975int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed);
1001int rt2x00mac_config_interface(struct ieee80211_hw *hw, 976int rt2x00mac_config_interface(struct ieee80211_hw *hw,
1002 struct ieee80211_vif *vif, 977 struct ieee80211_vif *vif,
1003 struct ieee80211_if_conf *conf); 978 struct ieee80211_if_conf *conf);
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 4d5e87b015a0..e66fb316cd61 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -86,13 +86,14 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
86 erp.short_preamble = bss_conf->use_short_preamble; 86 erp.short_preamble = bss_conf->use_short_preamble;
87 erp.cts_protection = bss_conf->use_cts_prot; 87 erp.cts_protection = bss_conf->use_cts_prot;
88 88
89 erp.ack_timeout = PLCP + get_duration(ACK_SIZE, 10); 89 erp.slot_time = bss_conf->use_short_slot ? SHORT_SLOT_TIME : SLOT_TIME;
90 erp.ack_consume_time = SIFS + PLCP + get_duration(ACK_SIZE, 10); 90 erp.sifs = SIFS;
91 erp.pifs = bss_conf->use_short_slot ? SHORT_PIFS : PIFS;
92 erp.difs = bss_conf->use_short_slot ? SHORT_DIFS : DIFS;
93 erp.eifs = bss_conf->use_short_slot ? SHORT_EIFS : EIFS;
91 94
92 if (rt2x00dev->hw->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) 95 erp.ack_timeout = PLCP + erp.difs + GET_DURATION(ACK_SIZE, 10);
93 erp.ack_timeout += SHORT_DIFS; 96 erp.ack_consume_time = SIFS + PLCP + GET_DURATION(ACK_SIZE, 10);
94 else
95 erp.ack_timeout += DIFS;
96 97
97 if (bss_conf->use_short_preamble) { 98 if (bss_conf->use_short_preamble) {
98 erp.ack_timeout += SHORT_PREAMBLE; 99 erp.ack_timeout += SHORT_PREAMBLE;
@@ -102,19 +103,39 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
102 erp.ack_consume_time += PREAMBLE; 103 erp.ack_consume_time += PREAMBLE;
103 } 104 }
104 105
106 erp.basic_rates = bss_conf->basic_rates;
107
105 rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp); 108 rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp);
106} 109}
107 110
111static inline
112enum antenna rt2x00lib_config_antenna_check(enum antenna current_ant,
113 enum antenna default_ant)
114{
115 if (current_ant != ANTENNA_SW_DIVERSITY)
116 return current_ant;
117 return (default_ant != ANTENNA_SW_DIVERSITY) ? default_ant : ANTENNA_B;
118}
119
108void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, 120void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
109 enum antenna rx, enum antenna tx) 121 struct antenna_setup *ant)
110{ 122{
111 struct rt2x00lib_conf libconf; 123 struct antenna_setup *def = &rt2x00dev->default_ant;
124 struct antenna_setup *active = &rt2x00dev->link.ant.active;
112 125
113 libconf.ant.rx = rx; 126 /*
114 libconf.ant.tx = tx; 127 * Failsafe: Make sure we are not sending the
128 * ANTENNA_SW_DIVERSITY state to the driver.
129 * If that happes fallback to hardware default,
130 * or our own default.
131 * The calls to rt2x00lib_config_antenna_check()
132 * might have caused that we restore back to the already
133 * active setting. If that has happened we can quit.
134 */
135 ant->rx = rt2x00lib_config_antenna_check(ant->rx, def->rx);
136 ant->tx = rt2x00lib_config_antenna_check(ant->tx, def->tx);
115 137
116 if (rx == rt2x00dev->link.ant.active.rx && 138 if (ant->rx == active->rx && ant->tx == active->tx)
117 tx == rt2x00dev->link.ant.active.tx)
118 return; 139 return;
119 140
120 /* 141 /*
@@ -129,119 +150,28 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
129 * The latter is required since we need to recalibrate the 150 * The latter is required since we need to recalibrate the
130 * noise-sensitivity ratio for the new setup. 151 * noise-sensitivity ratio for the new setup.
131 */ 152 */
132 rt2x00dev->ops->lib->config(rt2x00dev, &libconf, CONFIG_UPDATE_ANTENNA); 153 rt2x00dev->ops->lib->config_ant(rt2x00dev, ant);
154
133 rt2x00lib_reset_link_tuner(rt2x00dev); 155 rt2x00lib_reset_link_tuner(rt2x00dev);
134 rt2x00_reset_link_ant_rssi(&rt2x00dev->link); 156 rt2x00_reset_link_ant_rssi(&rt2x00dev->link);
135 157
136 rt2x00dev->link.ant.active.rx = libconf.ant.rx; 158 memcpy(active, ant, sizeof(*ant));
137 rt2x00dev->link.ant.active.tx = libconf.ant.tx;
138 159
139 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 160 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
140 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK); 161 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
141} 162}
142 163
143static u32 rt2x00lib_get_basic_rates(struct ieee80211_supported_band *band)
144{
145 const struct rt2x00_rate *rate;
146 unsigned int i;
147 u32 mask = 0;
148
149 for (i = 0; i < band->n_bitrates; i++) {
150 rate = rt2x00_get_rate(band->bitrates[i].hw_value);
151 if (rate->flags & DEV_RATE_BASIC)
152 mask |= rate->ratemask;
153 }
154
155 return mask;
156}
157
158void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, 164void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
159 struct ieee80211_conf *conf, const int force_config) 165 struct ieee80211_conf *conf,
166 unsigned int ieee80211_flags)
160{ 167{
161 struct rt2x00lib_conf libconf; 168 struct rt2x00lib_conf libconf;
162 struct ieee80211_supported_band *band;
163 struct antenna_setup *default_ant = &rt2x00dev->default_ant;
164 struct antenna_setup *active_ant = &rt2x00dev->link.ant.active;
165 int flags = 0;
166 int short_slot_time;
167
168 /*
169 * In some situations we want to force all configurations
170 * to be reloaded (When resuming for instance).
171 */
172 if (force_config) {
173 flags = CONFIG_UPDATE_ALL;
174 goto config;
175 }
176 169
177 /*
178 * Check which configuration options have been
179 * updated and should be send to the device.
180 */
181 if (rt2x00dev->rx_status.band != conf->channel->band)
182 flags |= CONFIG_UPDATE_PHYMODE;
183 if (rt2x00dev->rx_status.freq != conf->channel->center_freq)
184 flags |= CONFIG_UPDATE_CHANNEL;
185 if (rt2x00dev->tx_power != conf->power_level)
186 flags |= CONFIG_UPDATE_TXPOWER;
187
188 /*
189 * Determining changes in the antenna setups request several checks:
190 * antenna_sel_{r,t}x = 0
191 * -> Does active_{r,t}x match default_{r,t}x
192 * -> Is default_{r,t}x SW_DIVERSITY
193 * antenna_sel_{r,t}x = 1/2
194 * -> Does active_{r,t}x match antenna_sel_{r,t}x
195 * The reason for not updating the antenna while SW diversity
196 * should be used is simple: Software diversity means that
197 * we should switch between the antenna's based on the
198 * quality. This means that the current antenna is good enough
199 * to work with untill the link tuner decides that an antenna
200 * switch should be performed.
201 */
202 if (!conf->antenna_sel_rx &&
203 default_ant->rx != ANTENNA_SW_DIVERSITY &&
204 default_ant->rx != active_ant->rx)
205 flags |= CONFIG_UPDATE_ANTENNA;
206 else if (conf->antenna_sel_rx &&
207 conf->antenna_sel_rx != active_ant->rx)
208 flags |= CONFIG_UPDATE_ANTENNA;
209 else if (active_ant->rx == ANTENNA_SW_DIVERSITY)
210 flags |= CONFIG_UPDATE_ANTENNA;
211
212 if (!conf->antenna_sel_tx &&
213 default_ant->tx != ANTENNA_SW_DIVERSITY &&
214 default_ant->tx != active_ant->tx)
215 flags |= CONFIG_UPDATE_ANTENNA;
216 else if (conf->antenna_sel_tx &&
217 conf->antenna_sel_tx != active_ant->tx)
218 flags |= CONFIG_UPDATE_ANTENNA;
219 else if (active_ant->tx == ANTENNA_SW_DIVERSITY)
220 flags |= CONFIG_UPDATE_ANTENNA;
221
222 /*
223 * The following configuration options are never
224 * stored anywhere and will always be updated.
225 */
226 flags |= CONFIG_UPDATE_SLOT_TIME;
227 flags |= CONFIG_UPDATE_BEACON_INT;
228
229 /*
230 * We have determined what options should be updated,
231 * now precalculate device configuration values depending
232 * on what configuration options need to be updated.
233 */
234config:
235 memset(&libconf, 0, sizeof(libconf)); 170 memset(&libconf, 0, sizeof(libconf));
236 171
237 if (flags & CONFIG_UPDATE_PHYMODE) { 172 libconf.conf = conf;
238 band = &rt2x00dev->bands[conf->channel->band];
239
240 libconf.band = conf->channel->band;
241 libconf.basic_rates = rt2x00lib_get_basic_rates(band);
242 }
243 173
244 if (flags & CONFIG_UPDATE_CHANNEL) { 174 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) {
245 memcpy(&libconf.rf, 175 memcpy(&libconf.rf,
246 &rt2x00dev->spec.channels[conf->channel->hw_value], 176 &rt2x00dev->spec.channels[conf->channel->hw_value],
247 sizeof(libconf.rf)); 177 sizeof(libconf.rf));
@@ -251,61 +181,23 @@ config:
251 sizeof(libconf.channel)); 181 sizeof(libconf.channel));
252 } 182 }
253 183
254 if (flags & CONFIG_UPDATE_ANTENNA) {
255 if (conf->antenna_sel_rx)
256 libconf.ant.rx = conf->antenna_sel_rx;
257 else if (default_ant->rx != ANTENNA_SW_DIVERSITY)
258 libconf.ant.rx = default_ant->rx;
259 else if (active_ant->rx == ANTENNA_SW_DIVERSITY)
260 libconf.ant.rx = ANTENNA_B;
261 else
262 libconf.ant.rx = active_ant->rx;
263
264 if (conf->antenna_sel_tx)
265 libconf.ant.tx = conf->antenna_sel_tx;
266 else if (default_ant->tx != ANTENNA_SW_DIVERSITY)
267 libconf.ant.tx = default_ant->tx;
268 else if (active_ant->tx == ANTENNA_SW_DIVERSITY)
269 libconf.ant.tx = ANTENNA_B;
270 else
271 libconf.ant.tx = active_ant->tx;
272 }
273
274 if (flags & CONFIG_UPDATE_SLOT_TIME) {
275 short_slot_time = conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME;
276
277 libconf.slot_time =
278 short_slot_time ? SHORT_SLOT_TIME : SLOT_TIME;
279 libconf.sifs = SIFS;
280 libconf.pifs = short_slot_time ? SHORT_PIFS : PIFS;
281 libconf.difs = short_slot_time ? SHORT_DIFS : DIFS;
282 libconf.eifs = short_slot_time ? SHORT_EIFS : EIFS;
283 }
284
285 libconf.conf = conf;
286
287 /* 184 /*
288 * Start configuration. 185 * Start configuration.
289 */ 186 */
290 rt2x00dev->ops->lib->config(rt2x00dev, &libconf, flags); 187 rt2x00dev->ops->lib->config(rt2x00dev, &libconf, ieee80211_flags);
291 188
292 /* 189 /*
293 * Some configuration changes affect the link quality 190 * Some configuration changes affect the link quality
294 * which means we need to reset the link tuner. 191 * which means we need to reset the link tuner.
295 */ 192 */
296 if (flags & (CONFIG_UPDATE_CHANNEL | CONFIG_UPDATE_ANTENNA)) 193 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL)
297 rt2x00lib_reset_link_tuner(rt2x00dev); 194 rt2x00lib_reset_link_tuner(rt2x00dev);
298 195
299 if (flags & CONFIG_UPDATE_PHYMODE) { 196 rt2x00dev->curr_band = conf->channel->band;
300 rt2x00dev->curr_band = conf->channel->band;
301 rt2x00dev->rx_status.band = conf->channel->band;
302 }
303
304 rt2x00dev->rx_status.freq = conf->channel->center_freq;
305 rt2x00dev->tx_power = conf->power_level; 197 rt2x00dev->tx_power = conf->power_level;
198 rt2x00dev->short_retry = conf->short_frame_max_tx_count;
199 rt2x00dev->long_retry = conf->long_frame_max_tx_count;
306 200
307 if (flags & CONFIG_UPDATE_ANTENNA) { 201 rt2x00dev->rx_status.band = conf->channel->band;
308 rt2x00dev->link.ant.active.rx = libconf.ant.rx; 202 rt2x00dev->rx_status.freq = conf->channel->center_freq;
309 rt2x00dev->link.ant.active.tx = libconf.ant.tx;
310 }
311} 203}
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index 5a858e5106c4..37ad0d2fb64c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -46,6 +46,29 @@ enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key)
46 } 46 }
47} 47}
48 48
49void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
50 struct txentry_desc *txdesc)
51{
52 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
53 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
54
55 __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
56
57 txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);
58
59 if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
60 __set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);
61
62 txdesc->key_idx = hw_key->hw_key_idx;
63 txdesc->iv_offset = ieee80211_get_hdrlen_from_skb(entry->skb);
64
65 if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
66 __set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);
67
68 if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
69 __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
70}
71
49unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info) 72unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info)
50{ 73{
51 struct ieee80211_key_conf *key = tx_info->control.hw_key; 74 struct ieee80211_key_conf *key = tx_info->control.hw_key;
@@ -69,6 +92,18 @@ unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info)
69 return overhead; 92 return overhead;
70} 93}
71 94
95void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, unsigned int iv_len)
96{
97 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
98 unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
99
100 if (unlikely(!iv_len))
101 return;
102
103 /* Copy IV/EIV data */
104 memcpy(skbdesc->iv, skb->data + header_length, iv_len);
105}
106
72void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len) 107void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len)
73{ 108{
74 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 109 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
@@ -78,10 +113,7 @@ void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len)
78 return; 113 return;
79 114
80 /* Copy IV/EIV data */ 115 /* Copy IV/EIV data */
81 if (iv_len >= 4) 116 memcpy(skbdesc->iv, skb->data + header_length, iv_len);
82 memcpy(&skbdesc->iv, skb->data + header_length, 4);
83 if (iv_len >= 8)
84 memcpy(&skbdesc->eiv, skb->data + header_length + 4, 4);
85 117
86 /* Move ieee80211 header */ 118 /* Move ieee80211 header */
87 memmove(skb->data + iv_len, skb->data, header_length); 119 memmove(skb->data + iv_len, skb->data, header_length);
@@ -98,7 +130,7 @@ void rt2x00crypto_tx_insert_iv(struct sk_buff *skb)
98 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 130 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
99 unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb); 131 unsigned int header_length = ieee80211_get_hdrlen_from_skb(skb);
100 const unsigned int iv_len = 132 const unsigned int iv_len =
101 ((!!(skbdesc->iv)) * 4) + ((!!(skbdesc->eiv)) * 4); 133 ((!!(skbdesc->iv[0])) * 4) + ((!!(skbdesc->iv[1])) * 4);
102 134
103 if (!(skbdesc->flags & FRAME_DESC_IV_STRIPPED)) 135 if (!(skbdesc->flags & FRAME_DESC_IV_STRIPPED))
104 return; 136 return;
@@ -109,10 +141,7 @@ void rt2x00crypto_tx_insert_iv(struct sk_buff *skb)
109 memmove(skb->data, skb->data + iv_len, header_length); 141 memmove(skb->data, skb->data + iv_len, header_length);
110 142
111 /* Copy IV/EIV data */ 143 /* Copy IV/EIV data */
112 if (iv_len >= 4) 144 memcpy(skb->data + header_length, skbdesc->iv, iv_len);
113 memcpy(skb->data + header_length, &skbdesc->iv, 4);
114 if (iv_len >= 8)
115 memcpy(skb->data + header_length + 4, &skbdesc->eiv, 4);
116 145
117 /* IV/EIV data has returned into the frame */ 146 /* IV/EIV data has returned into the frame */
118 skbdesc->flags &= ~FRAME_DESC_IV_STRIPPED; 147 skbdesc->flags &= ~FRAME_DESC_IV_STRIPPED;
@@ -172,17 +201,9 @@ void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
172 header_length); 201 header_length);
173 transfer += header_length; 202 transfer += header_length;
174 203
175 /* Copy IV data */ 204 /* Copy IV/EIV data */
176 if (iv_len >= 4) { 205 memcpy(skb->data + transfer, rxdesc->iv, iv_len);
177 memcpy(skb->data + transfer, &rxdesc->iv, 4); 206 transfer += iv_len;
178 transfer += 4;
179 }
180
181 /* Copy EIV data */
182 if (iv_len >= 8) {
183 memcpy(skb->data + transfer, &rxdesc->eiv, 4);
184 transfer += 4;
185 }
186 207
187 /* Move payload */ 208 /* Move payload */
188 if (align) { 209 if (align) {
@@ -198,16 +219,14 @@ void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
198 */ 219 */
199 transfer += payload_len; 220 transfer += payload_len;
200 221
201 /* Copy ICV data */ 222 /*
202 if (icv_len >= 4) { 223 * Copy ICV data
203 memcpy(skb->data + transfer, &rxdesc->icv, 4); 224 * AES appends 8 bytes, we can't fill the upper
204 /* 225 * 4 bytes, but mac80211 doesn't care about what
205 * AES appends 8 bytes, we can't fill the upper 226 * we provide here anyway and strips it immediately.
206 * 4 bytes, but mac80211 doesn't care about what 227 */
207 * we provide here anyway and strips it immediately. 228 memcpy(skb->data + transfer, &rxdesc->icv, 4);
208 */ 229 transfer += icv_len;
209 transfer += icv_len;
210 }
211 230
212 /* IV/EIV/ICV has been inserted into frame */ 231 /* IV/EIV/ICV has been inserted into frame */
213 rxdesc->size = transfer; 232 rxdesc->size = transfer;
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 5cf4c859e39d..54dd10060bf1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -285,7 +285,7 @@ exit:
285} 285}
286 286
287static unsigned int rt2x00debug_poll_queue_dump(struct file *file, 287static unsigned int rt2x00debug_poll_queue_dump(struct file *file,
288 poll_table *wait) 288 poll_table *wait)
289{ 289{
290 struct rt2x00debug_intf *intf = file->private_data; 290 struct rt2x00debug_intf *intf = file->private_data;
291 291
@@ -377,7 +377,7 @@ static ssize_t rt2x00debug_read_crypto_stats(struct file *file,
377 if (*offset) 377 if (*offset)
378 return 0; 378 return 0;
379 379
380 data = kzalloc((1 + CIPHER_MAX)* MAX_LINE_LENGTH, GFP_KERNEL); 380 data = kzalloc((1 + CIPHER_MAX) * MAX_LINE_LENGTH, GFP_KERNEL);
381 if (!data) 381 if (!data)
382 return -ENOMEM; 382 return -ENOMEM;
383 383
@@ -424,16 +424,21 @@ static ssize_t rt2x00debug_read_##__name(struct file *file, \
424 const struct rt2x00debug *debug = intf->debug; \ 424 const struct rt2x00debug *debug = intf->debug; \
425 char line[16]; \ 425 char line[16]; \
426 size_t size; \ 426 size_t size; \
427 unsigned int index = intf->offset_##__name; \
427 __type value; \ 428 __type value; \
428 \ 429 \
429 if (*offset) \ 430 if (*offset) \
430 return 0; \ 431 return 0; \
431 \ 432 \
432 if (intf->offset_##__name >= debug->__name.word_count) \ 433 if (index >= debug->__name.word_count) \
433 return -EINVAL; \ 434 return -EINVAL; \
434 \ 435 \
435 debug->__name.read(intf->rt2x00dev, \ 436 if (debug->__name.flags & RT2X00DEBUGFS_OFFSET) \
436 intf->offset_##__name, &value); \ 437 index *= debug->__name.word_size; \
438 \
439 index += debug->__name.word_base; \
440 \
441 debug->__name.read(intf->rt2x00dev, index, &value); \
437 \ 442 \
438 size = sprintf(line, __format, value); \ 443 size = sprintf(line, __format, value); \
439 \ 444 \
@@ -454,12 +459,13 @@ static ssize_t rt2x00debug_write_##__name(struct file *file, \
454 const struct rt2x00debug *debug = intf->debug; \ 459 const struct rt2x00debug *debug = intf->debug; \
455 char line[16]; \ 460 char line[16]; \
456 size_t size; \ 461 size_t size; \
462 unsigned int index = intf->offset_##__name; \
457 __type value; \ 463 __type value; \
458 \ 464 \
459 if (*offset) \ 465 if (*offset) \
460 return 0; \ 466 return 0; \
461 \ 467 \
462 if (intf->offset_##__name >= debug->__name.word_count) \ 468 if (index >= debug->__name.word_count) \
463 return -EINVAL; \ 469 return -EINVAL; \
464 \ 470 \
465 if (copy_from_user(line, buf, length)) \ 471 if (copy_from_user(line, buf, length)) \
@@ -468,8 +474,12 @@ static ssize_t rt2x00debug_write_##__name(struct file *file, \
468 size = strlen(line); \ 474 size = strlen(line); \
469 value = simple_strtoul(line, NULL, 0); \ 475 value = simple_strtoul(line, NULL, 0); \
470 \ 476 \
471 debug->__name.write(intf->rt2x00dev, \ 477 if (debug->__name.flags & RT2X00DEBUGFS_OFFSET) \
472 intf->offset_##__name, value); \ 478 index *= debug->__name.word_size; \
479 \
480 index += debug->__name.word_base; \
481 \
482 debug->__name.write(intf->rt2x00dev, index, value); \
473 \ 483 \
474 *offset += size; \ 484 *offset += size; \
475 return size; \ 485 return size; \
@@ -587,29 +597,29 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
587 intf->driver_folder = 597 intf->driver_folder =
588 debugfs_create_dir(intf->rt2x00dev->ops->name, 598 debugfs_create_dir(intf->rt2x00dev->ops->name,
589 rt2x00dev->hw->wiphy->debugfsdir); 599 rt2x00dev->hw->wiphy->debugfsdir);
590 if (IS_ERR(intf->driver_folder)) 600 if (IS_ERR(intf->driver_folder) || !intf->driver_folder)
591 goto exit; 601 goto exit;
592 602
593 intf->driver_entry = 603 intf->driver_entry =
594 rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob); 604 rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob);
595 if (IS_ERR(intf->driver_entry)) 605 if (IS_ERR(intf->driver_entry) || !intf->driver_entry)
596 goto exit; 606 goto exit;
597 607
598 intf->chipset_entry = 608 intf->chipset_entry =
599 rt2x00debug_create_file_chipset("chipset", 609 rt2x00debug_create_file_chipset("chipset",
600 intf, &intf->chipset_blob); 610 intf, &intf->chipset_blob);
601 if (IS_ERR(intf->chipset_entry)) 611 if (IS_ERR(intf->chipset_entry) || !intf->chipset_entry)
602 goto exit; 612 goto exit;
603 613
604 intf->dev_flags = debugfs_create_file("dev_flags", S_IRUSR, 614 intf->dev_flags = debugfs_create_file("dev_flags", S_IRUSR,
605 intf->driver_folder, intf, 615 intf->driver_folder, intf,
606 &rt2x00debug_fop_dev_flags); 616 &rt2x00debug_fop_dev_flags);
607 if (IS_ERR(intf->dev_flags)) 617 if (IS_ERR(intf->dev_flags) || !intf->dev_flags)
608 goto exit; 618 goto exit;
609 619
610 intf->register_folder = 620 intf->register_folder =
611 debugfs_create_dir("register", intf->driver_folder); 621 debugfs_create_dir("register", intf->driver_folder);
612 if (IS_ERR(intf->register_folder)) 622 if (IS_ERR(intf->register_folder) || !intf->register_folder)
613 goto exit; 623 goto exit;
614 624
615#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ 625#define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \
@@ -619,7 +629,8 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
619 S_IRUSR | S_IWUSR, \ 629 S_IRUSR | S_IWUSR, \
620 (__intf)->register_folder, \ 630 (__intf)->register_folder, \
621 &(__intf)->offset_##__name); \ 631 &(__intf)->offset_##__name); \
622 if (IS_ERR((__intf)->__name##_off_entry)) \ 632 if (IS_ERR((__intf)->__name##_off_entry) \
633 || !(__intf)->__name##_off_entry) \
623 goto exit; \ 634 goto exit; \
624 \ 635 \
625 (__intf)->__name##_val_entry = \ 636 (__intf)->__name##_val_entry = \
@@ -627,7 +638,8 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
627 S_IRUSR | S_IWUSR, \ 638 S_IRUSR | S_IWUSR, \
628 (__intf)->register_folder, \ 639 (__intf)->register_folder, \
629 (__intf), &rt2x00debug_fop_##__name);\ 640 (__intf), &rt2x00debug_fop_##__name);\
630 if (IS_ERR((__intf)->__name##_val_entry)) \ 641 if (IS_ERR((__intf)->__name##_val_entry) \
642 || !(__intf)->__name##_val_entry) \
631 goto exit; \ 643 goto exit; \
632}) 644})
633 645
@@ -640,13 +652,14 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
640 652
641 intf->queue_folder = 653 intf->queue_folder =
642 debugfs_create_dir("queue", intf->driver_folder); 654 debugfs_create_dir("queue", intf->driver_folder);
643 if (IS_ERR(intf->queue_folder)) 655 if (IS_ERR(intf->queue_folder) || !intf->queue_folder)
644 goto exit; 656 goto exit;
645 657
646 intf->queue_frame_dump_entry = 658 intf->queue_frame_dump_entry =
647 debugfs_create_file("dump", S_IRUSR, intf->queue_folder, 659 debugfs_create_file("dump", S_IRUSR, intf->queue_folder,
648 intf, &rt2x00debug_fop_queue_dump); 660 intf, &rt2x00debug_fop_queue_dump);
649 if (IS_ERR(intf->queue_frame_dump_entry)) 661 if (IS_ERR(intf->queue_frame_dump_entry)
662 || !intf->queue_frame_dump_entry)
650 goto exit; 663 goto exit;
651 664
652 skb_queue_head_init(&intf->frame_dump_skbqueue); 665 skb_queue_head_init(&intf->frame_dump_skbqueue);
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h
index c4ce895aa1c7..a92104dfee9a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.h
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.h
@@ -28,6 +28,16 @@
28 28
29struct rt2x00_dev; 29struct rt2x00_dev;
30 30
31/**
32 * enum rt2x00debugfs_entry_flags: Flags for debugfs registry entry
33 *
34 * @RT2X00DEBUGFS_OFFSET: rt2x00lib should pass the register offset
35 * as argument when using the callback function read()/write()
36 */
37enum rt2x00debugfs_entry_flags {
38 RT2X00DEBUGFS_OFFSET = (1 << 0),
39};
40
31#define RT2X00DEBUGFS_REGISTER_ENTRY(__name, __type) \ 41#define RT2X00DEBUGFS_REGISTER_ENTRY(__name, __type) \
32struct reg##__name { \ 42struct reg##__name { \
33 void (*read)(struct rt2x00_dev *rt2x00dev, \ 43 void (*read)(struct rt2x00_dev *rt2x00dev, \
@@ -35,6 +45,9 @@ struct reg##__name { \
35 void (*write)(struct rt2x00_dev *rt2x00dev, \ 45 void (*write)(struct rt2x00_dev *rt2x00dev, \
36 const unsigned int word, __type data); \ 46 const unsigned int word, __type data); \
37 \ 47 \
48 unsigned int flags; \
49 \
50 unsigned int word_base; \
38 unsigned int word_size; \ 51 unsigned int word_size; \
39 unsigned int word_count; \ 52 unsigned int word_count; \
40} __name 53} __name
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 86840e3585e8..6d92542fcf0d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -101,8 +101,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
101 /* 101 /*
102 * Initialize all data queues. 102 * Initialize all data queues.
103 */ 103 */
104 rt2x00queue_init_rx(rt2x00dev); 104 rt2x00queue_init_queues(rt2x00dev);
105 rt2x00queue_init_tx(rt2x00dev);
106 105
107 /* 106 /*
108 * Enable radio. 107 * Enable radio.
@@ -176,13 +175,14 @@ void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state)
176 175
177static void rt2x00lib_evaluate_antenna_sample(struct rt2x00_dev *rt2x00dev) 176static void rt2x00lib_evaluate_antenna_sample(struct rt2x00_dev *rt2x00dev)
178{ 177{
179 enum antenna rx = rt2x00dev->link.ant.active.rx; 178 struct antenna_setup ant;
180 enum antenna tx = rt2x00dev->link.ant.active.tx;
181 int sample_a = 179 int sample_a =
182 rt2x00_get_link_ant_rssi_history(&rt2x00dev->link, ANTENNA_A); 180 rt2x00_get_link_ant_rssi_history(&rt2x00dev->link, ANTENNA_A);
183 int sample_b = 181 int sample_b =
184 rt2x00_get_link_ant_rssi_history(&rt2x00dev->link, ANTENNA_B); 182 rt2x00_get_link_ant_rssi_history(&rt2x00dev->link, ANTENNA_B);
185 183
184 memcpy(&ant, &rt2x00dev->link.ant.active, sizeof(ant));
185
186 /* 186 /*
187 * We are done sampling. Now we should evaluate the results. 187 * We are done sampling. Now we should evaluate the results.
188 */ 188 */
@@ -200,21 +200,22 @@ static void rt2x00lib_evaluate_antenna_sample(struct rt2x00_dev *rt2x00dev)
200 return; 200 return;
201 201
202 if (rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) 202 if (rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY)
203 rx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B; 203 ant.rx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B;
204 204
205 if (rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY) 205 if (rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY)
206 tx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B; 206 ant.tx = (sample_a > sample_b) ? ANTENNA_A : ANTENNA_B;
207 207
208 rt2x00lib_config_antenna(rt2x00dev, rx, tx); 208 rt2x00lib_config_antenna(rt2x00dev, &ant);
209} 209}
210 210
211static void rt2x00lib_evaluate_antenna_eval(struct rt2x00_dev *rt2x00dev) 211static void rt2x00lib_evaluate_antenna_eval(struct rt2x00_dev *rt2x00dev)
212{ 212{
213 enum antenna rx = rt2x00dev->link.ant.active.rx; 213 struct antenna_setup ant;
214 enum antenna tx = rt2x00dev->link.ant.active.tx;
215 int rssi_curr = rt2x00_get_link_ant_rssi(&rt2x00dev->link); 214 int rssi_curr = rt2x00_get_link_ant_rssi(&rt2x00dev->link);
216 int rssi_old = rt2x00_update_ant_rssi(&rt2x00dev->link, rssi_curr); 215 int rssi_old = rt2x00_update_ant_rssi(&rt2x00dev->link, rssi_curr);
217 216
217 memcpy(&ant, &rt2x00dev->link.ant.active, sizeof(ant));
218
218 /* 219 /*
219 * Legacy driver indicates that we should swap antenna's 220 * Legacy driver indicates that we should swap antenna's
220 * when the difference in RSSI is greater that 5. This 221 * when the difference in RSSI is greater that 5. This
@@ -230,12 +231,12 @@ static void rt2x00lib_evaluate_antenna_eval(struct rt2x00_dev *rt2x00dev)
230 rt2x00dev->link.ant.flags |= ANTENNA_MODE_SAMPLE; 231 rt2x00dev->link.ant.flags |= ANTENNA_MODE_SAMPLE;
231 232
232 if (rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) 233 if (rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY)
233 rx = (rx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A; 234 ant.rx = (ant.rx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A;
234 235
235 if (rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY) 236 if (rt2x00dev->link.ant.flags & ANTENNA_TX_DIVERSITY)
236 tx = (tx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A; 237 ant.tx = (ant.tx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A;
237 238
238 rt2x00lib_config_antenna(rt2x00dev, rx, tx); 239 rt2x00lib_config_antenna(rt2x00dev, &ant);
239} 240}
240 241
241static void rt2x00lib_evaluate_antenna(struct rt2x00_dev *rt2x00dev) 242static void rt2x00lib_evaluate_antenna(struct rt2x00_dev *rt2x00dev)
@@ -249,11 +250,9 @@ static void rt2x00lib_evaluate_antenna(struct rt2x00_dev *rt2x00dev)
249 rt2x00dev->link.ant.flags &= ~ANTENNA_RX_DIVERSITY; 250 rt2x00dev->link.ant.flags &= ~ANTENNA_RX_DIVERSITY;
250 rt2x00dev->link.ant.flags &= ~ANTENNA_TX_DIVERSITY; 251 rt2x00dev->link.ant.flags &= ~ANTENNA_TX_DIVERSITY;
251 252
252 if (rt2x00dev->hw->conf.antenna_sel_rx == 0 && 253 if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY)
253 rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY)
254 rt2x00dev->link.ant.flags |= ANTENNA_RX_DIVERSITY; 254 rt2x00dev->link.ant.flags |= ANTENNA_RX_DIVERSITY;
255 if (rt2x00dev->hw->conf.antenna_sel_tx == 0 && 255 if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY)
256 rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY)
257 rt2x00dev->link.ant.flags |= ANTENNA_TX_DIVERSITY; 256 rt2x00dev->link.ant.flags |= ANTENNA_TX_DIVERSITY;
258 257
259 if (!(rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) && 258 if (!(rt2x00dev->link.ant.flags & ANTENNA_RX_DIVERSITY) &&
@@ -419,7 +418,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
419 */ 418 */
420 spin_lock(&intf->lock); 419 spin_lock(&intf->lock);
421 420
422 memcpy(&conf, &intf->conf, sizeof(conf)); 421 memcpy(&conf, &vif->bss_conf, sizeof(conf));
423 delayed_flags = intf->delayed_flags; 422 delayed_flags = intf->delayed_flags;
424 intf->delayed_flags = 0; 423 intf->delayed_flags = 0;
425 424
@@ -500,7 +499,9 @@ void rt2x00lib_txdone(struct queue_entry *entry,
500{ 499{
501 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 500 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
502 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 501 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
502 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
503 enum data_queue_qid qid = skb_get_queue_mapping(entry->skb); 503 enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
504 u8 rate_idx, rate_flags;
504 505
505 /* 506 /*
506 * Unmap the skb. 507 * Unmap the skb.
@@ -530,14 +531,18 @@ void rt2x00lib_txdone(struct queue_entry *entry,
530 rt2x00dev->link.qual.tx_failed += 531 rt2x00dev->link.qual.tx_failed +=
531 test_bit(TXDONE_FAILURE, &txdesc->flags); 532 test_bit(TXDONE_FAILURE, &txdesc->flags);
532 533
534 rate_idx = skbdesc->tx_rate_idx;
535 rate_flags = skbdesc->tx_rate_flags;
536
533 /* 537 /*
534 * Initialize TX status 538 * Initialize TX status
535 */ 539 */
536 memset(&tx_info->status, 0, sizeof(tx_info->status)); 540 memset(&tx_info->status, 0, sizeof(tx_info->status));
537 tx_info->status.ack_signal = 0; 541 tx_info->status.ack_signal = 0;
538 tx_info->status.excessive_retries = 542 tx_info->status.rates[0].idx = rate_idx;
539 test_bit(TXDONE_EXCESSIVE_RETRY, &txdesc->flags); 543 tx_info->status.rates[0].flags = rate_flags;
540 tx_info->status.retry_count = txdesc->retry; 544 tx_info->status.rates[0].count = txdesc->retry + 1;
545 tx_info->status.rates[1].idx = -1; /* terminate */
541 546
542 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) { 547 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
543 if (test_bit(TXDONE_SUCCESS, &txdesc->flags)) 548 if (test_bit(TXDONE_SUCCESS, &txdesc->flags))
@@ -546,7 +551,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
546 rt2x00dev->low_level_stats.dot11ACKFailureCount++; 551 rt2x00dev->low_level_stats.dot11ACKFailureCount++;
547 } 552 }
548 553
549 if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { 554 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
550 if (test_bit(TXDONE_SUCCESS, &txdesc->flags)) 555 if (test_bit(TXDONE_SUCCESS, &txdesc->flags))
551 rt2x00dev->low_level_stats.dot11RTSSuccessCount++; 556 rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
552 else if (test_bit(TXDONE_FAILURE, &txdesc->flags)) 557 else if (test_bit(TXDONE_FAILURE, &txdesc->flags))
@@ -570,7 +575,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
570 entry->skb = NULL; 575 entry->skb = NULL;
571 entry->flags = 0; 576 entry->flags = 0;
572 577
573 rt2x00dev->ops->lib->init_txentry(rt2x00dev, entry); 578 rt2x00dev->ops->lib->clear_entry(entry);
574 579
575 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 580 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
576 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); 581 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
@@ -631,7 +636,8 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
631 * provided seperately (through hardware descriptor) 636 * provided seperately (through hardware descriptor)
632 * in which case we should reinsert the data into the frame. 637 * in which case we should reinsert the data into the frame.
633 */ 638 */
634 if ((rxdesc.flags & RX_FLAG_IV_STRIPPED)) { 639 if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) &&
640 (rxdesc.flags & RX_FLAG_IV_STRIPPED)) {
635 rt2x00crypto_rx_insert_iv(entry->skb, align, 641 rt2x00crypto_rx_insert_iv(entry->skb, align,
636 header_length, &rxdesc); 642 header_length, &rxdesc);
637 } else if (align) { 643 } else if (align) {
@@ -702,7 +708,7 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
702 entry->skb = skb; 708 entry->skb = skb;
703 entry->flags = 0; 709 entry->flags = 0;
704 710
705 rt2x00dev->ops->lib->init_rxentry(rt2x00dev, entry); 711 rt2x00dev->ops->lib->clear_entry(entry);
706 712
707 rt2x00queue_index_inc(entry->queue, Q_INDEX); 713 rt2x00queue_index_inc(entry->queue, Q_INDEX);
708} 714}
@@ -713,31 +719,31 @@ EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
713 */ 719 */
714const struct rt2x00_rate rt2x00_supported_rates[12] = { 720const struct rt2x00_rate rt2x00_supported_rates[12] = {
715 { 721 {
716 .flags = DEV_RATE_CCK | DEV_RATE_BASIC, 722 .flags = DEV_RATE_CCK,
717 .bitrate = 10, 723 .bitrate = 10,
718 .ratemask = BIT(0), 724 .ratemask = BIT(0),
719 .plcp = 0x00, 725 .plcp = 0x00,
720 }, 726 },
721 { 727 {
722 .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE | DEV_RATE_BASIC, 728 .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE,
723 .bitrate = 20, 729 .bitrate = 20,
724 .ratemask = BIT(1), 730 .ratemask = BIT(1),
725 .plcp = 0x01, 731 .plcp = 0x01,
726 }, 732 },
727 { 733 {
728 .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE | DEV_RATE_BASIC, 734 .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE,
729 .bitrate = 55, 735 .bitrate = 55,
730 .ratemask = BIT(2), 736 .ratemask = BIT(2),
731 .plcp = 0x02, 737 .plcp = 0x02,
732 }, 738 },
733 { 739 {
734 .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE | DEV_RATE_BASIC, 740 .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE,
735 .bitrate = 110, 741 .bitrate = 110,
736 .ratemask = BIT(3), 742 .ratemask = BIT(3),
737 .plcp = 0x03, 743 .plcp = 0x03,
738 }, 744 },
739 { 745 {
740 .flags = DEV_RATE_OFDM | DEV_RATE_BASIC, 746 .flags = DEV_RATE_OFDM,
741 .bitrate = 60, 747 .bitrate = 60,
742 .ratemask = BIT(4), 748 .ratemask = BIT(4),
743 .plcp = 0x0b, 749 .plcp = 0x0b,
@@ -749,7 +755,7 @@ const struct rt2x00_rate rt2x00_supported_rates[12] = {
749 .plcp = 0x0f, 755 .plcp = 0x0f,
750 }, 756 },
751 { 757 {
752 .flags = DEV_RATE_OFDM | DEV_RATE_BASIC, 758 .flags = DEV_RATE_OFDM,
753 .bitrate = 120, 759 .bitrate = 120,
754 .ratemask = BIT(6), 760 .ratemask = BIT(6),
755 .plcp = 0x0a, 761 .plcp = 0x0a,
@@ -761,7 +767,7 @@ const struct rt2x00_rate rt2x00_supported_rates[12] = {
761 .plcp = 0x0e, 767 .plcp = 0x0e,
762 }, 768 },
763 { 769 {
764 .flags = DEV_RATE_OFDM | DEV_RATE_BASIC, 770 .flags = DEV_RATE_OFDM,
765 .bitrate = 240, 771 .bitrate = 240,
766 .ratemask = BIT(8), 772 .ratemask = BIT(8),
767 .plcp = 0x09, 773 .plcp = 0x09,
@@ -1046,16 +1052,24 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1046{ 1052{
1047 int retval = -ENOMEM; 1053 int retval = -ENOMEM;
1048 1054
1055 mutex_init(&rt2x00dev->csr_mutex);
1056
1049 /* 1057 /*
1050 * Make room for rt2x00_intf inside the per-interface 1058 * Make room for rt2x00_intf inside the per-interface
1051 * structure ieee80211_vif. 1059 * structure ieee80211_vif.
1052 */ 1060 */
1053 rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf); 1061 rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf);
1054 1062
1055 rt2x00dev->hw->wiphy->interface_modes = 1063 /*
1056 BIT(NL80211_IFTYPE_AP) | 1064 * Determine which operating modes are supported, all modes
1057 BIT(NL80211_IFTYPE_STATION) | 1065 * which require beaconing, depend on the availability of
1058 BIT(NL80211_IFTYPE_ADHOC); 1066 * beacon entries.
1067 */
1068 rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1069 if (rt2x00dev->ops->bcn->entry_num > 0)
1070 rt2x00dev->hw->wiphy->interface_modes |=
1071 BIT(NL80211_IFTYPE_ADHOC) |
1072 BIT(NL80211_IFTYPE_AP);
1059 1073
1060 /* 1074 /*
1061 * Let the driver probe the device to detect the capabilities. 1075 * Let the driver probe the device to detect the capabilities.
@@ -1247,7 +1261,7 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1247 /* 1261 /*
1248 * Reconfigure device. 1262 * Reconfigure device.
1249 */ 1263 */
1250 retval = rt2x00mac_config(rt2x00dev->hw, &rt2x00dev->hw->conf); 1264 retval = rt2x00mac_config(rt2x00dev->hw, ~0);
1251 if (retval) 1265 if (retval)
1252 goto exit; 1266 goto exit;
1253 1267
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index b362a1cf3f8d..68f4e0fc35b9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -72,49 +72,33 @@ void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi)
72 } 72 }
73} 73}
74 74
75void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, bool enabled) 75static void rt2x00led_led_simple(struct rt2x00_led *led, bool enabled)
76{ 76{
77 struct rt2x00_led *led = &rt2x00dev->led_qual; 77 unsigned int brightness = enabled ? LED_FULL : LED_OFF;
78 unsigned int brightness;
79 78
80 if ((led->type != LED_TYPE_ACTIVITY) || !(led->flags & LED_REGISTERED)) 79 if (!(led->flags & LED_REGISTERED))
81 return; 80 return;
82 81
83 brightness = enabled ? LED_FULL : LED_OFF; 82 led->led_dev.brightness_set(&led->led_dev, brightness);
84 if (brightness != led->led_dev.brightness) { 83 led->led_dev.brightness = brightness;
85 led->led_dev.brightness_set(&led->led_dev, brightness);
86 led->led_dev.brightness = brightness;
87 }
88} 84}
89 85
90void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled) 86void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, bool enabled)
91{ 87{
92 struct rt2x00_led *led = &rt2x00dev->led_assoc; 88 if (rt2x00dev->led_qual.type == LED_TYPE_ACTIVITY)
93 unsigned int brightness; 89 rt2x00led_led_simple(&rt2x00dev->led_qual, enabled);
94 90}
95 if ((led->type != LED_TYPE_ASSOC) || !(led->flags & LED_REGISTERED))
96 return;
97 91
98 brightness = enabled ? LED_FULL : LED_OFF; 92void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled)
99 if (brightness != led->led_dev.brightness) { 93{
100 led->led_dev.brightness_set(&led->led_dev, brightness); 94 if (rt2x00dev->led_assoc.type == LED_TYPE_ASSOC)
101 led->led_dev.brightness = brightness; 95 rt2x00led_led_simple(&rt2x00dev->led_assoc, enabled);
102 }
103} 96}
104 97
105void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled) 98void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled)
106{ 99{
107 struct rt2x00_led *led = &rt2x00dev->led_radio; 100 if (rt2x00dev->led_radio.type == LED_TYPE_ASSOC)
108 unsigned int brightness; 101 rt2x00led_led_simple(&rt2x00dev->led_radio, enabled);
109
110 if ((led->type != LED_TYPE_RADIO) || !(led->flags & LED_REGISTERED))
111 return;
112
113 brightness = enabled ? LED_FULL : LED_OFF;
114 if (brightness != led->led_dev.brightness) {
115 led->led_dev.brightness_set(&led->led_dev, brightness);
116 led->led_dev.brightness = brightness;
117 }
118} 102}
119 103
120static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev, 104static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev,
@@ -125,6 +109,7 @@ static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev,
125 int retval; 109 int retval;
126 110
127 led->led_dev.name = name; 111 led->led_dev.name = name;
112 led->led_dev.brightness = LED_OFF;
128 113
129 retval = led_classdev_register(device, &led->led_dev); 114 retval = led_classdev_register(device, &led->led_dev);
130 if (retval) { 115 if (retval) {
@@ -199,7 +184,16 @@ exit_fail:
199static void rt2x00leds_unregister_led(struct rt2x00_led *led) 184static void rt2x00leds_unregister_led(struct rt2x00_led *led)
200{ 185{
201 led_classdev_unregister(&led->led_dev); 186 led_classdev_unregister(&led->led_dev);
202 led->led_dev.brightness_set(&led->led_dev, LED_OFF); 187
188 /*
189 * This might look weird, but when we are unregistering while
190 * suspended the led is already off, and since we haven't
191 * fully resumed yet, access to the device might not be
192 * possible yet.
193 */
194 if (!(led->led_dev.flags & LED_SUSPENDED))
195 led->led_dev.brightness_set(&led->led_dev, LED_OFF);
196
203 led->flags &= ~LED_REGISTERED; 197 led->flags &= ~LED_REGISTERED;
204} 198}
205 199
@@ -213,22 +207,40 @@ void rt2x00leds_unregister(struct rt2x00_dev *rt2x00dev)
213 rt2x00leds_unregister_led(&rt2x00dev->led_radio); 207 rt2x00leds_unregister_led(&rt2x00dev->led_radio);
214} 208}
215 209
210static inline void rt2x00leds_suspend_led(struct rt2x00_led *led)
211{
212 led_classdev_suspend(&led->led_dev);
213
214 /* This shouldn't be needed, but just to be safe */
215 led->led_dev.brightness_set(&led->led_dev, LED_OFF);
216 led->led_dev.brightness = LED_OFF;
217}
218
216void rt2x00leds_suspend(struct rt2x00_dev *rt2x00dev) 219void rt2x00leds_suspend(struct rt2x00_dev *rt2x00dev)
217{ 220{
218 if (rt2x00dev->led_qual.flags & LED_REGISTERED) 221 if (rt2x00dev->led_qual.flags & LED_REGISTERED)
219 led_classdev_suspend(&rt2x00dev->led_qual.led_dev); 222 rt2x00leds_suspend_led(&rt2x00dev->led_qual);
220 if (rt2x00dev->led_assoc.flags & LED_REGISTERED) 223 if (rt2x00dev->led_assoc.flags & LED_REGISTERED)
221 led_classdev_suspend(&rt2x00dev->led_assoc.led_dev); 224 rt2x00leds_suspend_led(&rt2x00dev->led_assoc);
222 if (rt2x00dev->led_radio.flags & LED_REGISTERED) 225 if (rt2x00dev->led_radio.flags & LED_REGISTERED)
223 led_classdev_suspend(&rt2x00dev->led_radio.led_dev); 226 rt2x00leds_suspend_led(&rt2x00dev->led_radio);
227}
228
229static inline void rt2x00leds_resume_led(struct rt2x00_led *led)
230{
231 led_classdev_resume(&led->led_dev);
232
233 /* Device might have enabled the LEDS during resume */
234 led->led_dev.brightness_set(&led->led_dev, LED_OFF);
235 led->led_dev.brightness = LED_OFF;
224} 236}
225 237
226void rt2x00leds_resume(struct rt2x00_dev *rt2x00dev) 238void rt2x00leds_resume(struct rt2x00_dev *rt2x00dev)
227{ 239{
228 if (rt2x00dev->led_radio.flags & LED_REGISTERED) 240 if (rt2x00dev->led_radio.flags & LED_REGISTERED)
229 led_classdev_resume(&rt2x00dev->led_radio.led_dev); 241 rt2x00leds_resume_led(&rt2x00dev->led_radio);
230 if (rt2x00dev->led_assoc.flags & LED_REGISTERED) 242 if (rt2x00dev->led_assoc.flags & LED_REGISTERED)
231 led_classdev_resume(&rt2x00dev->led_assoc.led_dev); 243 rt2x00leds_resume_led(&rt2x00dev->led_assoc);
232 if (rt2x00dev->led_qual.flags & LED_REGISTERED) 244 if (rt2x00dev->led_qual.flags & LED_REGISTERED)
233 led_classdev_resume(&rt2x00dev->led_qual.led_dev); 245 rt2x00leds_resume_led(&rt2x00dev->led_qual);
234} 246}
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 797eb619aa0a..03024327767b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -43,7 +43,6 @@ struct rt2x00_rate {
43#define DEV_RATE_CCK 0x0001 43#define DEV_RATE_CCK 0x0001
44#define DEV_RATE_OFDM 0x0002 44#define DEV_RATE_OFDM 0x0002
45#define DEV_RATE_SHORT_PREAMBLE 0x0004 45#define DEV_RATE_SHORT_PREAMBLE 0x0004
46#define DEV_RATE_BASIC 0x0008
47 46
48 unsigned short bitrate; /* In 100kbit/s */ 47 unsigned short bitrate; /* In 100kbit/s */
49 unsigned short ratemask; 48 unsigned short ratemask;
@@ -94,9 +93,10 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
94 struct rt2x00_intf *intf, 93 struct rt2x00_intf *intf,
95 struct ieee80211_bss_conf *conf); 94 struct ieee80211_bss_conf *conf);
96void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, 95void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
97 enum antenna rx, enum antenna tx); 96 struct antenna_setup *ant);
98void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, 97void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
99 struct ieee80211_conf *conf, const int force_config); 98 struct ieee80211_conf *conf,
99 const unsigned int changed_flags);
100 100
101/** 101/**
102 * DOC: Queue handlers 102 * DOC: Queue handlers
@@ -150,8 +150,16 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
150 */ 150 */
151void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index); 151void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index);
152 152
153void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev); 153/**
154void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev); 154 * rt2x00queue_init_queues - Initialize all data queues
155 * @rt2x00dev: Pointer to &struct rt2x00_dev.
156 *
157 * This function will loop through all available queues to clear all
158 * index numbers and set the queue entry to the correct initialization
159 * state.
160 */
161void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev);
162
155int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev); 163int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev);
156void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev); 164void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev);
157int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev); 165int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev);
@@ -210,7 +218,10 @@ static inline void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
210 */ 218 */
211#ifdef CONFIG_RT2X00_LIB_CRYPTO 219#ifdef CONFIG_RT2X00_LIB_CRYPTO
212enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key); 220enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key);
221void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
222 struct txentry_desc *txdesc);
213unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info); 223unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info);
224void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, unsigned int iv_len);
214void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len); 225void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, unsigned int iv_len);
215void rt2x00crypto_tx_insert_iv(struct sk_buff *skb); 226void rt2x00crypto_tx_insert_iv(struct sk_buff *skb);
216void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align, 227void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int align,
@@ -222,11 +233,21 @@ static inline enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *
222 return CIPHER_NONE; 233 return CIPHER_NONE;
223} 234}
224 235
236static inline void rt2x00crypto_create_tx_descriptor(struct queue_entry *entry,
237 struct txentry_desc *txdesc)
238{
239}
240
225static inline unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info) 241static inline unsigned int rt2x00crypto_tx_overhead(struct ieee80211_tx_info *tx_info)
226{ 242{
227 return 0; 243 return 0;
228} 244}
229 245
246static inline void rt2x00crypto_tx_copy_iv(struct sk_buff *skb,
247 unsigned int iv_len)
248{
249}
250
230static inline void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, 251static inline void rt2x00crypto_tx_remove_iv(struct sk_buff *skb,
231 unsigned int iv_len) 252 unsigned int iv_len)
232{ 253{
@@ -242,7 +263,7 @@ static inline void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
242 struct rxdone_entry_desc *rxdesc) 263 struct rxdone_entry_desc *rxdesc)
243{ 264{
244} 265}
245#endif 266#endif /* CONFIG_RT2X00_LIB_CRYPTO */
246 267
247/* 268/*
248 * RFkill handlers. 269 * RFkill handlers.
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 2c6cc5c374ff..38edee5fe168 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -39,7 +39,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
39 unsigned int data_length; 39 unsigned int data_length;
40 int retval = 0; 40 int retval = 0;
41 41
42 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) 42 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
43 data_length = sizeof(struct ieee80211_cts); 43 data_length = sizeof(struct ieee80211_cts);
44 else 44 else
45 data_length = sizeof(struct ieee80211_rts); 45 data_length = sizeof(struct ieee80211_rts);
@@ -64,11 +64,11 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
64 */ 64 */
65 memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb)); 65 memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
66 rts_info = IEEE80211_SKB_CB(skb); 66 rts_info = IEEE80211_SKB_CB(skb);
67 rts_info->flags &= ~IEEE80211_TX_CTL_USE_RTS_CTS; 67 rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
68 rts_info->flags &= ~IEEE80211_TX_CTL_USE_CTS_PROTECT; 68 rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT;
69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS; 69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS;
70 70
71 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) 71 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
72 rts_info->flags |= IEEE80211_TX_CTL_NO_ACK; 72 rts_info->flags |= IEEE80211_TX_CTL_NO_ACK;
73 else 73 else
74 rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK; 74 rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
@@ -79,12 +79,10 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
79 * RTS/CTS frame should use the length of the frame plus any 79 * RTS/CTS frame should use the length of the frame plus any
80 * encryption overhead that will be added by the hardware. 80 * encryption overhead that will be added by the hardware.
81 */ 81 */
82#ifdef CONFIG_RT2X00_LIB_CRYPTO
83 if (!frag_skb->do_not_encrypt) 82 if (!frag_skb->do_not_encrypt)
84 data_length += rt2x00crypto_tx_overhead(tx_info); 83 data_length += rt2x00crypto_tx_overhead(tx_info);
85#endif /* CONFIG_RT2X00_LIB_CRYPTO */
86 84
87 if (tx_info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) 85 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
88 ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif, 86 ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
89 frag_skb->data, data_length, tx_info, 87 frag_skb->data, data_length, tx_info,
90 (struct ieee80211_cts *)(skb->data)); 88 (struct ieee80211_cts *)(skb->data));
@@ -132,8 +130,7 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
132 ERROR(rt2x00dev, 130 ERROR(rt2x00dev,
133 "Attempt to send packet over invalid queue %d.\n" 131 "Attempt to send packet over invalid queue %d.\n"
134 "Please file bug report to %s.\n", qid, DRV_PROJECT); 132 "Please file bug report to %s.\n", qid, DRV_PROJECT);
135 dev_kfree_skb_any(skb); 133 goto exit_fail;
136 return NETDEV_TX_OK;
137 } 134 }
138 135
139 /* 136 /*
@@ -146,8 +143,8 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
146 * inside the hardware. 143 * inside the hardware.
147 */ 144 */
148 frame_control = le16_to_cpu(ieee80211hdr->frame_control); 145 frame_control = le16_to_cpu(ieee80211hdr->frame_control);
149 if ((tx_info->flags & (IEEE80211_TX_CTL_USE_RTS_CTS | 146 if ((tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS |
150 IEEE80211_TX_CTL_USE_CTS_PROTECT)) && 147 IEEE80211_TX_RC_USE_CTS_PROTECT)) &&
151 !rt2x00dev->ops->hw->set_rts_threshold) { 148 !rt2x00dev->ops->hw->set_rts_threshold) {
152 if (rt2x00queue_available(queue) <= 1) 149 if (rt2x00queue_available(queue) <= 1)
153 goto exit_fail; 150 goto exit_fail;
@@ -335,10 +332,10 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
335} 332}
336EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface); 333EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface);
337 334
338int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 335int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
339{ 336{
340 struct rt2x00_dev *rt2x00dev = hw->priv; 337 struct rt2x00_dev *rt2x00dev = hw->priv;
341 int radio_on; 338 struct ieee80211_conf *conf = &hw->conf;
342 int status; 339 int status;
343 340
344 /* 341 /*
@@ -355,7 +352,6 @@ int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
355 * some configuration parameters (e.g. channel and antenna values) can 352 * some configuration parameters (e.g. channel and antenna values) can
356 * only be set when the radio is enabled. 353 * only be set when the radio is enabled.
357 */ 354 */
358 radio_on = test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags);
359 if (conf->radio_enabled) { 355 if (conf->radio_enabled) {
360 /* For programming the values, we have to turn RX off */ 356 /* For programming the values, we have to turn RX off */
361 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); 357 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF);
@@ -369,7 +365,18 @@ int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
369 * When we've just turned on the radio, we want to reprogram 365 * When we've just turned on the radio, we want to reprogram
370 * everything to ensure a consistent state 366 * everything to ensure a consistent state
371 */ 367 */
372 rt2x00lib_config(rt2x00dev, conf, !radio_on); 368 rt2x00lib_config(rt2x00dev, conf, changed);
369
370 /*
371 * The radio was enabled, configure the antenna to the
372 * default settings, the link tuner will later start
373 * continue configuring the antenna based on the software
374 * diversity. But for non-diversity configurations, we need
375 * to have configured the correct state now.
376 */
377 if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED)
378 rt2x00lib_config_antenna(rt2x00dev,
379 &rt2x00dev->default_ant);
373 380
374 /* Turn RX back on */ 381 /* Turn RX back on */
375 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); 382 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON);
@@ -480,12 +487,15 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
480 struct ieee80211_key_conf *key) 487 struct ieee80211_key_conf *key)
481{ 488{
482 struct rt2x00_dev *rt2x00dev = hw->priv; 489 struct rt2x00_dev *rt2x00dev = hw->priv;
490 struct ieee80211_sta *sta;
483 int (*set_key) (struct rt2x00_dev *rt2x00dev, 491 int (*set_key) (struct rt2x00_dev *rt2x00dev,
484 struct rt2x00lib_crypto *crypto, 492 struct rt2x00lib_crypto *crypto,
485 struct ieee80211_key_conf *key); 493 struct ieee80211_key_conf *key);
486 struct rt2x00lib_crypto crypto; 494 struct rt2x00lib_crypto crypto;
487 495
488 if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) 496 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
497 return 0;
498 else if (!test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags))
489 return -EOPNOTSUPP; 499 return -EOPNOTSUPP;
490 else if (key->keylen > 32) 500 else if (key->keylen > 32)
491 return -ENOSPC; 501 return -ENOSPC;
@@ -528,6 +538,17 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
528 memcpy(&crypto.key, &key->key[0], key->keylen); 538 memcpy(&crypto.key, &key->key[0], key->keylen);
529 539
530 /* 540 /*
541 * Discover the Association ID from mac80211.
542 * Some drivers need this information when updating the
543 * hardware key (either adding or removing).
544 */
545 rcu_read_lock();
546 sta = ieee80211_find_sta(hw, address);
547 if (sta)
548 crypto.aid = sta->aid;
549 rcu_read_unlock();
550
551 /*
531 * Each BSS has a maximum of 4 shared keys. 552 * Each BSS has a maximum of 4 shared keys.
532 * Shared key index values: 553 * Shared key index values:
533 * 0) BSS0 key0 554 * 0) BSS0 key0
@@ -625,7 +646,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
625 * When the erp information has changed, we should perform 646 * When the erp information has changed, we should perform
626 * additional configuration steps. For all other changes we are done. 647 * additional configuration steps. For all other changes we are done.
627 */ 648 */
628 if (changes & (BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_ERP_CTS_PROT)) { 649 if (changes & ~(BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
629 if (!test_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags)) 650 if (!test_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags))
630 rt2x00lib_config_erp(rt2x00dev, intf, bss_conf); 651 rt2x00lib_config_erp(rt2x00dev, intf, bss_conf);
631 else 652 else
@@ -633,7 +654,6 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
633 } 654 }
634 655
635 spin_lock(&intf->lock); 656 spin_lock(&intf->lock);
636 memcpy(&intf->conf, bss_conf, sizeof(*bss_conf));
637 if (delayed) { 657 if (delayed) {
638 intf->delayed_flags |= delayed; 658 intf->delayed_flags |= delayed;
639 schedule_work(&rt2x00dev->intf_work); 659 schedule_work(&rt2x00dev->intf_work);
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index adf2876ed8ab..d52b22b82d1f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -32,24 +32,46 @@
32#include "rt2x00pci.h" 32#include "rt2x00pci.h"
33 33
34/* 34/*
35 * Register access.
36 */
37int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
38 const unsigned int offset,
39 const struct rt2x00_field32 field,
40 u32 *reg)
41{
42 unsigned int i;
43
44 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
45 rt2x00pci_register_read(rt2x00dev, offset, reg);
46 if (!rt2x00_get_field32(*reg, field))
47 return 1;
48 udelay(REGISTER_BUSY_DELAY);
49 }
50
51 ERROR(rt2x00dev, "Indirect register access failed: "
52 "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
53 *reg = ~0;
54
55 return 0;
56}
57EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
58
59/*
35 * TX data handlers. 60 * TX data handlers.
36 */ 61 */
37int rt2x00pci_write_tx_data(struct queue_entry *entry) 62int rt2x00pci_write_tx_data(struct queue_entry *entry)
38{ 63{
64 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
39 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 65 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
40 struct skb_frame_desc *skbdesc; 66 struct skb_frame_desc *skbdesc;
41 u32 word;
42
43 rt2x00_desc_read(entry_priv->desc, 0, &word);
44 67
45 /* 68 /*
46 * This should not happen, we already checked the entry 69 * This should not happen, we already checked the entry
47 * was ours. When the hardware disagrees there has been 70 * was ours. When the hardware disagrees there has been
48 * a queue corruption! 71 * a queue corruption!
49 */ 72 */
50 if (unlikely(rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) || 73 if (unlikely(rt2x00dev->ops->lib->get_entry_state(entry))) {
51 rt2x00_get_field32(word, TXD_ENTRY_VALID))) { 74 ERROR(rt2x00dev,
52 ERROR(entry->queue->rt2x00dev,
53 "Corrupt queue %d, accessing entry which is not ours.\n" 75 "Corrupt queue %d, accessing entry which is not ours.\n"
54 "Please file bug report to %s.\n", 76 "Please file bug report to %s.\n",
55 entry->queue->qid, DRV_PROJECT); 77 entry->queue->qid, DRV_PROJECT);
@@ -76,14 +98,12 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
76 struct queue_entry *entry; 98 struct queue_entry *entry;
77 struct queue_entry_priv_pci *entry_priv; 99 struct queue_entry_priv_pci *entry_priv;
78 struct skb_frame_desc *skbdesc; 100 struct skb_frame_desc *skbdesc;
79 u32 word;
80 101
81 while (1) { 102 while (1) {
82 entry = rt2x00queue_get_entry(queue, Q_INDEX); 103 entry = rt2x00queue_get_entry(queue, Q_INDEX);
83 entry_priv = entry->priv_data; 104 entry_priv = entry->priv_data;
84 rt2x00_desc_read(entry_priv->desc, 0, &word);
85 105
86 if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC)) 106 if (rt2x00dev->ops->lib->get_entry_state(entry))
87 break; 107 break;
88 108
89 /* 109 /*
@@ -222,8 +242,7 @@ static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
222{ 242{
223 struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev); 243 struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev);
224 244
225 rt2x00dev->csr.base = ioremap(pci_resource_start(pci_dev, 0), 245 rt2x00dev->csr.base = pci_ioremap_bar(pci_dev, 0);
226 pci_resource_len(pci_dev, 0));
227 if (!rt2x00dev->csr.base) 246 if (!rt2x00dev->csr.base)
228 goto exit; 247 goto exit;
229 248
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 80bf97c03e2d..9c0a4d77bc1b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -44,21 +44,10 @@
44#define REGISTER_BUSY_DELAY 100 44#define REGISTER_BUSY_DELAY 100
45 45
46/* 46/*
47 * Descriptor availability flags.
48 * All PCI device descriptors have these 2 flags
49 * with the exact same definition.
50 * By storing them here we can use them inside rt2x00pci
51 * for some simple entry availability checking.
52 */
53#define TXD_ENTRY_OWNER_NIC FIELD32(0x00000001)
54#define TXD_ENTRY_VALID FIELD32(0x00000002)
55#define RXD_ENTRY_OWNER_NIC FIELD32(0x00000001)
56
57/*
58 * Register access. 47 * Register access.
59 */ 48 */
60static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, 49static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
61 const unsigned long offset, 50 const unsigned int offset,
62 u32 *value) 51 u32 *value)
63{ 52{
64 *value = readl(rt2x00dev->csr.base + offset); 53 *value = readl(rt2x00dev->csr.base + offset);
@@ -66,14 +55,14 @@ static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
66 55
67static inline void 56static inline void
68rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, 57rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
69 const unsigned long offset, 58 const unsigned int offset,
70 void *value, const u16 length) 59 void *value, const u16 length)
71{ 60{
72 memcpy_fromio(value, rt2x00dev->csr.base + offset, length); 61 memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
73} 62}
74 63
75static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, 64static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
76 const unsigned long offset, 65 const unsigned int offset,
77 u32 value) 66 u32 value)
78{ 67{
79 writel(value, rt2x00dev->csr.base + offset); 68 writel(value, rt2x00dev->csr.base + offset);
@@ -81,13 +70,31 @@ static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
81 70
82static inline void 71static inline void
83rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, 72rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
84 const unsigned long offset, 73 const unsigned int offset,
85 const void *value, const u16 length) 74 const void *value, const u16 length)
86{ 75{
87 memcpy_toio(rt2x00dev->csr.base + offset, value, length); 76 memcpy_toio(rt2x00dev->csr.base + offset, value, length);
88} 77}
89 78
90/** 79/**
80 * rt2x00pci_regbusy_read - Read from register with busy check
81 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
82 * @offset: Register offset
83 * @field: Field to check if register is busy
84 * @reg: Pointer to where register contents should be stored
85 *
86 * This function will read the given register, and checks if the
87 * register is busy. If it is, it will sleep for a couple of
88 * microseconds before reading the register again. If the register
89 * is not read after a certain timeout, this function will return
90 * FALSE.
91 */
92int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
93 const unsigned int offset,
94 const struct rt2x00_field32 field,
95 u32 *reg);
96
97/**
91 * rt2x00pci_write_tx_data - Initialize data for TX operation 98 * rt2x00pci_write_tx_data - Initialize data for TX operation
92 * @entry: The entry where the frame is located 99 * @entry: The entry where the frame is located
93 * 100 *
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 451d410ecdae..eaec6bd93ed5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -55,14 +55,12 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
55 /* 55 /*
56 * For IV/EIV/ICV assembly we must make sure there is 56 * For IV/EIV/ICV assembly we must make sure there is
57 * at least 8 bytes bytes available in headroom for IV/EIV 57 * at least 8 bytes bytes available in headroom for IV/EIV
58 * and 4 bytes for ICV data as tailroon. 58 * and 8 bytes for ICV data as tailroon.
59 */ 59 */
60#ifdef CONFIG_RT2X00_LIB_CRYPTO
61 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { 60 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
62 head_size += 8; 61 head_size += 8;
63 tail_size += 4; 62 tail_size += 8;
64 } 63 }
65#endif /* CONFIG_RT2X00_LIB_CRYPTO */
66 64
67 /* 65 /*
68 * Allocate skbuffer. 66 * Allocate skbuffer.
@@ -174,7 +172,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
174 txdesc->cw_max = entry->queue->cw_max; 172 txdesc->cw_max = entry->queue->cw_max;
175 txdesc->aifs = entry->queue->aifs; 173 txdesc->aifs = entry->queue->aifs;
176 174
177 /* Data length + CRC + IV/EIV/ICV/MMIC (when using encryption) */ 175 /* Data length + CRC */
178 data_length = entry->skb->len + 4; 176 data_length = entry->skb->len + 4;
179 177
180 /* 178 /*
@@ -183,34 +181,17 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
183 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) 181 if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
184 __set_bit(ENTRY_TXD_ACK, &txdesc->flags); 182 __set_bit(ENTRY_TXD_ACK, &txdesc->flags);
185 183
186#ifdef CONFIG_RT2X00_LIB_CRYPTO
187 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) && 184 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) &&
188 !entry->skb->do_not_encrypt) { 185 !entry->skb->do_not_encrypt) {
189 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 186 /* Apply crypto specific descriptor information */
190 187 rt2x00crypto_create_tx_descriptor(entry, txdesc);
191 __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
192
193 txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key);
194
195 if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
196 __set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags);
197
198 txdesc->key_idx = hw_key->hw_key_idx;
199 txdesc->iv_offset = ieee80211_get_hdrlen_from_skb(entry->skb);
200 188
201 /* 189 /*
202 * Extend frame length to include all encryption overhead 190 * Extend frame length to include all encryption overhead
203 * that will be added by the hardware. 191 * that will be added by the hardware.
204 */ 192 */
205 data_length += rt2x00crypto_tx_overhead(tx_info); 193 data_length += rt2x00crypto_tx_overhead(tx_info);
206
207 if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV))
208 __set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags);
209
210 if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
211 __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags);
212 } 194 }
213#endif /* CONFIG_RT2X00_LIB_CRYPTO */
214 195
215 /* 196 /*
216 * Check if this is a RTS/CTS frame 197 * Check if this is a RTS/CTS frame
@@ -230,8 +211,8 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
230 /* 211 /*
231 * Determine retry information. 212 * Determine retry information.
232 */ 213 */
233 txdesc->retry_limit = tx_info->control.retry_limit; 214 txdesc->retry_limit = tx_info->control.rates[0].count - 1;
234 if (tx_info->flags & IEEE80211_TX_CTL_LONG_RETRY_LIMIT) 215 if (txdesc->retry_limit >= rt2x00dev->long_retry)
235 __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags); 216 __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);
236 217
237 /* 218 /*
@@ -312,8 +293,8 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
312 /* 293 /*
313 * Convert length to microseconds. 294 * Convert length to microseconds.
314 */ 295 */
315 residual = get_duration_res(data_length, hwrate->bitrate); 296 residual = GET_DURATION_RES(data_length, hwrate->bitrate);
316 duration = get_duration(data_length, hwrate->bitrate); 297 duration = GET_DURATION(data_length, hwrate->bitrate);
317 298
318 if (residual != 0) { 299 if (residual != 0) {
319 duration++; 300 duration++;
@@ -371,13 +352,15 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
371 352
372int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb) 353int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
373{ 354{
355 struct ieee80211_tx_info *tx_info;
374 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX); 356 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
375 struct txentry_desc txdesc; 357 struct txentry_desc txdesc;
376 struct skb_frame_desc *skbdesc; 358 struct skb_frame_desc *skbdesc;
377 unsigned int iv_len = 0; 359 unsigned int iv_len = 0;
360 u8 rate_idx, rate_flags;
378 361
379 if (unlikely(rt2x00queue_full(queue))) 362 if (unlikely(rt2x00queue_full(queue)))
380 return -EINVAL; 363 return -ENOBUFS;
381 364
382 if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) { 365 if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
383 ERROR(queue->rt2x00dev, 366 ERROR(queue->rt2x00dev,
@@ -399,13 +382,18 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
399 iv_len = IEEE80211_SKB_CB(skb)->control.hw_key->iv_len; 382 iv_len = IEEE80211_SKB_CB(skb)->control.hw_key->iv_len;
400 383
401 /* 384 /*
402 * All information is retreived from the skb->cb array, 385 * All information is retrieved from the skb->cb array,
403 * now we should claim ownership of the driver part of that 386 * now we should claim ownership of the driver part of that
404 * array. 387 * array, preserving the bitrate index and flags.
405 */ 388 */
406 skbdesc = get_skb_frame_desc(entry->skb); 389 tx_info = IEEE80211_SKB_CB(skb);
390 rate_idx = tx_info->control.rates[0].idx;
391 rate_flags = tx_info->control.rates[0].flags;
392 skbdesc = get_skb_frame_desc(skb);
407 memset(skbdesc, 0, sizeof(*skbdesc)); 393 memset(skbdesc, 0, sizeof(*skbdesc));
408 skbdesc->entry = entry; 394 skbdesc->entry = entry;
395 skbdesc->tx_rate_idx = rate_idx;
396 skbdesc->tx_rate_flags = rate_flags;
409 397
410 /* 398 /*
411 * When hardware encryption is supported, and this frame 399 * When hardware encryption is supported, and this frame
@@ -414,19 +402,21 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
414 */ 402 */
415 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) && 403 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
416 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) { 404 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
417 rt2x00crypto_tx_remove_iv(skb, iv_len); 405 if (test_bit(CONFIG_CRYPTO_COPY_IV, &queue->rt2x00dev->flags))
406 rt2x00crypto_tx_copy_iv(skb, iv_len);
407 else
408 rt2x00crypto_tx_remove_iv(skb, iv_len);
418 } 409 }
419 410
420 /* 411 /*
421 * It could be possible that the queue was corrupted and this 412 * It could be possible that the queue was corrupted and this
422 * call failed. Just drop the frame, we cannot rollback and pass 413 * call failed. Since we always return NETDEV_TX_OK to mac80211,
423 * the frame to mac80211 because the skb->cb has now been tainted. 414 * this frame will simply be dropped.
424 */ 415 */
425 if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) { 416 if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
426 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 417 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
427 dev_kfree_skb_any(entry->skb);
428 entry->skb = NULL; 418 entry->skb = NULL;
429 return 0; 419 return -EIO;
430 } 420 }
431 421
432 if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags)) 422 if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
@@ -556,7 +546,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
556 queue->length++; 546 queue->length++;
557 } else if (index == Q_INDEX_DONE) { 547 } else if (index == Q_INDEX_DONE) {
558 queue->length--; 548 queue->length--;
559 queue->count ++; 549 queue->count++;
560 } 550 }
561 551
562 spin_unlock_irqrestore(&queue->lock, irqflags); 552 spin_unlock_irqrestore(&queue->lock, irqflags);
@@ -575,40 +565,18 @@ static void rt2x00queue_reset(struct data_queue *queue)
575 spin_unlock_irqrestore(&queue->lock, irqflags); 565 spin_unlock_irqrestore(&queue->lock, irqflags);
576} 566}
577 567
578void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev) 568void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
579{
580 struct data_queue *queue = rt2x00dev->rx;
581 unsigned int i;
582
583 rt2x00queue_reset(queue);
584
585 if (!rt2x00dev->ops->lib->init_rxentry)
586 return;
587
588 for (i = 0; i < queue->limit; i++) {
589 queue->entries[i].flags = 0;
590
591 rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
592 &queue->entries[i]);
593 }
594}
595
596void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
597{ 569{
598 struct data_queue *queue; 570 struct data_queue *queue;
599 unsigned int i; 571 unsigned int i;
600 572
601 txall_queue_for_each(rt2x00dev, queue) { 573 queue_for_each(rt2x00dev, queue) {
602 rt2x00queue_reset(queue); 574 rt2x00queue_reset(queue);
603 575
604 if (!rt2x00dev->ops->lib->init_txentry)
605 continue;
606
607 for (i = 0; i < queue->limit; i++) { 576 for (i = 0; i < queue->limit; i++) {
608 queue->entries[i].flags = 0; 577 queue->entries[i].flags = 0;
609 578
610 rt2x00dev->ops->lib->init_txentry(rt2x00dev, 579 rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
611 &queue->entries[i]);
612 } 580 }
613 } 581 }
614} 582}
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 9dbf04f0f04c..282937153408 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -104,22 +104,25 @@ enum skb_frame_desc_flags {
104 * 104 *
105 * @flags: Frame flags, see &enum skb_frame_desc_flags. 105 * @flags: Frame flags, see &enum skb_frame_desc_flags.
106 * @desc_len: Length of the frame descriptor. 106 * @desc_len: Length of the frame descriptor.
107 * @tx_rate_idx: the index of the TX rate, used for TX status reporting
108 * @tx_rate_flags: the TX rate flags, used for TX status reporting
107 * @desc: Pointer to descriptor part of the frame. 109 * @desc: Pointer to descriptor part of the frame.
108 * Note that this pointer could point to something outside 110 * Note that this pointer could point to something outside
109 * of the scope of the skb->data pointer. 111 * of the scope of the skb->data pointer.
110 * @iv: IV data used during encryption/decryption. 112 * @iv: IV/EIV data used during encryption/decryption.
111 * @eiv: EIV data used during encryption/decryption.
112 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer. 113 * @skb_dma: (PCI-only) the DMA address associated with the sk buffer.
113 * @entry: The entry to which this sk buffer belongs. 114 * @entry: The entry to which this sk buffer belongs.
114 */ 115 */
115struct skb_frame_desc { 116struct skb_frame_desc {
116 unsigned int flags; 117 u8 flags;
118
119 u8 desc_len;
120 u8 tx_rate_idx;
121 u8 tx_rate_flags;
117 122
118 unsigned int desc_len;
119 void *desc; 123 void *desc;
120 124
121 __le32 iv; 125 __le32 iv[2];
122 __le32 eiv;
123 126
124 dma_addr_t skb_dma; 127 dma_addr_t skb_dma;
125 128
@@ -143,11 +146,15 @@ static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
143 * @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value. 146 * @RXDONE_SIGNAL_PLCP: Signal field contains the plcp value.
144 * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value. 147 * @RXDONE_SIGNAL_BITRATE: Signal field contains the bitrate value.
145 * @RXDONE_MY_BSS: Does this frame originate from device's BSS. 148 * @RXDONE_MY_BSS: Does this frame originate from device's BSS.
149 * @RXDONE_CRYPTO_IV: Driver provided IV/EIV data.
150 * @RXDONE_CRYPTO_ICV: Driver provided ICV data.
146 */ 151 */
147enum rxdone_entry_desc_flags { 152enum rxdone_entry_desc_flags {
148 RXDONE_SIGNAL_PLCP = 1 << 0, 153 RXDONE_SIGNAL_PLCP = 1 << 0,
149 RXDONE_SIGNAL_BITRATE = 1 << 1, 154 RXDONE_SIGNAL_BITRATE = 1 << 1,
150 RXDONE_MY_BSS = 1 << 2, 155 RXDONE_MY_BSS = 1 << 2,
156 RXDONE_CRYPTO_IV = 1 << 3,
157 RXDONE_CRYPTO_ICV = 1 << 4,
151}; 158};
152 159
153/** 160/**
@@ -163,8 +170,7 @@ enum rxdone_entry_desc_flags {
163 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags). 170 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
164 * @cipher: Cipher type used during decryption. 171 * @cipher: Cipher type used during decryption.
165 * @cipher_status: Decryption status. 172 * @cipher_status: Decryption status.
166 * @iv: IV data used during decryption. 173 * @iv: IV/EIV data used during decryption.
167 * @eiv: EIV data used during decryption.
168 * @icv: ICV data used during decryption. 174 * @icv: ICV data used during decryption.
169 */ 175 */
170struct rxdone_entry_desc { 176struct rxdone_entry_desc {
@@ -177,8 +183,7 @@ struct rxdone_entry_desc {
177 u8 cipher; 183 u8 cipher;
178 u8 cipher_status; 184 u8 cipher_status;
179 185
180 __le32 iv; 186 __le32 iv[2];
181 __le32 eiv;
182 __le32 icv; 187 __le32 icv;
183}; 188};
184 189
@@ -375,6 +380,8 @@ enum queue_index {
375 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue). 380 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
376 * @data_size: Maximum data size for the frames in this queue. 381 * @data_size: Maximum data size for the frames in this queue.
377 * @desc_size: Hardware descriptor size for the data in this queue. 382 * @desc_size: Hardware descriptor size for the data in this queue.
383 * @usb_endpoint: Device endpoint used for communication (USB only)
384 * @usb_maxpacket: Max packet size for given endpoint (USB only)
378 */ 385 */
379struct data_queue { 386struct data_queue {
380 struct rt2x00_dev *rt2x00dev; 387 struct rt2x00_dev *rt2x00dev;
@@ -396,6 +403,9 @@ struct data_queue {
396 403
397 unsigned short data_size; 404 unsigned short data_size;
398 unsigned short desc_size; 405 unsigned short desc_size;
406
407 unsigned short usb_endpoint;
408 unsigned short usb_maxpacket;
399}; 409};
400 410
401/** 411/**
@@ -439,6 +449,19 @@ struct data_queue_desc {
439 &(__dev)->tx[(__dev)->ops->tx_queues] 449 &(__dev)->tx[(__dev)->ops->tx_queues]
440 450
441/** 451/**
452 * queue_next - Return pointer to next queue in list (HELPER MACRO).
453 * @__queue: Current queue for which we need the next queue
454 *
455 * Using the current queue address we take the address directly
456 * after the queue to take the next queue. Note that this macro
457 * should be used carefully since it does not protect against
458 * moving past the end of the list. (See macros &queue_end and
459 * &tx_queue_end for determining the end of the queue).
460 */
461#define queue_next(__queue) \
462 &(__queue)[1]
463
464/**
442 * queue_loop - Loop through the queues within a specific range (HELPER MACRO). 465 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
443 * @__entry: Pointer where the current queue entry will be stored in. 466 * @__entry: Pointer where the current queue entry will be stored in.
444 * @__start: Start queue pointer. 467 * @__start: Start queue pointer.
@@ -448,8 +471,8 @@ struct data_queue_desc {
448 */ 471 */
449#define queue_loop(__entry, __start, __end) \ 472#define queue_loop(__entry, __start, __end) \
450 for ((__entry) = (__start); \ 473 for ((__entry) = (__start); \
451 prefetch(&(__entry)[1]), (__entry) != (__end); \ 474 prefetch(queue_next(__entry)), (__entry) != (__end);\
452 (__entry) = &(__entry)[1]) 475 (__entry) = queue_next(__entry))
453 476
454/** 477/**
455 * queue_for_each - Loop through all queues 478 * queue_for_each - Loop through all queues
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index b73a7e0aeed4..83df312ac56f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -79,7 +79,7 @@ int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
79{ 79{
80 int status; 80 int status;
81 81
82 BUG_ON(!mutex_is_locked(&rt2x00dev->usb_cache_mutex)); 82 BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex));
83 83
84 /* 84 /*
85 * Check for Cache availability. 85 * Check for Cache availability.
@@ -110,13 +110,13 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
110{ 110{
111 int status; 111 int status;
112 112
113 mutex_lock(&rt2x00dev->usb_cache_mutex); 113 mutex_lock(&rt2x00dev->csr_mutex);
114 114
115 status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request, 115 status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
116 requesttype, offset, buffer, 116 requesttype, offset, buffer,
117 buffer_length, timeout); 117 buffer_length, timeout);
118 118
119 mutex_unlock(&rt2x00dev->usb_cache_mutex); 119 mutex_unlock(&rt2x00dev->csr_mutex);
120 120
121 return status; 121 return status;
122} 122}
@@ -132,7 +132,7 @@ int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
132 unsigned char *tb; 132 unsigned char *tb;
133 u16 off, len, bsize; 133 u16 off, len, bsize;
134 134
135 mutex_lock(&rt2x00dev->usb_cache_mutex); 135 mutex_lock(&rt2x00dev->csr_mutex);
136 136
137 tb = (char *)buffer; 137 tb = (char *)buffer;
138 off = offset; 138 off = offset;
@@ -148,12 +148,34 @@ int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
148 off += bsize; 148 off += bsize;
149 } 149 }
150 150
151 mutex_unlock(&rt2x00dev->usb_cache_mutex); 151 mutex_unlock(&rt2x00dev->csr_mutex);
152 152
153 return status; 153 return status;
154} 154}
155EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff); 155EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff);
156 156
157int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
158 const unsigned int offset,
159 struct rt2x00_field32 field,
160 u32 *reg)
161{
162 unsigned int i;
163
164 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
165 rt2x00usb_register_read_lock(rt2x00dev, offset, reg);
166 if (!rt2x00_get_field32(*reg, field))
167 return 1;
168 udelay(REGISTER_BUSY_DELAY);
169 }
170
171 ERROR(rt2x00dev, "Indirect register access failed: "
172 "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
173 *reg = ~0;
174
175 return 0;
176}
177EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);
178
157/* 179/*
158 * TX data handlers. 180 * TX data handlers.
159 */ 181 */
@@ -212,10 +234,10 @@ int rt2x00usb_write_tx_data(struct queue_entry *entry)
212 * length of the data to usb_fill_bulk_urb. Pass the skb 234 * length of the data to usb_fill_bulk_urb. Pass the skb
213 * to the driver to determine what the length should be. 235 * to the driver to determine what the length should be.
214 */ 236 */
215 length = rt2x00dev->ops->lib->get_tx_data_len(rt2x00dev, entry->skb); 237 length = rt2x00dev->ops->lib->get_tx_data_len(entry);
216 238
217 usb_fill_bulk_urb(entry_priv->urb, usb_dev, 239 usb_fill_bulk_urb(entry_priv->urb, usb_dev,
218 usb_sndbulkpipe(usb_dev, 1), 240 usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
219 entry->skb->data, length, 241 entry->skb->data, length,
220 rt2x00usb_interrupt_txdone, entry); 242 rt2x00usb_interrupt_txdone, entry);
221 243
@@ -351,28 +373,96 @@ EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
351/* 373/*
352 * Device initialization handlers. 374 * Device initialization handlers.
353 */ 375 */
354void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev, 376void rt2x00usb_clear_entry(struct queue_entry *entry)
355 struct queue_entry *entry)
356{ 377{
357 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 378 struct usb_device *usb_dev =
379 to_usb_device_intf(entry->queue->rt2x00dev->dev);
358 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 380 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
381 int pipe;
359 382
360 usb_fill_bulk_urb(entry_priv->urb, usb_dev, 383 if (entry->queue->qid == QID_RX) {
361 usb_rcvbulkpipe(usb_dev, 1), 384 pipe = usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint);
362 entry->skb->data, entry->skb->len, 385 usb_fill_bulk_urb(entry_priv->urb, usb_dev, pipe,
363 rt2x00usb_interrupt_rxdone, entry); 386 entry->skb->data, entry->skb->len,
387 rt2x00usb_interrupt_rxdone, entry);
364 388
365 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 389 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
366 usb_submit_urb(entry_priv->urb, GFP_ATOMIC); 390 usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
391 } else {
392 entry->flags = 0;
393 }
367} 394}
368EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry); 395EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
369 396
370void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev, 397static void rt2x00usb_assign_endpoint(struct data_queue *queue,
371 struct queue_entry *entry) 398 struct usb_endpoint_descriptor *ep_desc)
372{ 399{
373 entry->flags = 0; 400 struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev);
401 int pipe;
402
403 queue->usb_endpoint = usb_endpoint_num(ep_desc);
404
405 if (queue->qid == QID_RX) {
406 pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
407 queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
408 } else {
409 pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
410 queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
411 }
412
413 if (!queue->usb_maxpacket)
414 queue->usb_maxpacket = 1;
415}
416
417static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
418{
419 struct usb_interface *intf = to_usb_interface(rt2x00dev->dev);
420 struct usb_host_interface *intf_desc = intf->cur_altsetting;
421 struct usb_endpoint_descriptor *ep_desc;
422 struct data_queue *queue = rt2x00dev->tx;
423 struct usb_endpoint_descriptor *tx_ep_desc = NULL;
424 unsigned int i;
425
426 /*
427 * Walk through all available endpoints to search for "bulk in"
428 * and "bulk out" endpoints. When we find such endpoints collect
429 * the information we need from the descriptor and assign it
430 * to the queue.
431 */
432 for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
433 ep_desc = &intf_desc->endpoint[i].desc;
434
435 if (usb_endpoint_is_bulk_in(ep_desc)) {
436 rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
437 } else if (usb_endpoint_is_bulk_out(ep_desc)) {
438 rt2x00usb_assign_endpoint(queue, ep_desc);
439
440 if (queue != queue_end(rt2x00dev))
441 queue = queue_next(queue);
442 tx_ep_desc = ep_desc;
443 }
444 }
445
446 /*
447 * At least 1 endpoint for RX and 1 endpoint for TX must be available.
448 */
449 if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) {
450 ERROR(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n");
451 return -EPIPE;
452 }
453
454 /*
455 * It might be possible not all queues have a dedicated endpoint.
456 * Loop through all TX queues and copy the endpoint information
457 * which we have gathered from already assigned endpoints.
458 */
459 txall_queue_for_each(rt2x00dev, queue) {
460 if (!queue->usb_endpoint)
461 rt2x00usb_assign_endpoint(queue, tx_ep_desc);
462 }
463
464 return 0;
374} 465}
375EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry);
376 466
377static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev, 467static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
378 struct data_queue *queue) 468 struct data_queue *queue)
@@ -445,6 +535,13 @@ int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
445 int status; 535 int status;
446 536
447 /* 537 /*
538 * Find endpoints for each queue
539 */
540 status = rt2x00usb_find_endpoints(rt2x00dev);
541 if (status)
542 goto exit;
543
544 /*
448 * Allocate DMA 545 * Allocate DMA
449 */ 546 */
450 queue_for_each(rt2x00dev, queue) { 547 queue_for_each(rt2x00dev, queue) {
@@ -534,12 +631,6 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
534 rt2x00dev->dev = &usb_intf->dev; 631 rt2x00dev->dev = &usb_intf->dev;
535 rt2x00dev->ops = ops; 632 rt2x00dev->ops = ops;
536 rt2x00dev->hw = hw; 633 rt2x00dev->hw = hw;
537 mutex_init(&rt2x00dev->usb_cache_mutex);
538
539 rt2x00dev->usb_maxpacket =
540 usb_maxpacket(usb_dev, usb_sndbulkpipe(usb_dev, 1), 1);
541 if (!rt2x00dev->usb_maxpacket)
542 rt2x00dev->usb_maxpacket = 1;
543 634
544 retval = rt2x00usb_alloc_reg(rt2x00dev); 635 retval = rt2x00usb_alloc_reg(rt2x00dev);
545 if (retval) 636 if (retval)
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 3b4a67417f95..2bd4ac855f52 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -231,6 +231,142 @@ static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev,
231 REGISTER_TIMEOUT16(length)); 231 REGISTER_TIMEOUT16(length));
232} 232}
233 233
234/**
235 * rt2x00usb_regbusy_read - Read 32bit register word
236 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
237 * @offset: Register offset
238 * @value: Pointer to where register contents should be stored
239 *
240 * This function is a simple wrapper for 32bit register access
241 * through rt2x00usb_vendor_request_buff().
242 */
243static inline void rt2x00usb_register_read(struct rt2x00_dev *rt2x00dev,
244 const unsigned int offset,
245 u32 *value)
246{
247 __le32 reg;
248 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
249 USB_VENDOR_REQUEST_IN, offset,
250 &reg, sizeof(reg), REGISTER_TIMEOUT);
251 *value = le32_to_cpu(reg);
252}
253
254/**
255 * rt2x00usb_register_read_lock - Read 32bit register word
256 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
257 * @offset: Register offset
258 * @value: Pointer to where register contents should be stored
259 *
260 * This function is a simple wrapper for 32bit register access
261 * through rt2x00usb_vendor_req_buff_lock().
262 */
263static inline void rt2x00usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
264 const unsigned int offset,
265 u32 *value)
266{
267 __le32 reg;
268 rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
269 USB_VENDOR_REQUEST_IN, offset,
270 &reg, sizeof(reg), REGISTER_TIMEOUT);
271 *value = le32_to_cpu(reg);
272}
273
274/**
275 * rt2x00usb_register_multiread - Read 32bit register words
276 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
277 * @offset: Register offset
278 * @value: Pointer to where register contents should be stored
279 * @length: Length of the data
280 *
281 * This function is a simple wrapper for 32bit register access
282 * through rt2x00usb_vendor_request_buff().
283 */
284static inline void rt2x00usb_register_multiread(struct rt2x00_dev *rt2x00dev,
285 const unsigned int offset,
286 void *value, const u32 length)
287{
288 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
289 USB_VENDOR_REQUEST_IN, offset,
290 value, length,
291 REGISTER_TIMEOUT32(length));
292}
293
294/**
295 * rt2x00usb_register_write - Write 32bit register word
296 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
297 * @offset: Register offset
298 * @value: Data which should be written
299 *
300 * This function is a simple wrapper for 32bit register access
301 * through rt2x00usb_vendor_request_buff().
302 */
303static inline void rt2x00usb_register_write(struct rt2x00_dev *rt2x00dev,
304 const unsigned int offset,
305 u32 value)
306{
307 __le32 reg = cpu_to_le32(value);
308 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
309 USB_VENDOR_REQUEST_OUT, offset,
310 &reg, sizeof(reg), REGISTER_TIMEOUT);
311}
312
313/**
314 * rt2x00usb_register_write_lock - Write 32bit register word
315 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
316 * @offset: Register offset
317 * @value: Data which should be written
318 *
319 * This function is a simple wrapper for 32bit register access
320 * through rt2x00usb_vendor_req_buff_lock().
321 */
322static inline void rt2x00usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
323 const unsigned int offset,
324 u32 value)
325{
326 __le32 reg = cpu_to_le32(value);
327 rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_WRITE,
328 USB_VENDOR_REQUEST_OUT, offset,
329 &reg, sizeof(reg), REGISTER_TIMEOUT);
330}
331
332/**
333 * rt2x00usb_register_multiwrite - Write 32bit register words
334 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
335 * @offset: Register offset
336 * @value: Data which should be written
337 * @length: Length of the data
338 *
339 * This function is a simple wrapper for 32bit register access
340 * through rt2x00usb_vendor_request_buff().
341 */
342static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
343 const unsigned int offset,
344 void *value, const u32 length)
345{
346 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
347 USB_VENDOR_REQUEST_OUT, offset,
348 value, length,
349 REGISTER_TIMEOUT32(length));
350}
351
352/**
353 * rt2x00usb_regbusy_read - Read from register with busy check
354 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
355 * @offset: Register offset
356 * @field: Field to check if register is busy
357 * @reg: Pointer to where register contents should be stored
358 *
359 * This function will read the given register, and checks if the
360 * register is busy. If it is, it will sleep for a couple of
361 * microseconds before reading the register again. If the register
362 * is not read after a certain timeout, this function will return
363 * FALSE.
364 */
365int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
366 const unsigned int offset,
367 struct rt2x00_field32 field,
368 u32 *reg);
369
234/* 370/*
235 * Radio handlers 371 * Radio handlers
236 */ 372 */
@@ -286,10 +422,7 @@ void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
286/* 422/*
287 * Device initialization handlers. 423 * Device initialization handlers.
288 */ 424 */
289void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev, 425void rt2x00usb_clear_entry(struct queue_entry *entry);
290 struct queue_entry *entry);
291void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev,
292 struct queue_entry *entry);
293int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev); 426int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev);
294void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev); 427void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev);
295 428
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index a461620b489f..987e89009f74 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -55,45 +55,36 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
55 * the access attempt is considered to have failed, 55 * the access attempt is considered to have failed,
56 * and we will print an error. 56 * and we will print an error.
57 */ 57 */
58static u32 rt61pci_bbp_check(struct rt2x00_dev *rt2x00dev) 58#define WAIT_FOR_BBP(__dev, __reg) \
59{ 59 rt2x00pci_regbusy_read((__dev), PHY_CSR3, PHY_CSR3_BUSY, (__reg))
60 u32 reg; 60#define WAIT_FOR_RF(__dev, __reg) \
61 unsigned int i; 61 rt2x00pci_regbusy_read((__dev), PHY_CSR4, PHY_CSR4_BUSY, (__reg))
62 62#define WAIT_FOR_MCU(__dev, __reg) \
63 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 63 rt2x00pci_regbusy_read((__dev), H2M_MAILBOX_CSR, \
64 rt2x00pci_register_read(rt2x00dev, PHY_CSR3, &reg); 64 H2M_MAILBOX_CSR_OWNER, (__reg))
65 if (!rt2x00_get_field32(reg, PHY_CSR3_BUSY))
66 break;
67 udelay(REGISTER_BUSY_DELAY);
68 }
69
70 return reg;
71}
72 65
73static void rt61pci_bbp_write(struct rt2x00_dev *rt2x00dev, 66static void rt61pci_bbp_write(struct rt2x00_dev *rt2x00dev,
74 const unsigned int word, const u8 value) 67 const unsigned int word, const u8 value)
75{ 68{
76 u32 reg; 69 u32 reg;
77 70
78 /* 71 mutex_lock(&rt2x00dev->csr_mutex);
79 * Wait until the BBP becomes ready.
80 */
81 reg = rt61pci_bbp_check(rt2x00dev);
82 if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) {
83 ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n");
84 return;
85 }
86 72
87 /* 73 /*
88 * Write the data into the BBP. 74 * Wait until the BBP becomes available, afterwards we
75 * can safely write the new data into the register.
89 */ 76 */
90 reg = 0; 77 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
91 rt2x00_set_field32(&reg, PHY_CSR3_VALUE, value); 78 reg = 0;
92 rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word); 79 rt2x00_set_field32(&reg, PHY_CSR3_VALUE, value);
93 rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); 80 rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word);
94 rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0); 81 rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1);
82 rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0);
83
84 rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg);
85 }
95 86
96 rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg); 87 mutex_unlock(&rt2x00dev->csr_mutex);
97} 88}
98 89
99static void rt61pci_bbp_read(struct rt2x00_dev *rt2x00dev, 90static void rt61pci_bbp_read(struct rt2x00_dev *rt2x00dev,
@@ -101,66 +92,58 @@ static void rt61pci_bbp_read(struct rt2x00_dev *rt2x00dev,
101{ 92{
102 u32 reg; 93 u32 reg;
103 94
104 /* 95 mutex_lock(&rt2x00dev->csr_mutex);
105 * Wait until the BBP becomes ready.
106 */
107 reg = rt61pci_bbp_check(rt2x00dev);
108 if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) {
109 ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n");
110 return;
111 }
112 96
113 /* 97 /*
114 * Write the request into the BBP. 98 * Wait until the BBP becomes available, afterwards we
99 * can safely write the read request into the register.
100 * After the data has been written, we wait until hardware
101 * returns the correct value, if at any time the register
102 * doesn't become available in time, reg will be 0xffffffff
103 * which means we return 0xff to the caller.
115 */ 104 */
116 reg = 0; 105 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
117 rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word); 106 reg = 0;
118 rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); 107 rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word);
119 rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 1); 108 rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1);
109 rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 1);
120 110
121 rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg); 111 rt2x00pci_register_write(rt2x00dev, PHY_CSR3, reg);
122 112
123 /* 113 WAIT_FOR_BBP(rt2x00dev, &reg);
124 * Wait until the BBP becomes ready.
125 */
126 reg = rt61pci_bbp_check(rt2x00dev);
127 if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) {
128 ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n");
129 *value = 0xff;
130 return;
131 } 114 }
132 115
133 *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE); 116 *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE);
117
118 mutex_unlock(&rt2x00dev->csr_mutex);
134} 119}
135 120
136static void rt61pci_rf_write(struct rt2x00_dev *rt2x00dev, 121static void rt61pci_rf_write(struct rt2x00_dev *rt2x00dev,
137 const unsigned int word, const u32 value) 122 const unsigned int word, const u32 value)
138{ 123{
139 u32 reg; 124 u32 reg;
140 unsigned int i;
141 125
142 if (!word) 126 if (!word)
143 return; 127 return;
144 128
145 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 129 mutex_lock(&rt2x00dev->csr_mutex);
146 rt2x00pci_register_read(rt2x00dev, PHY_CSR4, &reg);
147 if (!rt2x00_get_field32(reg, PHY_CSR4_BUSY))
148 goto rf_write;
149 udelay(REGISTER_BUSY_DELAY);
150 }
151 130
152 ERROR(rt2x00dev, "PHY_CSR4 register busy. Write failed.\n"); 131 /*
153 return; 132 * Wait until the RF becomes available, afterwards we
133 * can safely write the new data into the register.
134 */
135 if (WAIT_FOR_RF(rt2x00dev, &reg)) {
136 reg = 0;
137 rt2x00_set_field32(&reg, PHY_CSR4_VALUE, value);
138 rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS, 21);
139 rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
140 rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
154 141
155rf_write: 142 rt2x00pci_register_write(rt2x00dev, PHY_CSR4, reg);
156 reg = 0; 143 rt2x00_rf_write(rt2x00dev, word, value);
157 rt2x00_set_field32(&reg, PHY_CSR4_VALUE, value); 144 }
158 rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS, 21);
159 rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
160 rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
161 145
162 rt2x00pci_register_write(rt2x00dev, PHY_CSR4, reg); 146 mutex_unlock(&rt2x00dev->csr_mutex);
163 rt2x00_rf_write(rt2x00dev, word, value);
164} 147}
165 148
166#ifdef CONFIG_RT2X00_LIB_LEDS 149#ifdef CONFIG_RT2X00_LIB_LEDS
@@ -175,25 +158,27 @@ static void rt61pci_mcu_request(struct rt2x00_dev *rt2x00dev,
175{ 158{
176 u32 reg; 159 u32 reg;
177 160
178 rt2x00pci_register_read(rt2x00dev, H2M_MAILBOX_CSR, &reg); 161 mutex_lock(&rt2x00dev->csr_mutex);
179 162
180 if (rt2x00_get_field32(reg, H2M_MAILBOX_CSR_OWNER)) { 163 /*
181 ERROR(rt2x00dev, "mcu request error. " 164 * Wait until the MCU becomes available, afterwards we
182 "Request 0x%02x failed for token 0x%02x.\n", 165 * can safely write the new data into the register.
183 command, token); 166 */
184 return; 167 if (WAIT_FOR_MCU(rt2x00dev, &reg)) {
168 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1);
169 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
170 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
171 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
172 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, reg);
173
174 rt2x00pci_register_read(rt2x00dev, HOST_CMD_CSR, &reg);
175 rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
176 rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1);
177 rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg);
185 } 178 }
186 179
187 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1); 180 mutex_unlock(&rt2x00dev->csr_mutex);
188 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
189 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
190 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
191 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CSR, reg);
192 181
193 rt2x00pci_register_read(rt2x00dev, HOST_CMD_CSR, &reg);
194 rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
195 rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1);
196 rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg);
197} 182}
198#endif /* CONFIG_RT2X00_LIB_LEDS */ 183#endif /* CONFIG_RT2X00_LIB_LEDS */
199 184
@@ -228,43 +213,34 @@ static void rt61pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
228} 213}
229 214
230#ifdef CONFIG_RT2X00_LIB_DEBUGFS 215#ifdef CONFIG_RT2X00_LIB_DEBUGFS
231#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u32)) )
232
233static void rt61pci_read_csr(struct rt2x00_dev *rt2x00dev,
234 const unsigned int word, u32 *data)
235{
236 rt2x00pci_register_read(rt2x00dev, CSR_OFFSET(word), data);
237}
238
239static void rt61pci_write_csr(struct rt2x00_dev *rt2x00dev,
240 const unsigned int word, u32 data)
241{
242 rt2x00pci_register_write(rt2x00dev, CSR_OFFSET(word), data);
243}
244
245static const struct rt2x00debug rt61pci_rt2x00debug = { 216static const struct rt2x00debug rt61pci_rt2x00debug = {
246 .owner = THIS_MODULE, 217 .owner = THIS_MODULE,
247 .csr = { 218 .csr = {
248 .read = rt61pci_read_csr, 219 .read = rt2x00pci_register_read,
249 .write = rt61pci_write_csr, 220 .write = rt2x00pci_register_write,
221 .flags = RT2X00DEBUGFS_OFFSET,
222 .word_base = CSR_REG_BASE,
250 .word_size = sizeof(u32), 223 .word_size = sizeof(u32),
251 .word_count = CSR_REG_SIZE / sizeof(u32), 224 .word_count = CSR_REG_SIZE / sizeof(u32),
252 }, 225 },
253 .eeprom = { 226 .eeprom = {
254 .read = rt2x00_eeprom_read, 227 .read = rt2x00_eeprom_read,
255 .write = rt2x00_eeprom_write, 228 .write = rt2x00_eeprom_write,
229 .word_base = EEPROM_BASE,
256 .word_size = sizeof(u16), 230 .word_size = sizeof(u16),
257 .word_count = EEPROM_SIZE / sizeof(u16), 231 .word_count = EEPROM_SIZE / sizeof(u16),
258 }, 232 },
259 .bbp = { 233 .bbp = {
260 .read = rt61pci_bbp_read, 234 .read = rt61pci_bbp_read,
261 .write = rt61pci_bbp_write, 235 .write = rt61pci_bbp_write,
236 .word_base = BBP_BASE,
262 .word_size = sizeof(u8), 237 .word_size = sizeof(u8),
263 .word_count = BBP_SIZE / sizeof(u8), 238 .word_count = BBP_SIZE / sizeof(u8),
264 }, 239 },
265 .rf = { 240 .rf = {
266 .read = rt2x00_rf_read, 241 .read = rt2x00_rf_read,
267 .write = rt61pci_rf_write, 242 .write = rt61pci_rf_write,
243 .word_base = RF_BASE,
268 .word_size = sizeof(u32), 244 .word_size = sizeof(u32),
269 .word_count = RF_SIZE / sizeof(u32), 245 .word_count = RF_SIZE / sizeof(u32),
270 }, 246 },
@@ -643,95 +619,18 @@ static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev,
643 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE, 619 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
644 !!erp->short_preamble); 620 !!erp->short_preamble);
645 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); 621 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
646}
647
648
649static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
650 struct rt2x00lib_conf *libconf)
651{
652 u16 eeprom;
653 short lna_gain = 0;
654
655 if (libconf->band == IEEE80211_BAND_2GHZ) {
656 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
657 lna_gain += 14;
658
659 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
660 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
661 } else {
662 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
663 lna_gain += 14;
664
665 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
666 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
667 }
668
669 rt2x00dev->lna_gain = lna_gain;
670}
671
672static void rt61pci_config_phymode(struct rt2x00_dev *rt2x00dev,
673 const int basic_rate_mask)
674{
675 rt2x00pci_register_write(rt2x00dev, TXRX_CSR5, basic_rate_mask);
676}
677
678static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev,
679 struct rf_channel *rf, const int txpower)
680{
681 u8 r3;
682 u8 r94;
683 u8 smart;
684 622
685 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); 623 rt2x00pci_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates);
686 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
687
688 smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
689 rt2x00_rf(&rt2x00dev->chip, RF2527));
690
691 rt61pci_bbp_read(rt2x00dev, 3, &r3);
692 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
693 rt61pci_bbp_write(rt2x00dev, 3, r3);
694
695 r94 = 6;
696 if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94))
697 r94 += txpower - MAX_TXPOWER;
698 else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94))
699 r94 += txpower;
700 rt61pci_bbp_write(rt2x00dev, 94, r94);
701 624
702 rt61pci_rf_write(rt2x00dev, 1, rf->rf1); 625 rt2x00pci_register_read(rt2x00dev, MAC_CSR9, &reg);
703 rt61pci_rf_write(rt2x00dev, 2, rf->rf2); 626 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time);
704 rt61pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); 627 rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg);
705 rt61pci_rf_write(rt2x00dev, 4, rf->rf4);
706
707 udelay(200);
708
709 rt61pci_rf_write(rt2x00dev, 1, rf->rf1);
710 rt61pci_rf_write(rt2x00dev, 2, rf->rf2);
711 rt61pci_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
712 rt61pci_rf_write(rt2x00dev, 4, rf->rf4);
713
714 udelay(200);
715
716 rt61pci_rf_write(rt2x00dev, 1, rf->rf1);
717 rt61pci_rf_write(rt2x00dev, 2, rf->rf2);
718 rt61pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
719 rt61pci_rf_write(rt2x00dev, 4, rf->rf4);
720
721 msleep(1);
722}
723
724static void rt61pci_config_txpower(struct rt2x00_dev *rt2x00dev,
725 const int txpower)
726{
727 struct rf_channel rf;
728
729 rt2x00_rf_read(rt2x00dev, 1, &rf.rf1);
730 rt2x00_rf_read(rt2x00dev, 2, &rf.rf2);
731 rt2x00_rf_read(rt2x00dev, 3, &rf.rf3);
732 rt2x00_rf_read(rt2x00dev, 4, &rf.rf4);
733 628
734 rt61pci_config_channel(rt2x00dev, &rf, txpower); 629 rt2x00pci_register_read(rt2x00dev, MAC_CSR8, &reg);
630 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs);
631 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3);
632 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs);
633 rt2x00pci_register_write(rt2x00dev, MAC_CSR8, reg);
735} 634}
736 635
737static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev, 636static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
@@ -906,8 +805,8 @@ static const struct antenna_sel antenna_sel_bg[] = {
906 { 98, { 0x48, 0x48 } }, 805 { 98, { 0x48, 0x48 } },
907}; 806};
908 807
909static void rt61pci_config_antenna(struct rt2x00_dev *rt2x00dev, 808static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
910 struct antenna_setup *ant) 809 struct antenna_setup *ant)
911{ 810{
912 const struct antenna_sel *sel; 811 const struct antenna_sel *sel;
913 unsigned int lna; 812 unsigned int lna;
@@ -954,20 +853,105 @@ static void rt61pci_config_antenna(struct rt2x00_dev *rt2x00dev,
954 } 853 }
955} 854}
956 855
957static void rt61pci_config_duration(struct rt2x00_dev *rt2x00dev, 856static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
857 struct rt2x00lib_conf *libconf)
858{
859 u16 eeprom;
860 short lna_gain = 0;
861
862 if (libconf->conf->channel->band == IEEE80211_BAND_2GHZ) {
863 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
864 lna_gain += 14;
865
866 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
867 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
868 } else {
869 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
870 lna_gain += 14;
871
872 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
873 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
874 }
875
876 rt2x00dev->lna_gain = lna_gain;
877}
878
879static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev,
880 struct rf_channel *rf, const int txpower)
881{
882 u8 r3;
883 u8 r94;
884 u8 smart;
885
886 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
887 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
888
889 smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
890 rt2x00_rf(&rt2x00dev->chip, RF2527));
891
892 rt61pci_bbp_read(rt2x00dev, 3, &r3);
893 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
894 rt61pci_bbp_write(rt2x00dev, 3, r3);
895
896 r94 = 6;
897 if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94))
898 r94 += txpower - MAX_TXPOWER;
899 else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94))
900 r94 += txpower;
901 rt61pci_bbp_write(rt2x00dev, 94, r94);
902
903 rt61pci_rf_write(rt2x00dev, 1, rf->rf1);
904 rt61pci_rf_write(rt2x00dev, 2, rf->rf2);
905 rt61pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
906 rt61pci_rf_write(rt2x00dev, 4, rf->rf4);
907
908 udelay(200);
909
910 rt61pci_rf_write(rt2x00dev, 1, rf->rf1);
911 rt61pci_rf_write(rt2x00dev, 2, rf->rf2);
912 rt61pci_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
913 rt61pci_rf_write(rt2x00dev, 4, rf->rf4);
914
915 udelay(200);
916
917 rt61pci_rf_write(rt2x00dev, 1, rf->rf1);
918 rt61pci_rf_write(rt2x00dev, 2, rf->rf2);
919 rt61pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
920 rt61pci_rf_write(rt2x00dev, 4, rf->rf4);
921
922 msleep(1);
923}
924
925static void rt61pci_config_txpower(struct rt2x00_dev *rt2x00dev,
926 const int txpower)
927{
928 struct rf_channel rf;
929
930 rt2x00_rf_read(rt2x00dev, 1, &rf.rf1);
931 rt2x00_rf_read(rt2x00dev, 2, &rf.rf2);
932 rt2x00_rf_read(rt2x00dev, 3, &rf.rf3);
933 rt2x00_rf_read(rt2x00dev, 4, &rf.rf4);
934
935 rt61pci_config_channel(rt2x00dev, &rf, txpower);
936}
937
938static void rt61pci_config_retry_limit(struct rt2x00_dev *rt2x00dev,
958 struct rt2x00lib_conf *libconf) 939 struct rt2x00lib_conf *libconf)
959{ 940{
960 u32 reg; 941 u32 reg;
961 942
962 rt2x00pci_register_read(rt2x00dev, MAC_CSR9, &reg); 943 rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg);
963 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, libconf->slot_time); 944 rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT,
964 rt2x00pci_register_write(rt2x00dev, MAC_CSR9, reg); 945 libconf->conf->long_frame_max_tx_count);
946 rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
947 libconf->conf->short_frame_max_tx_count);
948 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
949}
965 950
966 rt2x00pci_register_read(rt2x00dev, MAC_CSR8, &reg); 951static void rt61pci_config_duration(struct rt2x00_dev *rt2x00dev,
967 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, libconf->sifs); 952 struct rt2x00lib_conf *libconf)
968 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); 953{
969 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, libconf->eifs); 954 u32 reg;
970 rt2x00pci_register_write(rt2x00dev, MAC_CSR8, reg);
971 955
972 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg); 956 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
973 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); 957 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
@@ -990,16 +974,15 @@ static void rt61pci_config(struct rt2x00_dev *rt2x00dev,
990 /* Always recalculate LNA gain before changing configuration */ 974 /* Always recalculate LNA gain before changing configuration */
991 rt61pci_config_lna_gain(rt2x00dev, libconf); 975 rt61pci_config_lna_gain(rt2x00dev, libconf);
992 976
993 if (flags & CONFIG_UPDATE_PHYMODE) 977 if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
994 rt61pci_config_phymode(rt2x00dev, libconf->basic_rates);
995 if (flags & CONFIG_UPDATE_CHANNEL)
996 rt61pci_config_channel(rt2x00dev, &libconf->rf, 978 rt61pci_config_channel(rt2x00dev, &libconf->rf,
997 libconf->conf->power_level); 979 libconf->conf->power_level);
998 if ((flags & CONFIG_UPDATE_TXPOWER) && !(flags & CONFIG_UPDATE_CHANNEL)) 980 if ((flags & IEEE80211_CONF_CHANGE_POWER) &&
981 !(flags & IEEE80211_CONF_CHANGE_CHANNEL))
999 rt61pci_config_txpower(rt2x00dev, libconf->conf->power_level); 982 rt61pci_config_txpower(rt2x00dev, libconf->conf->power_level);
1000 if (flags & CONFIG_UPDATE_ANTENNA) 983 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
1001 rt61pci_config_antenna(rt2x00dev, &libconf->ant); 984 rt61pci_config_retry_limit(rt2x00dev, libconf);
1002 if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) 985 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
1003 rt61pci_config_duration(rt2x00dev, libconf); 986 rt61pci_config_duration(rt2x00dev, libconf);
1004} 987}
1005 988
@@ -1263,33 +1246,44 @@ static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, const void *data,
1263/* 1246/*
1264 * Initialization functions. 1247 * Initialization functions.
1265 */ 1248 */
1266static void rt61pci_init_rxentry(struct rt2x00_dev *rt2x00dev, 1249static bool rt61pci_get_entry_state(struct queue_entry *entry)
1267 struct queue_entry *entry)
1268{ 1250{
1269 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1251 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1270 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1271 u32 word; 1252 u32 word;
1272 1253
1273 rt2x00_desc_read(entry_priv->desc, 5, &word); 1254 if (entry->queue->qid == QID_RX) {
1274 rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS, 1255 rt2x00_desc_read(entry_priv->desc, 0, &word);
1275 skbdesc->skb_dma); 1256
1276 rt2x00_desc_write(entry_priv->desc, 5, word); 1257 return rt2x00_get_field32(word, RXD_W0_OWNER_NIC);
1258 } else {
1259 rt2x00_desc_read(entry_priv->desc, 0, &word);
1277 1260
1278 rt2x00_desc_read(entry_priv->desc, 0, &word); 1261 return (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
1279 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); 1262 rt2x00_get_field32(word, TXD_W0_VALID));
1280 rt2x00_desc_write(entry_priv->desc, 0, word); 1263 }
1281} 1264}
1282 1265
1283static void rt61pci_init_txentry(struct rt2x00_dev *rt2x00dev, 1266static void rt61pci_clear_entry(struct queue_entry *entry)
1284 struct queue_entry *entry)
1285{ 1267{
1286 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1268 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1269 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1287 u32 word; 1270 u32 word;
1288 1271
1289 rt2x00_desc_read(entry_priv->desc, 0, &word); 1272 if (entry->queue->qid == QID_RX) {
1290 rt2x00_set_field32(&word, TXD_W0_VALID, 0); 1273 rt2x00_desc_read(entry_priv->desc, 5, &word);
1291 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); 1274 rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS,
1292 rt2x00_desc_write(entry_priv->desc, 0, word); 1275 skbdesc->skb_dma);
1276 rt2x00_desc_write(entry_priv->desc, 5, word);
1277
1278 rt2x00_desc_read(entry_priv->desc, 0, &word);
1279 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
1280 rt2x00_desc_write(entry_priv->desc, 0, word);
1281 } else {
1282 rt2x00_desc_read(entry_priv->desc, 0, &word);
1283 rt2x00_set_field32(&word, TXD_W0_VALID, 0);
1284 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
1285 rt2x00_desc_write(entry_priv->desc, 0, word);
1286 }
1293} 1287}
1294 1288
1295static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev) 1289static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev)
@@ -1784,8 +1778,8 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1784 rt2x00_desc_write(txd, 2, word); 1778 rt2x00_desc_write(txd, 2, word);
1785 1779
1786 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { 1780 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
1787 _rt2x00_desc_write(txd, 3, skbdesc->iv); 1781 _rt2x00_desc_write(txd, 3, skbdesc->iv[0]);
1788 _rt2x00_desc_write(txd, 4, skbdesc->eiv); 1782 _rt2x00_desc_write(txd, 4, skbdesc->iv[1]);
1789 } 1783 }
1790 1784
1791 rt2x00_desc_read(txd, 5, &word); 1785 rt2x00_desc_read(txd, 5, &word);
@@ -1934,7 +1928,7 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1934} 1928}
1935 1929
1936static void rt61pci_fill_rxdone(struct queue_entry *entry, 1930static void rt61pci_fill_rxdone(struct queue_entry *entry,
1937 struct rxdone_entry_desc *rxdesc) 1931 struct rxdone_entry_desc *rxdesc)
1938{ 1932{
1939 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1933 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1940 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1934 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
@@ -1955,9 +1949,12 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
1955 } 1949 }
1956 1950
1957 if (rxdesc->cipher != CIPHER_NONE) { 1951 if (rxdesc->cipher != CIPHER_NONE) {
1958 _rt2x00_desc_read(entry_priv->desc, 2, &rxdesc->iv); 1952 _rt2x00_desc_read(entry_priv->desc, 2, &rxdesc->iv[0]);
1959 _rt2x00_desc_read(entry_priv->desc, 3, &rxdesc->eiv); 1953 _rt2x00_desc_read(entry_priv->desc, 3, &rxdesc->iv[1]);
1954 rxdesc->dev_flags |= RXDONE_CRYPTO_IV;
1955
1960 _rt2x00_desc_read(entry_priv->desc, 4, &rxdesc->icv); 1956 _rt2x00_desc_read(entry_priv->desc, 4, &rxdesc->icv);
1957 rxdesc->dev_flags |= RXDONE_CRYPTO_ICV;
1961 1958
1962 /* 1959 /*
1963 * Hardware has stripped IV/EIV data from 802.11 frame during 1960 * Hardware has stripped IV/EIV data from 802.11 frame during
@@ -2175,10 +2172,8 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2175 */ 2172 */
2176 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 2173 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
2177 if (!is_valid_ether_addr(mac)) { 2174 if (!is_valid_ether_addr(mac)) {
2178 DECLARE_MAC_BUF(macbuf);
2179
2180 random_ether_addr(mac); 2175 random_ether_addr(mac);
2181 EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac)); 2176 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
2182 } 2177 }
2183 2178
2184 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); 2179 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
@@ -2630,20 +2625,6 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
2630/* 2625/*
2631 * IEEE80211 stack callback functions. 2626 * IEEE80211 stack callback functions.
2632 */ 2627 */
2633static int rt61pci_set_retry_limit(struct ieee80211_hw *hw,
2634 u32 short_retry, u32 long_retry)
2635{
2636 struct rt2x00_dev *rt2x00dev = hw->priv;
2637 u32 reg;
2638
2639 rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg);
2640 rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT, long_retry);
2641 rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT, short_retry);
2642 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
2643
2644 return 0;
2645}
2646
2647static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, 2628static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2648 const struct ieee80211_tx_queue_params *params) 2629 const struct ieee80211_tx_queue_params *params)
2649{ 2630{
@@ -2726,7 +2707,6 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
2726 .configure_filter = rt2x00mac_configure_filter, 2707 .configure_filter = rt2x00mac_configure_filter,
2727 .set_key = rt2x00mac_set_key, 2708 .set_key = rt2x00mac_set_key,
2728 .get_stats = rt2x00mac_get_stats, 2709 .get_stats = rt2x00mac_get_stats,
2729 .set_retry_limit = rt61pci_set_retry_limit,
2730 .bss_info_changed = rt2x00mac_bss_info_changed, 2710 .bss_info_changed = rt2x00mac_bss_info_changed,
2731 .conf_tx = rt61pci_conf_tx, 2711 .conf_tx = rt61pci_conf_tx,
2732 .get_tx_stats = rt2x00mac_get_tx_stats, 2712 .get_tx_stats = rt2x00mac_get_tx_stats,
@@ -2741,8 +2721,8 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2741 .load_firmware = rt61pci_load_firmware, 2721 .load_firmware = rt61pci_load_firmware,
2742 .initialize = rt2x00pci_initialize, 2722 .initialize = rt2x00pci_initialize,
2743 .uninitialize = rt2x00pci_uninitialize, 2723 .uninitialize = rt2x00pci_uninitialize,
2744 .init_rxentry = rt61pci_init_rxentry, 2724 .get_entry_state = rt61pci_get_entry_state,
2745 .init_txentry = rt61pci_init_txentry, 2725 .clear_entry = rt61pci_clear_entry,
2746 .set_device_state = rt61pci_set_device_state, 2726 .set_device_state = rt61pci_set_device_state,
2747 .rfkill_poll = rt61pci_rfkill_poll, 2727 .rfkill_poll = rt61pci_rfkill_poll,
2748 .link_stats = rt61pci_link_stats, 2728 .link_stats = rt61pci_link_stats,
@@ -2758,6 +2738,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2758 .config_filter = rt61pci_config_filter, 2738 .config_filter = rt61pci_config_filter,
2759 .config_intf = rt61pci_config_intf, 2739 .config_intf = rt61pci_config_intf,
2760 .config_erp = rt61pci_config_erp, 2740 .config_erp = rt61pci_config_erp,
2741 .config_ant = rt61pci_config_ant,
2761 .config = rt61pci_config, 2742 .config = rt61pci_config,
2762}; 2743};
2763 2744
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 8ec1451308cc..65fe3332364a 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -48,7 +48,9 @@
48#define CSR_REG_SIZE 0x04b0 48#define CSR_REG_SIZE 0x04b0
49#define EEPROM_BASE 0x0000 49#define EEPROM_BASE 0x0000
50#define EEPROM_SIZE 0x0100 50#define EEPROM_SIZE 0x0100
51#define BBP_BASE 0x0000
51#define BBP_SIZE 0x0080 52#define BBP_SIZE 0x0080
53#define RF_BASE 0x0000
52#define RF_SIZE 0x0014 54#define RF_SIZE 0x0014
53 55
54/* 56/*
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 934f8e03c5aa..d638a8a59370 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -46,7 +46,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
46/* 46/*
47 * Register access. 47 * Register access.
48 * All access to the CSR registers will go through the methods 48 * All access to the CSR registers will go through the methods
49 * rt73usb_register_read and rt73usb_register_write. 49 * rt2x00usb_register_read and rt2x00usb_register_write.
50 * BBP and RF register require indirect register access, 50 * BBP and RF register require indirect register access,
51 * and use the CSR registers BBPCSR and RFCSR to achieve this. 51 * and use the CSR registers BBPCSR and RFCSR to achieve this.
52 * These indirect registers work with busy bits, 52 * These indirect registers work with busy bits,
@@ -55,113 +55,35 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
55 * between each attampt. When the busy bit is still set at that time, 55 * between each attampt. When the busy bit is still set at that time,
56 * the access attempt is considered to have failed, 56 * the access attempt is considered to have failed,
57 * and we will print an error. 57 * and we will print an error.
58 * The _lock versions must be used if you already hold the usb_cache_mutex 58 * The _lock versions must be used if you already hold the csr_mutex
59 */ 59 */
60static inline void rt73usb_register_read(struct rt2x00_dev *rt2x00dev, 60#define WAIT_FOR_BBP(__dev, __reg) \
61 const unsigned int offset, u32 *value) 61 rt2x00usb_regbusy_read((__dev), PHY_CSR3, PHY_CSR3_BUSY, (__reg))
62{ 62#define WAIT_FOR_RF(__dev, __reg) \
63 __le32 reg; 63 rt2x00usb_regbusy_read((__dev), PHY_CSR4, PHY_CSR4_BUSY, (__reg))
64 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
65 USB_VENDOR_REQUEST_IN, offset,
66 &reg, sizeof(u32), REGISTER_TIMEOUT);
67 *value = le32_to_cpu(reg);
68}
69
70static inline void rt73usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
71 const unsigned int offset, u32 *value)
72{
73 __le32 reg;
74 rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ,
75 USB_VENDOR_REQUEST_IN, offset,
76 &reg, sizeof(u32), REGISTER_TIMEOUT);
77 *value = le32_to_cpu(reg);
78}
79
80static inline void rt73usb_register_multiread(struct rt2x00_dev *rt2x00dev,
81 const unsigned int offset,
82 void *value, const u32 length)
83{
84 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
85 USB_VENDOR_REQUEST_IN, offset,
86 value, length,
87 REGISTER_TIMEOUT32(length));
88}
89
90static inline void rt73usb_register_write(struct rt2x00_dev *rt2x00dev,
91 const unsigned int offset, u32 value)
92{
93 __le32 reg = cpu_to_le32(value);
94 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
95 USB_VENDOR_REQUEST_OUT, offset,
96 &reg, sizeof(u32), REGISTER_TIMEOUT);
97}
98
99static inline void rt73usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
100 const unsigned int offset, u32 value)
101{
102 __le32 reg = cpu_to_le32(value);
103 rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_WRITE,
104 USB_VENDOR_REQUEST_OUT, offset,
105 &reg, sizeof(u32), REGISTER_TIMEOUT);
106}
107
108static inline void rt73usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
109 const unsigned int offset,
110 void *value, const u32 length)
111{
112 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
113 USB_VENDOR_REQUEST_OUT, offset,
114 value, length,
115 REGISTER_TIMEOUT32(length));
116}
117
118static u32 rt73usb_bbp_check(struct rt2x00_dev *rt2x00dev)
119{
120 u32 reg;
121 unsigned int i;
122
123 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
124 rt73usb_register_read_lock(rt2x00dev, PHY_CSR3, &reg);
125 if (!rt2x00_get_field32(reg, PHY_CSR3_BUSY))
126 break;
127 udelay(REGISTER_BUSY_DELAY);
128 }
129
130 return reg;
131}
132 64
133static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev, 65static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev,
134 const unsigned int word, const u8 value) 66 const unsigned int word, const u8 value)
135{ 67{
136 u32 reg; 68 u32 reg;
137 69
138 mutex_lock(&rt2x00dev->usb_cache_mutex); 70 mutex_lock(&rt2x00dev->csr_mutex);
139 71
140 /* 72 /*
141 * Wait until the BBP becomes ready. 73 * Wait until the BBP becomes available, afterwards we
74 * can safely write the new data into the register.
142 */ 75 */
143 reg = rt73usb_bbp_check(rt2x00dev); 76 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
144 if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) 77 reg = 0;
145 goto exit_fail; 78 rt2x00_set_field32(&reg, PHY_CSR3_VALUE, value);
146 79 rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word);
147 /* 80 rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1);
148 * Write the data into the BBP. 81 rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0);
149 */ 82
150 reg = 0; 83 rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR3, reg);
151 rt2x00_set_field32(&reg, PHY_CSR3_VALUE, value); 84 }
152 rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word);
153 rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1);
154 rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0);
155
156 rt73usb_register_write_lock(rt2x00dev, PHY_CSR3, reg);
157 mutex_unlock(&rt2x00dev->usb_cache_mutex);
158
159 return;
160
161exit_fail:
162 mutex_unlock(&rt2x00dev->usb_cache_mutex);
163 85
164 ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n"); 86 mutex_unlock(&rt2x00dev->csr_mutex);
165} 87}
166 88
167static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev, 89static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev,
@@ -169,123 +91,95 @@ static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev,
169{ 91{
170 u32 reg; 92 u32 reg;
171 93
172 mutex_lock(&rt2x00dev->usb_cache_mutex); 94 mutex_lock(&rt2x00dev->csr_mutex);
173
174 /*
175 * Wait until the BBP becomes ready.
176 */
177 reg = rt73usb_bbp_check(rt2x00dev);
178 if (rt2x00_get_field32(reg, PHY_CSR3_BUSY))
179 goto exit_fail;
180 95
181 /* 96 /*
182 * Write the request into the BBP. 97 * Wait until the BBP becomes available, afterwards we
98 * can safely write the read request into the register.
99 * After the data has been written, we wait until hardware
100 * returns the correct value, if at any time the register
101 * doesn't become available in time, reg will be 0xffffffff
102 * which means we return 0xff to the caller.
183 */ 103 */
184 reg = 0; 104 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
185 rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word); 105 reg = 0;
186 rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); 106 rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word);
187 rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 1); 107 rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1);
108 rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 1);
188 109
189 rt73usb_register_write_lock(rt2x00dev, PHY_CSR3, reg); 110 rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR3, reg);
190 111
191 /* 112 WAIT_FOR_BBP(rt2x00dev, &reg);
192 * Wait until the BBP becomes ready. 113 }
193 */
194 reg = rt73usb_bbp_check(rt2x00dev);
195 if (rt2x00_get_field32(reg, PHY_CSR3_BUSY))
196 goto exit_fail;
197 114
198 *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE); 115 *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE);
199 mutex_unlock(&rt2x00dev->usb_cache_mutex);
200
201 return;
202
203exit_fail:
204 mutex_unlock(&rt2x00dev->usb_cache_mutex);
205 116
206 ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n"); 117 mutex_unlock(&rt2x00dev->csr_mutex);
207 *value = 0xff;
208} 118}
209 119
210static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev, 120static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
211 const unsigned int word, const u32 value) 121 const unsigned int word, const u32 value)
212{ 122{
213 u32 reg; 123 u32 reg;
214 unsigned int i;
215 124
216 if (!word) 125 if (!word)
217 return; 126 return;
218 127
219 mutex_lock(&rt2x00dev->usb_cache_mutex); 128 mutex_lock(&rt2x00dev->csr_mutex);
220
221 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
222 rt73usb_register_read_lock(rt2x00dev, PHY_CSR4, &reg);
223 if (!rt2x00_get_field32(reg, PHY_CSR4_BUSY))
224 goto rf_write;
225 udelay(REGISTER_BUSY_DELAY);
226 }
227
228 mutex_unlock(&rt2x00dev->usb_cache_mutex);
229 ERROR(rt2x00dev, "PHY_CSR4 register busy. Write failed.\n");
230 return;
231
232rf_write:
233 reg = 0;
234 rt2x00_set_field32(&reg, PHY_CSR4_VALUE, value);
235 129
236 /* 130 /*
237 * RF5225 and RF2527 contain 21 bits per RF register value, 131 * Wait until the RF becomes available, afterwards we
238 * all others contain 20 bits. 132 * can safely write the new data into the register.
239 */ 133 */
240 rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS, 134 if (WAIT_FOR_RF(rt2x00dev, &reg)) {
241 20 + (rt2x00_rf(&rt2x00dev->chip, RF5225) || 135 reg = 0;
242 rt2x00_rf(&rt2x00dev->chip, RF2527))); 136 rt2x00_set_field32(&reg, PHY_CSR4_VALUE, value);
243 rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0); 137 /*
244 rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1); 138 * RF5225 and RF2527 contain 21 bits per RF register value,
245 139 * all others contain 20 bits.
246 rt73usb_register_write_lock(rt2x00dev, PHY_CSR4, reg); 140 */
247 rt2x00_rf_write(rt2x00dev, word, value); 141 rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS,
248 mutex_unlock(&rt2x00dev->usb_cache_mutex); 142 20 + (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
249} 143 rt2x00_rf(&rt2x00dev->chip, RF2527)));
250 144 rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
251#ifdef CONFIG_RT2X00_LIB_DEBUGFS 145 rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
252#define CSR_OFFSET(__word) ( CSR_REG_BASE + ((__word) * sizeof(u32)) ) 146
253 147 rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR4, reg);
254static void rt73usb_read_csr(struct rt2x00_dev *rt2x00dev, 148 rt2x00_rf_write(rt2x00dev, word, value);
255 const unsigned int word, u32 *data) 149 }
256{
257 rt73usb_register_read(rt2x00dev, CSR_OFFSET(word), data);
258}
259 150
260static void rt73usb_write_csr(struct rt2x00_dev *rt2x00dev, 151 mutex_unlock(&rt2x00dev->csr_mutex);
261 const unsigned int word, u32 data)
262{
263 rt73usb_register_write(rt2x00dev, CSR_OFFSET(word), data);
264} 152}
265 153
154#ifdef CONFIG_RT2X00_LIB_DEBUGFS
266static const struct rt2x00debug rt73usb_rt2x00debug = { 155static const struct rt2x00debug rt73usb_rt2x00debug = {
267 .owner = THIS_MODULE, 156 .owner = THIS_MODULE,
268 .csr = { 157 .csr = {
269 .read = rt73usb_read_csr, 158 .read = rt2x00usb_register_read,
270 .write = rt73usb_write_csr, 159 .write = rt2x00usb_register_write,
160 .flags = RT2X00DEBUGFS_OFFSET,
161 .word_base = CSR_REG_BASE,
271 .word_size = sizeof(u32), 162 .word_size = sizeof(u32),
272 .word_count = CSR_REG_SIZE / sizeof(u32), 163 .word_count = CSR_REG_SIZE / sizeof(u32),
273 }, 164 },
274 .eeprom = { 165 .eeprom = {
275 .read = rt2x00_eeprom_read, 166 .read = rt2x00_eeprom_read,
276 .write = rt2x00_eeprom_write, 167 .write = rt2x00_eeprom_write,
168 .word_base = EEPROM_BASE,
277 .word_size = sizeof(u16), 169 .word_size = sizeof(u16),
278 .word_count = EEPROM_SIZE / sizeof(u16), 170 .word_count = EEPROM_SIZE / sizeof(u16),
279 }, 171 },
280 .bbp = { 172 .bbp = {
281 .read = rt73usb_bbp_read, 173 .read = rt73usb_bbp_read,
282 .write = rt73usb_bbp_write, 174 .write = rt73usb_bbp_write,
175 .word_base = BBP_BASE,
283 .word_size = sizeof(u8), 176 .word_size = sizeof(u8),
284 .word_count = BBP_SIZE / sizeof(u8), 177 .word_count = BBP_SIZE / sizeof(u8),
285 }, 178 },
286 .rf = { 179 .rf = {
287 .read = rt2x00_rf_read, 180 .read = rt2x00_rf_read,
288 .write = rt73usb_rf_write, 181 .write = rt73usb_rf_write,
182 .word_base = RF_BASE,
289 .word_size = sizeof(u32), 183 .word_size = sizeof(u32),
290 .word_count = RF_SIZE / sizeof(u32), 184 .word_count = RF_SIZE / sizeof(u32),
291 }, 185 },
@@ -341,10 +235,10 @@ static int rt73usb_blink_set(struct led_classdev *led_cdev,
341 container_of(led_cdev, struct rt2x00_led, led_dev); 235 container_of(led_cdev, struct rt2x00_led, led_dev);
342 u32 reg; 236 u32 reg;
343 237
344 rt73usb_register_read(led->rt2x00dev, MAC_CSR14, &reg); 238 rt2x00usb_register_read(led->rt2x00dev, MAC_CSR14, &reg);
345 rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on); 239 rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on);
346 rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off); 240 rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off);
347 rt73usb_register_write(led->rt2x00dev, MAC_CSR14, reg); 241 rt2x00usb_register_write(led->rt2x00dev, MAC_CSR14, reg);
348 242
349 return 0; 243 return 0;
350} 244}
@@ -387,7 +281,7 @@ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
387 */ 281 */
388 mask = (0xf << crypto->bssidx); 282 mask = (0xf << crypto->bssidx);
389 283
390 rt73usb_register_read(rt2x00dev, SEC_CSR0, &reg); 284 rt2x00usb_register_read(rt2x00dev, SEC_CSR0, &reg);
391 reg &= mask; 285 reg &= mask;
392 286
393 if (reg && reg == mask) 287 if (reg && reg == mask)
@@ -424,16 +318,16 @@ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
424 field.bit_offset = (3 * key->hw_key_idx); 318 field.bit_offset = (3 * key->hw_key_idx);
425 field.bit_mask = 0x7 << field.bit_offset; 319 field.bit_mask = 0x7 << field.bit_offset;
426 320
427 rt73usb_register_read(rt2x00dev, SEC_CSR1, &reg); 321 rt2x00usb_register_read(rt2x00dev, SEC_CSR1, &reg);
428 rt2x00_set_field32(&reg, field, crypto->cipher); 322 rt2x00_set_field32(&reg, field, crypto->cipher);
429 rt73usb_register_write(rt2x00dev, SEC_CSR1, reg); 323 rt2x00usb_register_write(rt2x00dev, SEC_CSR1, reg);
430 } else { 324 } else {
431 field.bit_offset = (3 * (key->hw_key_idx - 8)); 325 field.bit_offset = (3 * (key->hw_key_idx - 8));
432 field.bit_mask = 0x7 << field.bit_offset; 326 field.bit_mask = 0x7 << field.bit_offset;
433 327
434 rt73usb_register_read(rt2x00dev, SEC_CSR5, &reg); 328 rt2x00usb_register_read(rt2x00dev, SEC_CSR5, &reg);
435 rt2x00_set_field32(&reg, field, crypto->cipher); 329 rt2x00_set_field32(&reg, field, crypto->cipher);
436 rt73usb_register_write(rt2x00dev, SEC_CSR5, reg); 330 rt2x00usb_register_write(rt2x00dev, SEC_CSR5, reg);
437 } 331 }
438 332
439 /* 333 /*
@@ -456,12 +350,12 @@ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
456 */ 350 */
457 mask = 1 << key->hw_key_idx; 351 mask = 1 << key->hw_key_idx;
458 352
459 rt73usb_register_read(rt2x00dev, SEC_CSR0, &reg); 353 rt2x00usb_register_read(rt2x00dev, SEC_CSR0, &reg);
460 if (crypto->cmd == SET_KEY) 354 if (crypto->cmd == SET_KEY)
461 reg |= mask; 355 reg |= mask;
462 else if (crypto->cmd == DISABLE_KEY) 356 else if (crypto->cmd == DISABLE_KEY)
463 reg &= ~mask; 357 reg &= ~mask;
464 rt73usb_register_write(rt2x00dev, SEC_CSR0, reg); 358 rt2x00usb_register_write(rt2x00dev, SEC_CSR0, reg);
465 359
466 return 0; 360 return 0;
467} 361}
@@ -486,10 +380,10 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
486 * When both registers are full, we drop the key, 380 * When both registers are full, we drop the key,
487 * otherwise we use the first invalid entry. 381 * otherwise we use the first invalid entry.
488 */ 382 */
489 rt73usb_register_read(rt2x00dev, SEC_CSR2, &reg); 383 rt2x00usb_register_read(rt2x00dev, SEC_CSR2, &reg);
490 if (reg && reg == ~0) { 384 if (reg && reg == ~0) {
491 key->hw_key_idx = 32; 385 key->hw_key_idx = 32;
492 rt73usb_register_read(rt2x00dev, SEC_CSR3, &reg); 386 rt2x00usb_register_read(rt2x00dev, SEC_CSR3, &reg);
493 if (reg && reg == ~0) 387 if (reg && reg == ~0)
494 return -ENOSPC; 388 return -ENOSPC;
495 } 389 }
@@ -517,14 +411,14 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
517 /* 411 /*
518 * Send the address and cipher type to the hardware register. 412 * Send the address and cipher type to the hardware register.
519 * This data fits within the CSR cache size, so we can use 413 * This data fits within the CSR cache size, so we can use
520 * rt73usb_register_multiwrite() directly. 414 * rt2x00usb_register_multiwrite() directly.
521 */ 415 */
522 memset(&addr_entry, 0, sizeof(addr_entry)); 416 memset(&addr_entry, 0, sizeof(addr_entry));
523 memcpy(&addr_entry, crypto->address, ETH_ALEN); 417 memcpy(&addr_entry, crypto->address, ETH_ALEN);
524 addr_entry.cipher = crypto->cipher; 418 addr_entry.cipher = crypto->cipher;
525 419
526 reg = PAIRWISE_TA_ENTRY(key->hw_key_idx); 420 reg = PAIRWISE_TA_ENTRY(key->hw_key_idx);
527 rt73usb_register_multiwrite(rt2x00dev, reg, 421 rt2x00usb_register_multiwrite(rt2x00dev, reg,
528 &addr_entry, sizeof(addr_entry)); 422 &addr_entry, sizeof(addr_entry));
529 423
530 /* 424 /*
@@ -532,9 +426,9 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
532 * without this received frames will not be decrypted 426 * without this received frames will not be decrypted
533 * by the hardware. 427 * by the hardware.
534 */ 428 */
535 rt73usb_register_read(rt2x00dev, SEC_CSR4, &reg); 429 rt2x00usb_register_read(rt2x00dev, SEC_CSR4, &reg);
536 reg |= (1 << crypto->bssidx); 430 reg |= (1 << crypto->bssidx);
537 rt73usb_register_write(rt2x00dev, SEC_CSR4, reg); 431 rt2x00usb_register_write(rt2x00dev, SEC_CSR4, reg);
538 432
539 /* 433 /*
540 * The driver does not support the IV/EIV generation 434 * The driver does not support the IV/EIV generation
@@ -557,21 +451,21 @@ static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
557 if (key->hw_key_idx < 32) { 451 if (key->hw_key_idx < 32) {
558 mask = 1 << key->hw_key_idx; 452 mask = 1 << key->hw_key_idx;
559 453
560 rt73usb_register_read(rt2x00dev, SEC_CSR2, &reg); 454 rt2x00usb_register_read(rt2x00dev, SEC_CSR2, &reg);
561 if (crypto->cmd == SET_KEY) 455 if (crypto->cmd == SET_KEY)
562 reg |= mask; 456 reg |= mask;
563 else if (crypto->cmd == DISABLE_KEY) 457 else if (crypto->cmd == DISABLE_KEY)
564 reg &= ~mask; 458 reg &= ~mask;
565 rt73usb_register_write(rt2x00dev, SEC_CSR2, reg); 459 rt2x00usb_register_write(rt2x00dev, SEC_CSR2, reg);
566 } else { 460 } else {
567 mask = 1 << (key->hw_key_idx - 32); 461 mask = 1 << (key->hw_key_idx - 32);
568 462
569 rt73usb_register_read(rt2x00dev, SEC_CSR3, &reg); 463 rt2x00usb_register_read(rt2x00dev, SEC_CSR3, &reg);
570 if (crypto->cmd == SET_KEY) 464 if (crypto->cmd == SET_KEY)
571 reg |= mask; 465 reg |= mask;
572 else if (crypto->cmd == DISABLE_KEY) 466 else if (crypto->cmd == DISABLE_KEY)
573 reg &= ~mask; 467 reg &= ~mask;
574 rt73usb_register_write(rt2x00dev, SEC_CSR3, reg); 468 rt2x00usb_register_write(rt2x00dev, SEC_CSR3, reg);
575 } 469 }
576 470
577 return 0; 471 return 0;
@@ -588,7 +482,7 @@ static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
588 * and broadcast frames will always be accepted since 482 * and broadcast frames will always be accepted since
589 * there is no filter for it at this time. 483 * there is no filter for it at this time.
590 */ 484 */
591 rt73usb_register_read(rt2x00dev, TXRX_CSR0, &reg); 485 rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
592 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC, 486 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC,
593 !(filter_flags & FIF_FCSFAIL)); 487 !(filter_flags & FIF_FCSFAIL));
594 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL, 488 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL,
@@ -606,7 +500,7 @@ static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
606 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0); 500 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0);
607 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS, 501 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS,
608 !(filter_flags & FIF_CONTROL)); 502 !(filter_flags & FIF_CONTROL));
609 rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); 503 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
610} 504}
611 505
612static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev, 506static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
@@ -625,16 +519,16 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
625 * bits which (when set to 0) will invalidate the entire beacon. 519 * bits which (when set to 0) will invalidate the entire beacon.
626 */ 520 */
627 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); 521 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
628 rt73usb_register_write(rt2x00dev, beacon_base, 0); 522 rt2x00usb_register_write(rt2x00dev, beacon_base, 0);
629 523
630 /* 524 /*
631 * Enable synchronisation. 525 * Enable synchronisation.
632 */ 526 */
633 rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 527 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
634 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1); 528 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
635 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync); 529 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
636 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1); 530 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
637 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); 531 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
638 } 532 }
639 533
640 if (flags & CONFIG_UPDATE_MAC) { 534 if (flags & CONFIG_UPDATE_MAC) {
@@ -642,7 +536,7 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
642 rt2x00_set_field32(&reg, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); 536 rt2x00_set_field32(&reg, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff);
643 conf->mac[1] = cpu_to_le32(reg); 537 conf->mac[1] = cpu_to_le32(reg);
644 538
645 rt73usb_register_multiwrite(rt2x00dev, MAC_CSR2, 539 rt2x00usb_register_multiwrite(rt2x00dev, MAC_CSR2,
646 conf->mac, sizeof(conf->mac)); 540 conf->mac, sizeof(conf->mac));
647 } 541 }
648 542
@@ -651,7 +545,7 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
651 rt2x00_set_field32(&reg, MAC_CSR5_BSS_ID_MASK, 3); 545 rt2x00_set_field32(&reg, MAC_CSR5_BSS_ID_MASK, 3);
652 conf->bssid[1] = cpu_to_le32(reg); 546 conf->bssid[1] = cpu_to_le32(reg);
653 547
654 rt73usb_register_multiwrite(rt2x00dev, MAC_CSR4, 548 rt2x00usb_register_multiwrite(rt2x00dev, MAC_CSR4,
655 conf->bssid, sizeof(conf->bssid)); 549 conf->bssid, sizeof(conf->bssid));
656 } 550 }
657} 551}
@@ -661,95 +555,26 @@ static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev,
661{ 555{
662 u32 reg; 556 u32 reg;
663 557
664 rt73usb_register_read(rt2x00dev, TXRX_CSR0, &reg); 558 rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
665 rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, erp->ack_timeout); 559 rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, erp->ack_timeout);
666 rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); 560 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
667 561
668 rt73usb_register_read(rt2x00dev, TXRX_CSR4, &reg); 562 rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
669 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE, 563 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
670 !!erp->short_preamble); 564 !!erp->short_preamble);
671 rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg); 565 rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg);
672}
673
674static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
675 struct rt2x00lib_conf *libconf)
676{
677 u16 eeprom;
678 short lna_gain = 0;
679
680 if (libconf->band == IEEE80211_BAND_2GHZ) {
681 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
682 lna_gain += 14;
683
684 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
685 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
686 } else {
687 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
688 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
689 }
690
691 rt2x00dev->lna_gain = lna_gain;
692}
693
694static void rt73usb_config_phymode(struct rt2x00_dev *rt2x00dev,
695 const int basic_rate_mask)
696{
697 rt73usb_register_write(rt2x00dev, TXRX_CSR5, basic_rate_mask);
698}
699
700static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev,
701 struct rf_channel *rf, const int txpower)
702{
703 u8 r3;
704 u8 r94;
705 u8 smart;
706
707 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
708 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
709
710 smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
711 rt2x00_rf(&rt2x00dev->chip, RF2527));
712
713 rt73usb_bbp_read(rt2x00dev, 3, &r3);
714 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
715 rt73usb_bbp_write(rt2x00dev, 3, r3);
716
717 r94 = 6;
718 if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94))
719 r94 += txpower - MAX_TXPOWER;
720 else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94))
721 r94 += txpower;
722 rt73usb_bbp_write(rt2x00dev, 94, r94);
723
724 rt73usb_rf_write(rt2x00dev, 1, rf->rf1);
725 rt73usb_rf_write(rt2x00dev, 2, rf->rf2);
726 rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
727 rt73usb_rf_write(rt2x00dev, 4, rf->rf4);
728 566
729 rt73usb_rf_write(rt2x00dev, 1, rf->rf1); 567 rt2x00usb_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates);
730 rt73usb_rf_write(rt2x00dev, 2, rf->rf2);
731 rt73usb_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
732 rt73usb_rf_write(rt2x00dev, 4, rf->rf4);
733 568
734 rt73usb_rf_write(rt2x00dev, 1, rf->rf1); 569 rt2x00usb_register_read(rt2x00dev, MAC_CSR9, &reg);
735 rt73usb_rf_write(rt2x00dev, 2, rf->rf2); 570 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time);
736 rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); 571 rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg);
737 rt73usb_rf_write(rt2x00dev, 4, rf->rf4);
738 572
739 udelay(10); 573 rt2x00usb_register_read(rt2x00dev, MAC_CSR8, &reg);
740} 574 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs);
741 575 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3);
742static void rt73usb_config_txpower(struct rt2x00_dev *rt2x00dev, 576 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs);
743 const int txpower) 577 rt2x00usb_register_write(rt2x00dev, MAC_CSR8, reg);
744{
745 struct rf_channel rf;
746
747 rt2x00_rf_read(rt2x00dev, 1, &rf.rf1);
748 rt2x00_rf_read(rt2x00dev, 2, &rf.rf2);
749 rt2x00_rf_read(rt2x00dev, 3, &rf.rf3);
750 rt2x00_rf_read(rt2x00dev, 4, &rf.rf4);
751
752 rt73usb_config_channel(rt2x00dev, &rf, txpower);
753} 578}
754 579
755static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev, 580static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
@@ -869,8 +694,8 @@ static const struct antenna_sel antenna_sel_bg[] = {
869 { 98, { 0x48, 0x48 } }, 694 { 98, { 0x48, 0x48 } },
870}; 695};
871 696
872static void rt73usb_config_antenna(struct rt2x00_dev *rt2x00dev, 697static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
873 struct antenna_setup *ant) 698 struct antenna_setup *ant)
874{ 699{
875 const struct antenna_sel *sel; 700 const struct antenna_sel *sel;
876 unsigned int lna; 701 unsigned int lna;
@@ -895,14 +720,14 @@ static void rt73usb_config_antenna(struct rt2x00_dev *rt2x00dev,
895 for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) 720 for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
896 rt73usb_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]); 721 rt73usb_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]);
897 722
898 rt73usb_register_read(rt2x00dev, PHY_CSR0, &reg); 723 rt2x00usb_register_read(rt2x00dev, PHY_CSR0, &reg);
899 724
900 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG, 725 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
901 (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)); 726 (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ));
902 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A, 727 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
903 (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)); 728 (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ));
904 729
905 rt73usb_register_write(rt2x00dev, PHY_CSR0, reg); 730 rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
906 731
907 if (rt2x00_rf(&rt2x00dev->chip, RF5226) || 732 if (rt2x00_rf(&rt2x00dev->chip, RF5226) ||
908 rt2x00_rf(&rt2x00dev->chip, RF5225)) 733 rt2x00_rf(&rt2x00dev->chip, RF5225))
@@ -912,33 +737,111 @@ static void rt73usb_config_antenna(struct rt2x00_dev *rt2x00dev,
912 rt73usb_config_antenna_2x(rt2x00dev, ant); 737 rt73usb_config_antenna_2x(rt2x00dev, ant);
913} 738}
914 739
915static void rt73usb_config_duration(struct rt2x00_dev *rt2x00dev, 740static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
916 struct rt2x00lib_conf *libconf) 741 struct rt2x00lib_conf *libconf)
917{ 742{
743 u16 eeprom;
744 short lna_gain = 0;
745
746 if (libconf->conf->channel->band == IEEE80211_BAND_2GHZ) {
747 if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags))
748 lna_gain += 14;
749
750 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
751 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
752 } else {
753 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
754 lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1);
755 }
756
757 rt2x00dev->lna_gain = lna_gain;
758}
759
760static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev,
761 struct rf_channel *rf, const int txpower)
762{
763 u8 r3;
764 u8 r94;
765 u8 smart;
766
767 rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
768 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
769
770 smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
771 rt2x00_rf(&rt2x00dev->chip, RF2527));
772
773 rt73usb_bbp_read(rt2x00dev, 3, &r3);
774 rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
775 rt73usb_bbp_write(rt2x00dev, 3, r3);
776
777 r94 = 6;
778 if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94))
779 r94 += txpower - MAX_TXPOWER;
780 else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94))
781 r94 += txpower;
782 rt73usb_bbp_write(rt2x00dev, 94, r94);
783
784 rt73usb_rf_write(rt2x00dev, 1, rf->rf1);
785 rt73usb_rf_write(rt2x00dev, 2, rf->rf2);
786 rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
787 rt73usb_rf_write(rt2x00dev, 4, rf->rf4);
788
789 rt73usb_rf_write(rt2x00dev, 1, rf->rf1);
790 rt73usb_rf_write(rt2x00dev, 2, rf->rf2);
791 rt73usb_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
792 rt73usb_rf_write(rt2x00dev, 4, rf->rf4);
793
794 rt73usb_rf_write(rt2x00dev, 1, rf->rf1);
795 rt73usb_rf_write(rt2x00dev, 2, rf->rf2);
796 rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
797 rt73usb_rf_write(rt2x00dev, 4, rf->rf4);
798
799 udelay(10);
800}
801
802static void rt73usb_config_txpower(struct rt2x00_dev *rt2x00dev,
803 const int txpower)
804{
805 struct rf_channel rf;
806
807 rt2x00_rf_read(rt2x00dev, 1, &rf.rf1);
808 rt2x00_rf_read(rt2x00dev, 2, &rf.rf2);
809 rt2x00_rf_read(rt2x00dev, 3, &rf.rf3);
810 rt2x00_rf_read(rt2x00dev, 4, &rf.rf4);
811
812 rt73usb_config_channel(rt2x00dev, &rf, txpower);
813}
814
815static void rt73usb_config_retry_limit(struct rt2x00_dev *rt2x00dev,
816 struct rt2x00lib_conf *libconf)
817{
918 u32 reg; 818 u32 reg;
919 819
920 rt73usb_register_read(rt2x00dev, MAC_CSR9, &reg); 820 rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
921 rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, libconf->slot_time); 821 rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT,
922 rt73usb_register_write(rt2x00dev, MAC_CSR9, reg); 822 libconf->conf->long_frame_max_tx_count);
823 rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT,
824 libconf->conf->short_frame_max_tx_count);
825 rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg);
826}
923 827
924 rt73usb_register_read(rt2x00dev, MAC_CSR8, &reg); 828static void rt73usb_config_duration(struct rt2x00_dev *rt2x00dev,
925 rt2x00_set_field32(&reg, MAC_CSR8_SIFS, libconf->sifs); 829 struct rt2x00lib_conf *libconf)
926 rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); 830{
927 rt2x00_set_field32(&reg, MAC_CSR8_EIFS, libconf->eifs); 831 u32 reg;
928 rt73usb_register_write(rt2x00dev, MAC_CSR8, reg);
929 832
930 rt73usb_register_read(rt2x00dev, TXRX_CSR0, &reg); 833 rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
931 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); 834 rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER);
932 rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); 835 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
933 836
934 rt73usb_register_read(rt2x00dev, TXRX_CSR4, &reg); 837 rt2x00usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
935 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1); 838 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1);
936 rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg); 839 rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg);
937 840
938 rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 841 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
939 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 842 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL,
940 libconf->conf->beacon_int * 16); 843 libconf->conf->beacon_int * 16);
941 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); 844 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
942} 845}
943 846
944static void rt73usb_config(struct rt2x00_dev *rt2x00dev, 847static void rt73usb_config(struct rt2x00_dev *rt2x00dev,
@@ -948,16 +851,15 @@ static void rt73usb_config(struct rt2x00_dev *rt2x00dev,
948 /* Always recalculate LNA gain before changing configuration */ 851 /* Always recalculate LNA gain before changing configuration */
949 rt73usb_config_lna_gain(rt2x00dev, libconf); 852 rt73usb_config_lna_gain(rt2x00dev, libconf);
950 853
951 if (flags & CONFIG_UPDATE_PHYMODE) 854 if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
952 rt73usb_config_phymode(rt2x00dev, libconf->basic_rates);
953 if (flags & CONFIG_UPDATE_CHANNEL)
954 rt73usb_config_channel(rt2x00dev, &libconf->rf, 855 rt73usb_config_channel(rt2x00dev, &libconf->rf,
955 libconf->conf->power_level); 856 libconf->conf->power_level);
956 if ((flags & CONFIG_UPDATE_TXPOWER) && !(flags & CONFIG_UPDATE_CHANNEL)) 857 if ((flags & IEEE80211_CONF_CHANGE_POWER) &&
858 !(flags & IEEE80211_CONF_CHANGE_CHANNEL))
957 rt73usb_config_txpower(rt2x00dev, libconf->conf->power_level); 859 rt73usb_config_txpower(rt2x00dev, libconf->conf->power_level);
958 if (flags & CONFIG_UPDATE_ANTENNA) 860 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
959 rt73usb_config_antenna(rt2x00dev, &libconf->ant); 861 rt73usb_config_retry_limit(rt2x00dev, libconf);
960 if (flags & (CONFIG_UPDATE_SLOT_TIME | CONFIG_UPDATE_BEACON_INT)) 862 if (flags & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
961 rt73usb_config_duration(rt2x00dev, libconf); 863 rt73usb_config_duration(rt2x00dev, libconf);
962} 864}
963 865
@@ -972,13 +874,13 @@ static void rt73usb_link_stats(struct rt2x00_dev *rt2x00dev,
972 /* 874 /*
973 * Update FCS error count from register. 875 * Update FCS error count from register.
974 */ 876 */
975 rt73usb_register_read(rt2x00dev, STA_CSR0, &reg); 877 rt2x00usb_register_read(rt2x00dev, STA_CSR0, &reg);
976 qual->rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR); 878 qual->rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR);
977 879
978 /* 880 /*
979 * Update False CCA count from register. 881 * Update False CCA count from register.
980 */ 882 */
981 rt73usb_register_read(rt2x00dev, STA_CSR1, &reg); 883 rt2x00usb_register_read(rt2x00dev, STA_CSR1, &reg);
982 qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); 884 qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR);
983} 885}
984 886
@@ -1138,7 +1040,7 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, const void *data,
1138 * Wait for stable hardware. 1040 * Wait for stable hardware.
1139 */ 1041 */
1140 for (i = 0; i < 100; i++) { 1042 for (i = 0; i < 100; i++) {
1141 rt73usb_register_read(rt2x00dev, MAC_CSR0, &reg); 1043 rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
1142 if (reg) 1044 if (reg)
1143 break; 1045 break;
1144 msleep(1); 1046 msleep(1);
@@ -1180,13 +1082,13 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
1180{ 1082{
1181 u32 reg; 1083 u32 reg;
1182 1084
1183 rt73usb_register_read(rt2x00dev, TXRX_CSR0, &reg); 1085 rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
1184 rt2x00_set_field32(&reg, TXRX_CSR0_AUTO_TX_SEQ, 1); 1086 rt2x00_set_field32(&reg, TXRX_CSR0_AUTO_TX_SEQ, 1);
1185 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0); 1087 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
1186 rt2x00_set_field32(&reg, TXRX_CSR0_TX_WITHOUT_WAITING, 0); 1088 rt2x00_set_field32(&reg, TXRX_CSR0_TX_WITHOUT_WAITING, 0);
1187 rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); 1089 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
1188 1090
1189 rt73usb_register_read(rt2x00dev, TXRX_CSR1, &reg); 1091 rt2x00usb_register_read(rt2x00dev, TXRX_CSR1, &reg);
1190 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */ 1092 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */
1191 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0_VALID, 1); 1093 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0_VALID, 1);
1192 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1, 30); /* Rssi */ 1094 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1, 30); /* Rssi */
@@ -1195,12 +1097,12 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
1195 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID2_VALID, 1); 1097 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID2_VALID, 1);
1196 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3, 30); /* Rssi */ 1098 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3, 30); /* Rssi */
1197 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3_VALID, 1); 1099 rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3_VALID, 1);
1198 rt73usb_register_write(rt2x00dev, TXRX_CSR1, reg); 1100 rt2x00usb_register_write(rt2x00dev, TXRX_CSR1, reg);
1199 1101
1200 /* 1102 /*
1201 * CCK TXD BBP registers 1103 * CCK TXD BBP registers
1202 */ 1104 */
1203 rt73usb_register_read(rt2x00dev, TXRX_CSR2, &reg); 1105 rt2x00usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
1204 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0, 13); 1106 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0, 13);
1205 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0_VALID, 1); 1107 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0_VALID, 1);
1206 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1, 12); 1108 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1, 12);
@@ -1209,77 +1111,77 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
1209 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID2_VALID, 1); 1111 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID2_VALID, 1);
1210 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3, 10); 1112 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3, 10);
1211 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3_VALID, 1); 1113 rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3_VALID, 1);
1212 rt73usb_register_write(rt2x00dev, TXRX_CSR2, reg); 1114 rt2x00usb_register_write(rt2x00dev, TXRX_CSR2, reg);
1213 1115
1214 /* 1116 /*
1215 * OFDM TXD BBP registers 1117 * OFDM TXD BBP registers
1216 */ 1118 */
1217 rt73usb_register_read(rt2x00dev, TXRX_CSR3, &reg); 1119 rt2x00usb_register_read(rt2x00dev, TXRX_CSR3, &reg);
1218 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0, 7); 1120 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0, 7);
1219 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0_VALID, 1); 1121 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0_VALID, 1);
1220 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1, 6); 1122 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1, 6);
1221 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1_VALID, 1); 1123 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1_VALID, 1);
1222 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2, 5); 1124 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2, 5);
1223 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2_VALID, 1); 1125 rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2_VALID, 1);
1224 rt73usb_register_write(rt2x00dev, TXRX_CSR3, reg); 1126 rt2x00usb_register_write(rt2x00dev, TXRX_CSR3, reg);
1225 1127
1226 rt73usb_register_read(rt2x00dev, TXRX_CSR7, &reg); 1128 rt2x00usb_register_read(rt2x00dev, TXRX_CSR7, &reg);
1227 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_6MBS, 59); 1129 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_6MBS, 59);
1228 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_9MBS, 53); 1130 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_9MBS, 53);
1229 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_12MBS, 49); 1131 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_12MBS, 49);
1230 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_18MBS, 46); 1132 rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_18MBS, 46);
1231 rt73usb_register_write(rt2x00dev, TXRX_CSR7, reg); 1133 rt2x00usb_register_write(rt2x00dev, TXRX_CSR7, reg);
1232 1134
1233 rt73usb_register_read(rt2x00dev, TXRX_CSR8, &reg); 1135 rt2x00usb_register_read(rt2x00dev, TXRX_CSR8, &reg);
1234 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_24MBS, 44); 1136 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_24MBS, 44);
1235 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_36MBS, 42); 1137 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_36MBS, 42);
1236 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_48MBS, 42); 1138 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_48MBS, 42);
1237 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42); 1139 rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42);
1238 rt73usb_register_write(rt2x00dev, TXRX_CSR8, reg); 1140 rt2x00usb_register_write(rt2x00dev, TXRX_CSR8, reg);
1239 1141
1240 rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 1142 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
1241 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0); 1143 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0);
1242 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0); 1144 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
1243 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0); 1145 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0);
1244 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); 1146 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
1245 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 1147 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1246 rt2x00_set_field32(&reg, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0); 1148 rt2x00_set_field32(&reg, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0);
1247 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1149 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1248 1150
1249 rt73usb_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); 1151 rt2x00usb_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f);
1250 1152
1251 rt73usb_register_read(rt2x00dev, MAC_CSR6, &reg); 1153 rt2x00usb_register_read(rt2x00dev, MAC_CSR6, &reg);
1252 rt2x00_set_field32(&reg, MAC_CSR6_MAX_FRAME_UNIT, 0xfff); 1154 rt2x00_set_field32(&reg, MAC_CSR6_MAX_FRAME_UNIT, 0xfff);
1253 rt73usb_register_write(rt2x00dev, MAC_CSR6, reg); 1155 rt2x00usb_register_write(rt2x00dev, MAC_CSR6, reg);
1254 1156
1255 rt73usb_register_write(rt2x00dev, MAC_CSR10, 0x00000718); 1157 rt2x00usb_register_write(rt2x00dev, MAC_CSR10, 0x00000718);
1256 1158
1257 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) 1159 if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE))
1258 return -EBUSY; 1160 return -EBUSY;
1259 1161
1260 rt73usb_register_write(rt2x00dev, MAC_CSR13, 0x00007f00); 1162 rt2x00usb_register_write(rt2x00dev, MAC_CSR13, 0x00007f00);
1261 1163
1262 /* 1164 /*
1263 * Invalidate all Shared Keys (SEC_CSR0), 1165 * Invalidate all Shared Keys (SEC_CSR0),
1264 * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5) 1166 * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5)
1265 */ 1167 */
1266 rt73usb_register_write(rt2x00dev, SEC_CSR0, 0x00000000); 1168 rt2x00usb_register_write(rt2x00dev, SEC_CSR0, 0x00000000);
1267 rt73usb_register_write(rt2x00dev, SEC_CSR1, 0x00000000); 1169 rt2x00usb_register_write(rt2x00dev, SEC_CSR1, 0x00000000);
1268 rt73usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000); 1170 rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000);
1269 1171
1270 reg = 0x000023b0; 1172 reg = 0x000023b0;
1271 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 1173 if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
1272 rt2x00_rf(&rt2x00dev->chip, RF2527)) 1174 rt2x00_rf(&rt2x00dev->chip, RF2527))
1273 rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1); 1175 rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1);
1274 rt73usb_register_write(rt2x00dev, PHY_CSR1, reg); 1176 rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg);
1275 1177
1276 rt73usb_register_write(rt2x00dev, PHY_CSR5, 0x00040a06); 1178 rt2x00usb_register_write(rt2x00dev, PHY_CSR5, 0x00040a06);
1277 rt73usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606); 1179 rt2x00usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606);
1278 rt73usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408); 1180 rt2x00usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408);
1279 1181
1280 rt73usb_register_read(rt2x00dev, MAC_CSR9, &reg); 1182 rt2x00usb_register_read(rt2x00dev, MAC_CSR9, &reg);
1281 rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0); 1183 rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0);
1282 rt73usb_register_write(rt2x00dev, MAC_CSR9, reg); 1184 rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg);
1283 1185
1284 /* 1186 /*
1285 * Clear all beacons 1187 * Clear all beacons
@@ -1287,36 +1189,36 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
1287 * the first byte since that byte contains the VALID and OWNER 1189 * the first byte since that byte contains the VALID and OWNER
1288 * bits which (when set to 0) will invalidate the entire beacon. 1190 * bits which (when set to 0) will invalidate the entire beacon.
1289 */ 1191 */
1290 rt73usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0); 1192 rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
1291 rt73usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0); 1193 rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
1292 rt73usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0); 1194 rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
1293 rt73usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0); 1195 rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
1294 1196
1295 /* 1197 /*
1296 * We must clear the error counters. 1198 * We must clear the error counters.
1297 * These registers are cleared on read, 1199 * These registers are cleared on read,
1298 * so we may pass a useless variable to store the value. 1200 * so we may pass a useless variable to store the value.
1299 */ 1201 */
1300 rt73usb_register_read(rt2x00dev, STA_CSR0, &reg); 1202 rt2x00usb_register_read(rt2x00dev, STA_CSR0, &reg);
1301 rt73usb_register_read(rt2x00dev, STA_CSR1, &reg); 1203 rt2x00usb_register_read(rt2x00dev, STA_CSR1, &reg);
1302 rt73usb_register_read(rt2x00dev, STA_CSR2, &reg); 1204 rt2x00usb_register_read(rt2x00dev, STA_CSR2, &reg);
1303 1205
1304 /* 1206 /*
1305 * Reset MAC and BBP registers. 1207 * Reset MAC and BBP registers.
1306 */ 1208 */
1307 rt73usb_register_read(rt2x00dev, MAC_CSR1, &reg); 1209 rt2x00usb_register_read(rt2x00dev, MAC_CSR1, &reg);
1308 rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1); 1210 rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1);
1309 rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1); 1211 rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1);
1310 rt73usb_register_write(rt2x00dev, MAC_CSR1, reg); 1212 rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg);
1311 1213
1312 rt73usb_register_read(rt2x00dev, MAC_CSR1, &reg); 1214 rt2x00usb_register_read(rt2x00dev, MAC_CSR1, &reg);
1313 rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0); 1215 rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0);
1314 rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0); 1216 rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0);
1315 rt73usb_register_write(rt2x00dev, MAC_CSR1, reg); 1217 rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg);
1316 1218
1317 rt73usb_register_read(rt2x00dev, MAC_CSR1, &reg); 1219 rt2x00usb_register_read(rt2x00dev, MAC_CSR1, &reg);
1318 rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1); 1220 rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1);
1319 rt73usb_register_write(rt2x00dev, MAC_CSR1, reg); 1221 rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg);
1320 1222
1321 return 0; 1223 return 0;
1322} 1224}
@@ -1394,11 +1296,11 @@ static void rt73usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
1394{ 1296{
1395 u32 reg; 1297 u32 reg;
1396 1298
1397 rt73usb_register_read(rt2x00dev, TXRX_CSR0, &reg); 1299 rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
1398 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1300 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX,
1399 (state == STATE_RADIO_RX_OFF) || 1301 (state == STATE_RADIO_RX_OFF) ||
1400 (state == STATE_RADIO_RX_OFF_LINK)); 1302 (state == STATE_RADIO_RX_OFF_LINK));
1401 rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); 1303 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
1402} 1304}
1403 1305
1404static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev) 1306static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -1415,12 +1317,12 @@ static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev)
1415 1317
1416static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev) 1318static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev)
1417{ 1319{
1418 rt73usb_register_write(rt2x00dev, MAC_CSR10, 0x00001818); 1320 rt2x00usb_register_write(rt2x00dev, MAC_CSR10, 0x00001818);
1419 1321
1420 /* 1322 /*
1421 * Disable synchronisation. 1323 * Disable synchronisation.
1422 */ 1324 */
1423 rt73usb_register_write(rt2x00dev, TXRX_CSR9, 0); 1325 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, 0);
1424 1326
1425 rt2x00usb_disable_radio(rt2x00dev); 1327 rt2x00usb_disable_radio(rt2x00dev);
1426} 1328}
@@ -1433,10 +1335,10 @@ static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
1433 1335
1434 put_to_sleep = (state != STATE_AWAKE); 1336 put_to_sleep = (state != STATE_AWAKE);
1435 1337
1436 rt73usb_register_read(rt2x00dev, MAC_CSR12, &reg); 1338 rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg);
1437 rt2x00_set_field32(&reg, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep); 1339 rt2x00_set_field32(&reg, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep);
1438 rt2x00_set_field32(&reg, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep); 1340 rt2x00_set_field32(&reg, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep);
1439 rt73usb_register_write(rt2x00dev, MAC_CSR12, reg); 1341 rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg);
1440 1342
1441 /* 1343 /*
1442 * Device is not guaranteed to be in the requested state yet. 1344 * Device is not guaranteed to be in the requested state yet.
@@ -1444,7 +1346,7 @@ static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state)
1444 * device has entered the correct state. 1346 * device has entered the correct state.
1445 */ 1347 */
1446 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 1348 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1447 rt73usb_register_read(rt2x00dev, MAC_CSR12, &reg); 1349 rt2x00usb_register_read(rt2x00dev, MAC_CSR12, &reg);
1448 state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE); 1350 state = rt2x00_get_field32(reg, MAC_CSR12_BBP_CURRENT_STATE);
1449 if (state == !put_to_sleep) 1351 if (state == !put_to_sleep)
1450 return 0; 1352 return 0;
@@ -1526,8 +1428,8 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1526 rt2x00_desc_write(txd, 2, word); 1428 rt2x00_desc_write(txd, 2, word);
1527 1429
1528 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { 1430 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) {
1529 _rt2x00_desc_write(txd, 3, skbdesc->iv); 1431 _rt2x00_desc_write(txd, 3, skbdesc->iv[0]);
1530 _rt2x00_desc_write(txd, 4, skbdesc->eiv); 1432 _rt2x00_desc_write(txd, 4, skbdesc->iv[1]);
1531 } 1433 }
1532 1434
1533 rt2x00_desc_read(txd, 5, &word); 1435 rt2x00_desc_read(txd, 5, &word);
@@ -1584,11 +1486,11 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1584 * Disable beaconing while we are reloading the beacon data, 1486 * Disable beaconing while we are reloading the beacon data,
1585 * otherwise we might be sending out invalid data. 1487 * otherwise we might be sending out invalid data.
1586 */ 1488 */
1587 rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 1489 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
1588 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0); 1490 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
1589 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); 1491 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
1590 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 1492 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1591 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1493 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1592 1494
1593 /* 1495 /*
1594 * Write entire beacon with descriptor to register. 1496 * Write entire beacon with descriptor to register.
@@ -1606,8 +1508,7 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1606 entry->skb = NULL; 1508 entry->skb = NULL;
1607} 1509}
1608 1510
1609static int rt73usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev, 1511static int rt73usb_get_tx_data_len(struct queue_entry *entry)
1610 struct sk_buff *skb)
1611{ 1512{
1612 int length; 1513 int length;
1613 1514
@@ -1615,8 +1516,8 @@ static int rt73usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev,
1615 * The length _must_ be a multiple of 4, 1516 * The length _must_ be a multiple of 4,
1616 * but it must _not_ be a multiple of the USB packet size. 1517 * but it must _not_ be a multiple of the USB packet size.
1617 */ 1518 */
1618 length = roundup(skb->len, 4); 1519 length = roundup(entry->skb->len, 4);
1619 length += (4 * !(length % rt2x00dev->usb_maxpacket)); 1520 length += (4 * !(length % entry->queue->usb_maxpacket));
1620 1521
1621 return length; 1522 return length;
1622} 1523}
@@ -1635,14 +1536,14 @@ static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1635 * For Wi-Fi faily generated beacons between participating stations. 1536 * For Wi-Fi faily generated beacons between participating stations.
1636 * Set TBTT phase adaptive adjustment step to 8us (default 16us) 1537 * Set TBTT phase adaptive adjustment step to 8us (default 16us)
1637 */ 1538 */
1638 rt73usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); 1539 rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
1639 1540
1640 rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 1541 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
1641 if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) { 1542 if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) {
1642 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1); 1543 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1643 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1); 1544 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
1644 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); 1545 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1645 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1546 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1646 } 1547 }
1647} 1548}
1648 1549
@@ -1685,7 +1586,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1685} 1586}
1686 1587
1687static void rt73usb_fill_rxdone(struct queue_entry *entry, 1588static void rt73usb_fill_rxdone(struct queue_entry *entry,
1688 struct rxdone_entry_desc *rxdesc) 1589 struct rxdone_entry_desc *rxdesc)
1689{ 1590{
1690 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1591 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1691 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1592 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
@@ -1717,9 +1618,12 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
1717 } 1618 }
1718 1619
1719 if (rxdesc->cipher != CIPHER_NONE) { 1620 if (rxdesc->cipher != CIPHER_NONE) {
1720 _rt2x00_desc_read(rxd, 2, &rxdesc->iv); 1621 _rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
1721 _rt2x00_desc_read(rxd, 3, &rxdesc->eiv); 1622 _rt2x00_desc_read(rxd, 3, &rxdesc->iv[1]);
1623 rxdesc->dev_flags |= RXDONE_CRYPTO_IV;
1624
1722 _rt2x00_desc_read(rxd, 4, &rxdesc->icv); 1625 _rt2x00_desc_read(rxd, 4, &rxdesc->icv);
1626 rxdesc->dev_flags |= RXDONE_CRYPTO_ICV;
1723 1627
1724 /* 1628 /*
1725 * Hardware has stripped IV/EIV data from 802.11 frame during 1629 * Hardware has stripped IV/EIV data from 802.11 frame during
@@ -1781,10 +1685,8 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1781 */ 1685 */
1782 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); 1686 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
1783 if (!is_valid_ether_addr(mac)) { 1687 if (!is_valid_ether_addr(mac)) {
1784 DECLARE_MAC_BUF(macbuf);
1785
1786 random_ether_addr(mac); 1688 random_ether_addr(mac);
1787 EEPROM(rt2x00dev, "MAC: %s\n", print_mac(macbuf, mac)); 1689 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
1788 } 1690 }
1789 1691
1790 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); 1692 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
@@ -1883,7 +1785,7 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1883 * Identify RF chipset. 1785 * Identify RF chipset.
1884 */ 1786 */
1885 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 1787 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
1886 rt73usb_register_read(rt2x00dev, MAC_CSR0, &reg); 1788 rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
1887 rt2x00_set_chip(rt2x00dev, RT2571, value, reg); 1789 rt2x00_set_chip(rt2x00dev, RT2571, value, reg);
1888 1790
1889 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x25730)) { 1791 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x25730)) {
@@ -2211,20 +2113,6 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
2211/* 2113/*
2212 * IEEE80211 stack callback functions. 2114 * IEEE80211 stack callback functions.
2213 */ 2115 */
2214static int rt73usb_set_retry_limit(struct ieee80211_hw *hw,
2215 u32 short_retry, u32 long_retry)
2216{
2217 struct rt2x00_dev *rt2x00dev = hw->priv;
2218 u32 reg;
2219
2220 rt73usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
2221 rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT, long_retry);
2222 rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT, short_retry);
2223 rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg);
2224
2225 return 0;
2226}
2227
2228static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, 2116static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2229 const struct ieee80211_tx_queue_params *params) 2117 const struct ieee80211_tx_queue_params *params)
2230{ 2118{
@@ -2251,33 +2139,33 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2251 field.bit_offset = queue_idx * 16; 2139 field.bit_offset = queue_idx * 16;
2252 field.bit_mask = 0xffff << field.bit_offset; 2140 field.bit_mask = 0xffff << field.bit_offset;
2253 2141
2254 rt73usb_register_read(rt2x00dev, AC_TXOP_CSR0, &reg); 2142 rt2x00usb_register_read(rt2x00dev, AC_TXOP_CSR0, &reg);
2255 rt2x00_set_field32(&reg, field, queue->txop); 2143 rt2x00_set_field32(&reg, field, queue->txop);
2256 rt73usb_register_write(rt2x00dev, AC_TXOP_CSR0, reg); 2144 rt2x00usb_register_write(rt2x00dev, AC_TXOP_CSR0, reg);
2257 } else if (queue_idx < 4) { 2145 } else if (queue_idx < 4) {
2258 field.bit_offset = (queue_idx - 2) * 16; 2146 field.bit_offset = (queue_idx - 2) * 16;
2259 field.bit_mask = 0xffff << field.bit_offset; 2147 field.bit_mask = 0xffff << field.bit_offset;
2260 2148
2261 rt73usb_register_read(rt2x00dev, AC_TXOP_CSR1, &reg); 2149 rt2x00usb_register_read(rt2x00dev, AC_TXOP_CSR1, &reg);
2262 rt2x00_set_field32(&reg, field, queue->txop); 2150 rt2x00_set_field32(&reg, field, queue->txop);
2263 rt73usb_register_write(rt2x00dev, AC_TXOP_CSR1, reg); 2151 rt2x00usb_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
2264 } 2152 }
2265 2153
2266 /* Update WMM registers */ 2154 /* Update WMM registers */
2267 field.bit_offset = queue_idx * 4; 2155 field.bit_offset = queue_idx * 4;
2268 field.bit_mask = 0xf << field.bit_offset; 2156 field.bit_mask = 0xf << field.bit_offset;
2269 2157
2270 rt73usb_register_read(rt2x00dev, AIFSN_CSR, &reg); 2158 rt2x00usb_register_read(rt2x00dev, AIFSN_CSR, &reg);
2271 rt2x00_set_field32(&reg, field, queue->aifs); 2159 rt2x00_set_field32(&reg, field, queue->aifs);
2272 rt73usb_register_write(rt2x00dev, AIFSN_CSR, reg); 2160 rt2x00usb_register_write(rt2x00dev, AIFSN_CSR, reg);
2273 2161
2274 rt73usb_register_read(rt2x00dev, CWMIN_CSR, &reg); 2162 rt2x00usb_register_read(rt2x00dev, CWMIN_CSR, &reg);
2275 rt2x00_set_field32(&reg, field, queue->cw_min); 2163 rt2x00_set_field32(&reg, field, queue->cw_min);
2276 rt73usb_register_write(rt2x00dev, CWMIN_CSR, reg); 2164 rt2x00usb_register_write(rt2x00dev, CWMIN_CSR, reg);
2277 2165
2278 rt73usb_register_read(rt2x00dev, CWMAX_CSR, &reg); 2166 rt2x00usb_register_read(rt2x00dev, CWMAX_CSR, &reg);
2279 rt2x00_set_field32(&reg, field, queue->cw_max); 2167 rt2x00_set_field32(&reg, field, queue->cw_max);
2280 rt73usb_register_write(rt2x00dev, CWMAX_CSR, reg); 2168 rt2x00usb_register_write(rt2x00dev, CWMAX_CSR, reg);
2281 2169
2282 return 0; 2170 return 0;
2283} 2171}
@@ -2295,9 +2183,9 @@ static u64 rt73usb_get_tsf(struct ieee80211_hw *hw)
2295 u64 tsf; 2183 u64 tsf;
2296 u32 reg; 2184 u32 reg;
2297 2185
2298 rt73usb_register_read(rt2x00dev, TXRX_CSR13, &reg); 2186 rt2x00usb_register_read(rt2x00dev, TXRX_CSR13, &reg);
2299 tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32; 2187 tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32;
2300 rt73usb_register_read(rt2x00dev, TXRX_CSR12, &reg); 2188 rt2x00usb_register_read(rt2x00dev, TXRX_CSR12, &reg);
2301 tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER); 2189 tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER);
2302 2190
2303 return tsf; 2191 return tsf;
@@ -2317,7 +2205,6 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
2317 .configure_filter = rt2x00mac_configure_filter, 2205 .configure_filter = rt2x00mac_configure_filter,
2318 .set_key = rt2x00mac_set_key, 2206 .set_key = rt2x00mac_set_key,
2319 .get_stats = rt2x00mac_get_stats, 2207 .get_stats = rt2x00mac_get_stats,
2320 .set_retry_limit = rt73usb_set_retry_limit,
2321 .bss_info_changed = rt2x00mac_bss_info_changed, 2208 .bss_info_changed = rt2x00mac_bss_info_changed,
2322 .conf_tx = rt73usb_conf_tx, 2209 .conf_tx = rt73usb_conf_tx,
2323 .get_tx_stats = rt2x00mac_get_tx_stats, 2210 .get_tx_stats = rt2x00mac_get_tx_stats,
@@ -2331,8 +2218,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2331 .load_firmware = rt73usb_load_firmware, 2218 .load_firmware = rt73usb_load_firmware,
2332 .initialize = rt2x00usb_initialize, 2219 .initialize = rt2x00usb_initialize,
2333 .uninitialize = rt2x00usb_uninitialize, 2220 .uninitialize = rt2x00usb_uninitialize,
2334 .init_rxentry = rt2x00usb_init_rxentry, 2221 .clear_entry = rt2x00usb_clear_entry,
2335 .init_txentry = rt2x00usb_init_txentry,
2336 .set_device_state = rt73usb_set_device_state, 2222 .set_device_state = rt73usb_set_device_state,
2337 .link_stats = rt73usb_link_stats, 2223 .link_stats = rt73usb_link_stats,
2338 .reset_tuner = rt73usb_reset_tuner, 2224 .reset_tuner = rt73usb_reset_tuner,
@@ -2348,6 +2234,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2348 .config_filter = rt73usb_config_filter, 2234 .config_filter = rt73usb_config_filter,
2349 .config_intf = rt73usb_config_intf, 2235 .config_intf = rt73usb_config_intf,
2350 .config_erp = rt73usb_config_erp, 2236 .config_erp = rt73usb_config_erp,
2237 .config_ant = rt73usb_config_ant,
2351 .config = rt73usb_config, 2238 .config = rt73usb_config,
2352}; 2239};
2353 2240
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 868386c457f6..46e1405eb0e2 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -48,7 +48,9 @@
48#define CSR_REG_SIZE 0x04b0 48#define CSR_REG_SIZE 0x04b0
49#define EEPROM_BASE 0x0000 49#define EEPROM_BASE 0x0000
50#define EEPROM_SIZE 0x0100 50#define EEPROM_SIZE 0x0100
51#define BBP_BASE 0x0000
51#define BBP_SIZE 0x0080 52#define BBP_SIZE 0x0080
53#define RF_BASE 0x0000
52#define RF_SIZE 0x0014 54#define RF_SIZE 0x0014
53 55
54/* 56/*
diff --git a/drivers/net/wireless/rtl818x/Makefile b/drivers/net/wireless/rtl818x/Makefile
new file mode 100644
index 000000000000..c113b3e69046
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/Makefile
@@ -0,0 +1,7 @@
1rtl8180-objs := rtl8180_dev.o rtl8180_rtl8225.o rtl8180_sa2400.o rtl8180_max2820.o rtl8180_grf5101.o
2rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o
3
4obj-$(CONFIG_RTL8180) += rtl8180.o
5obj-$(CONFIG_RTL8187) += rtl8187.o
6
7
diff --git a/drivers/net/wireless/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index 8721282a8185..8721282a8185 100644
--- a/drivers/net/wireless/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
diff --git a/drivers/net/wireless/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index df7e78ee8a88..5f887fb137a9 100644
--- a/drivers/net/wireless/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -182,15 +182,13 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
182 skb->len, PCI_DMA_TODEVICE); 182 skb->len, PCI_DMA_TODEVICE);
183 183
184 info = IEEE80211_SKB_CB(skb); 184 info = IEEE80211_SKB_CB(skb);
185 memset(&info->status, 0, sizeof(info->status)); 185 ieee80211_tx_info_clear_status(info);
186 186
187 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 187 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
188 if (flags & RTL818X_TX_DESC_FLAG_TX_OK) 188 (flags & RTL818X_TX_DESC_FLAG_TX_OK))
189 info->flags |= IEEE80211_TX_STAT_ACK; 189 info->flags |= IEEE80211_TX_STAT_ACK;
190 else 190
191 info->status.excessive_retries = 1; 191 info->status.rates[0].count = (flags & 0xFF) + 1;
192 }
193 info->status.retry_count = flags & 0xFF;
194 192
195 ieee80211_tx_status_irqsafe(dev, skb); 193 ieee80211_tx_status_irqsafe(dev, skb);
196 if (ring->entries - skb_queue_len(&ring->queue) == 2) 194 if (ring->entries - skb_queue_len(&ring->queue) == 2)
@@ -243,6 +241,7 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
243 unsigned int idx, prio; 241 unsigned int idx, prio;
244 dma_addr_t mapping; 242 dma_addr_t mapping;
245 u32 tx_flags; 243 u32 tx_flags;
244 u8 rc_flags;
246 u16 plcp_len = 0; 245 u16 plcp_len = 0;
247 __le16 rts_duration = 0; 246 __le16 rts_duration = 0;
248 247
@@ -261,15 +260,16 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
261 tx_flags |= RTL818X_TX_DESC_FLAG_DMA | 260 tx_flags |= RTL818X_TX_DESC_FLAG_DMA |
262 RTL818X_TX_DESC_FLAG_NO_ENC; 261 RTL818X_TX_DESC_FLAG_NO_ENC;
263 262
264 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { 263 rc_flags = info->control.rates[0].flags;
264 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
265 tx_flags |= RTL818X_TX_DESC_FLAG_RTS; 265 tx_flags |= RTL818X_TX_DESC_FLAG_RTS;
266 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 266 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
267 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { 267 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
268 tx_flags |= RTL818X_TX_DESC_FLAG_CTS; 268 tx_flags |= RTL818X_TX_DESC_FLAG_CTS;
269 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 269 tx_flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
270 } 270 }
271 271
272 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) 272 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
273 rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len, 273 rts_duration = ieee80211_rts_duration(dev, priv->vif, skb->len,
274 info); 274 info);
275 275
@@ -292,9 +292,9 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
292 entry->plcp_len = cpu_to_le16(plcp_len); 292 entry->plcp_len = cpu_to_le16(plcp_len);
293 entry->tx_buf = cpu_to_le32(mapping); 293 entry->tx_buf = cpu_to_le32(mapping);
294 entry->frame_len = cpu_to_le32(skb->len); 294 entry->frame_len = cpu_to_le32(skb->len);
295 entry->flags2 = info->control.retries[0].rate_idx >= 0 ? 295 entry->flags2 = info->control.rates[1].idx >= 0 ?
296 ieee80211_get_alt_retry_rate(dev, info, 0)->bitrate << 4 : 0; 296 ieee80211_get_alt_retry_rate(dev, info, 0)->bitrate << 4 : 0;
297 entry->retry_limit = info->control.retry_limit; 297 entry->retry_limit = info->control.rates[0].count;
298 entry->flags = cpu_to_le32(tx_flags); 298 entry->flags = cpu_to_le32(tx_flags);
299 __skb_queue_tail(&ring->queue, skb); 299 __skb_queue_tail(&ring->queue, skb);
300 if (ring->entries - skb_queue_len(&ring->queue) < 2) 300 if (ring->entries - skb_queue_len(&ring->queue) < 2)
@@ -692,9 +692,10 @@ static void rtl8180_remove_interface(struct ieee80211_hw *dev,
692 priv->vif = NULL; 692 priv->vif = NULL;
693} 693}
694 694
695static int rtl8180_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) 695static int rtl8180_config(struct ieee80211_hw *dev, u32 changed)
696{ 696{
697 struct rtl8180_priv *priv = dev->priv; 697 struct rtl8180_priv *priv = dev->priv;
698 struct ieee80211_conf *conf = &dev->conf;
698 699
699 priv->rf->set_chan(dev, conf); 700 priv->rf->set_chan(dev, conf);
700 701
@@ -719,6 +720,17 @@ static int rtl8180_config_interface(struct ieee80211_hw *dev,
719 return 0; 720 return 0;
720} 721}
721 722
723static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
724 struct ieee80211_vif *vif,
725 struct ieee80211_bss_conf *info,
726 u32 changed)
727{
728 struct rtl8180_priv *priv = dev->priv;
729
730 if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp)
731 priv->rf->conf_erp(dev, info);
732}
733
722static void rtl8180_configure_filter(struct ieee80211_hw *dev, 734static void rtl8180_configure_filter(struct ieee80211_hw *dev,
723 unsigned int changed_flags, 735 unsigned int changed_flags,
724 unsigned int *total_flags, 736 unsigned int *total_flags,
@@ -759,6 +771,7 @@ static const struct ieee80211_ops rtl8180_ops = {
759 .remove_interface = rtl8180_remove_interface, 771 .remove_interface = rtl8180_remove_interface,
760 .config = rtl8180_config, 772 .config = rtl8180_config,
761 .config_interface = rtl8180_config_interface, 773 .config_interface = rtl8180_config_interface,
774 .bss_info_changed = rtl8180_bss_info_changed,
762 .configure_filter = rtl8180_configure_filter, 775 .configure_filter = rtl8180_configure_filter,
763}; 776};
764 777
@@ -806,7 +819,6 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
806 const char *chip_name, *rf_name = NULL; 819 const char *chip_name, *rf_name = NULL;
807 u32 reg; 820 u32 reg;
808 u16 eeprom_val; 821 u16 eeprom_val;
809 DECLARE_MAC_BUF(mac);
810 822
811 err = pci_enable_device(pdev); 823 err = pci_enable_device(pdev);
812 if (err) { 824 if (err) {
@@ -855,7 +867,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
855 priv = dev->priv; 867 priv = dev->priv;
856 priv->pdev = pdev; 868 priv->pdev = pdev;
857 869
858 dev->max_altrates = 1; 870 dev->max_rates = 2;
859 SET_IEEE80211_DEV(dev, &pdev->dev); 871 SET_IEEE80211_DEV(dev, &pdev->dev);
860 pci_set_drvdata(pdev, dev); 872 pci_set_drvdata(pdev, dev);
861 873
@@ -1002,8 +1014,8 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
1002 goto err_iounmap; 1014 goto err_iounmap;
1003 } 1015 }
1004 1016
1005 printk(KERN_INFO "%s: hwaddr %s, %s + %s\n", 1017 printk(KERN_INFO "%s: hwaddr %pM, %s + %s\n",
1006 wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr), 1018 wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
1007 chip_name, priv->rf->name); 1019 chip_name, priv->rf->name);
1008 1020
1009 return 0; 1021 return 0;
diff --git a/drivers/net/wireless/rtl8180_grf5101.c b/drivers/net/wireless/rtl818x/rtl8180_grf5101.c
index 947ee55f18b2..947ee55f18b2 100644
--- a/drivers/net/wireless/rtl8180_grf5101.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_grf5101.c
diff --git a/drivers/net/wireless/rtl8180_grf5101.h b/drivers/net/wireless/rtl818x/rtl8180_grf5101.h
index 76647111bcff..76647111bcff 100644
--- a/drivers/net/wireless/rtl8180_grf5101.h
+++ b/drivers/net/wireless/rtl818x/rtl8180_grf5101.h
diff --git a/drivers/net/wireless/rtl8180_max2820.c b/drivers/net/wireless/rtl818x/rtl8180_max2820.c
index 6c825fd7f3b6..6c825fd7f3b6 100644
--- a/drivers/net/wireless/rtl8180_max2820.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_max2820.c
diff --git a/drivers/net/wireless/rtl8180_max2820.h b/drivers/net/wireless/rtl818x/rtl8180_max2820.h
index 61cf6d1e7d57..61cf6d1e7d57 100644
--- a/drivers/net/wireless/rtl8180_max2820.h
+++ b/drivers/net/wireless/rtl818x/rtl8180_max2820.h
diff --git a/drivers/net/wireless/rtl8180_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c
index cd22781728a9..4d2be0d9672b 100644
--- a/drivers/net/wireless/rtl8180_rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c
@@ -725,8 +725,14 @@ static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
725 725
726 rtl8225_write(dev, 0x7, rtl8225_chan[chan - 1]); 726 rtl8225_write(dev, 0x7, rtl8225_chan[chan - 1]);
727 msleep(10); 727 msleep(10);
728}
729
730static void rtl8225_rf_conf_erp(struct ieee80211_hw *dev,
731 struct ieee80211_bss_conf *info)
732{
733 struct rtl8180_priv *priv = dev->priv;
728 734
729 if (conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME) { 735 if (info->use_short_slot) {
730 rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9); 736 rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9);
731 rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22); 737 rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22);
732 rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14); 738 rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14);
@@ -745,14 +751,16 @@ static const struct rtl818x_rf_ops rtl8225_ops = {
745 .name = "rtl8225", 751 .name = "rtl8225",
746 .init = rtl8225_rf_init, 752 .init = rtl8225_rf_init,
747 .stop = rtl8225_rf_stop, 753 .stop = rtl8225_rf_stop,
748 .set_chan = rtl8225_rf_set_channel 754 .set_chan = rtl8225_rf_set_channel,
755 .conf_erp = rtl8225_rf_conf_erp,
749}; 756};
750 757
751static const struct rtl818x_rf_ops rtl8225z2_ops = { 758static const struct rtl818x_rf_ops rtl8225z2_ops = {
752 .name = "rtl8225z2", 759 .name = "rtl8225z2",
753 .init = rtl8225z2_rf_init, 760 .init = rtl8225z2_rf_init,
754 .stop = rtl8225_rf_stop, 761 .stop = rtl8225_rf_stop,
755 .set_chan = rtl8225_rf_set_channel 762 .set_chan = rtl8225_rf_set_channel,
763 .conf_erp = rtl8225_rf_conf_erp,
756}; 764};
757 765
758const struct rtl818x_rf_ops * rtl8180_detect_rf(struct ieee80211_hw *dev) 766const struct rtl818x_rf_ops * rtl8180_detect_rf(struct ieee80211_hw *dev)
diff --git a/drivers/net/wireless/rtl8180_rtl8225.h b/drivers/net/wireless/rtl818x/rtl8180_rtl8225.h
index 310013a2d726..310013a2d726 100644
--- a/drivers/net/wireless/rtl8180_rtl8225.h
+++ b/drivers/net/wireless/rtl818x/rtl8180_rtl8225.h
diff --git a/drivers/net/wireless/rtl8180_sa2400.c b/drivers/net/wireless/rtl818x/rtl8180_sa2400.c
index cea4e0ccb92d..cea4e0ccb92d 100644
--- a/drivers/net/wireless/rtl8180_sa2400.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_sa2400.c
diff --git a/drivers/net/wireless/rtl8180_sa2400.h b/drivers/net/wireless/rtl818x/rtl8180_sa2400.h
index a4aaa0d413f1..a4aaa0d413f1 100644
--- a/drivers/net/wireless/rtl8180_sa2400.h
+++ b/drivers/net/wireless/rtl818x/rtl8180_sa2400.h
diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index e82bb4d289e8..3b1e1c2aad26 100644
--- a/drivers/net/wireless/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -99,8 +99,8 @@ struct rtl8187_priv {
99 struct ieee80211_supported_band band; 99 struct ieee80211_supported_band band;
100 struct usb_device *udev; 100 struct usb_device *udev;
101 u32 rx_conf; 101 u32 rx_conf;
102 struct usb_anchor anchored;
102 u16 txpwr_base; 103 u16 txpwr_base;
103 u16 seqno;
104 u8 asic_rev; 104 u8 asic_rev;
105 u8 is_rtl8187b; 105 u8 is_rtl8187b;
106 enum { 106 enum {
@@ -112,6 +112,12 @@ struct rtl8187_priv {
112 u8 signal; 112 u8 signal;
113 u8 quality; 113 u8 quality;
114 u8 noise; 114 u8 noise;
115 u8 slot_time;
116 u8 aifsn[4];
117 struct {
118 __le64 buf;
119 struct sk_buff_head queue;
120 } b_tx_status;
115}; 121};
116 122
117void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data); 123void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 69eb0132593b..00ce3ef39abe 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -7,6 +7,11 @@
7 * Based on the r8187 driver, which is: 7 * Based on the r8187 driver, which is:
8 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al. 8 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
9 * 9 *
10 * The driver was extended to the RTL8187B in 2008 by:
11 * Herton Ronaldo Krzesinski <herton@mandriva.com.br>
12 * Hin-Tak Leung <htl10@users.sourceforge.net>
13 * Larry Finger <Larry.Finger@lwfinger.net>
14 *
10 * Magic delays and register offsets below are taken from the original 15 * Magic delays and register offsets below are taken from the original
11 * r8187 driver sources. Thanks to Realtek for their support! 16 * r8187 driver sources. Thanks to Realtek for their support!
12 * 17 *
@@ -27,6 +32,9 @@
27 32
28MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); 33MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
29MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>"); 34MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
35MODULE_AUTHOR("Herton Ronaldo Krzesinski <herton@mandriva.com.br>");
36MODULE_AUTHOR("Hin-Tak Leung <htl10@users.sourceforge.net>");
37MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
30MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver"); 38MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");
31MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
32 40
@@ -91,7 +99,6 @@ static const struct ieee80211_channel rtl818x_channels[] = {
91static void rtl8187_iowrite_async_cb(struct urb *urb) 99static void rtl8187_iowrite_async_cb(struct urb *urb)
92{ 100{
93 kfree(urb->context); 101 kfree(urb->context);
94 usb_free_urb(urb);
95} 102}
96 103
97static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr, 104static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr,
@@ -128,11 +135,13 @@ static void rtl8187_iowrite_async(struct rtl8187_priv *priv, __le16 addr,
128 usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0), 135 usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0),
129 (unsigned char *)dr, buf, len, 136 (unsigned char *)dr, buf, len,
130 rtl8187_iowrite_async_cb, buf); 137 rtl8187_iowrite_async_cb, buf);
138 usb_anchor_urb(urb, &priv->anchored);
131 rc = usb_submit_urb(urb, GFP_ATOMIC); 139 rc = usb_submit_urb(urb, GFP_ATOMIC);
132 if (rc < 0) { 140 if (rc < 0) {
133 kfree(buf); 141 kfree(buf);
134 usb_free_urb(urb); 142 usb_unanchor_urb(urb);
135 } 143 }
144 usb_free_urb(urb);
136} 145}
137 146
138static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv, 147static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv,
@@ -155,30 +164,45 @@ void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
155 rtl818x_iowrite8(priv, &priv->map->PHY[2], (data >> 16) & 0xFF); 164 rtl818x_iowrite8(priv, &priv->map->PHY[2], (data >> 16) & 0xFF);
156 rtl818x_iowrite8(priv, &priv->map->PHY[1], (data >> 8) & 0xFF); 165 rtl818x_iowrite8(priv, &priv->map->PHY[1], (data >> 8) & 0xFF);
157 rtl818x_iowrite8(priv, &priv->map->PHY[0], data & 0xFF); 166 rtl818x_iowrite8(priv, &priv->map->PHY[0], data & 0xFF);
158
159 msleep(1);
160} 167}
161 168
162static void rtl8187_tx_cb(struct urb *urb) 169static void rtl8187_tx_cb(struct urb *urb)
163{ 170{
164 struct sk_buff *skb = (struct sk_buff *)urb->context; 171 struct sk_buff *skb = (struct sk_buff *)urb->context;
165 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 172 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
166 struct ieee80211_hw *hw = info->driver_data[0]; 173 struct ieee80211_hw *hw = info->rate_driver_data[0];
167 struct rtl8187_priv *priv = hw->priv; 174 struct rtl8187_priv *priv = hw->priv;
168 175
169 usb_free_urb(info->driver_data[1]);
170 skb_pull(skb, priv->is_rtl8187b ? sizeof(struct rtl8187b_tx_hdr) : 176 skb_pull(skb, priv->is_rtl8187b ? sizeof(struct rtl8187b_tx_hdr) :
171 sizeof(struct rtl8187_tx_hdr)); 177 sizeof(struct rtl8187_tx_hdr));
172 memset(&info->status, 0, sizeof(info->status)); 178 ieee80211_tx_info_clear_status(info);
173 info->flags |= IEEE80211_TX_STAT_ACK; 179
174 ieee80211_tx_status_irqsafe(hw, skb); 180 if (!urb->status &&
181 !(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
182 priv->is_rtl8187b) {
183 skb_queue_tail(&priv->b_tx_status.queue, skb);
184
185 /* queue is "full", discard last items */
186 while (skb_queue_len(&priv->b_tx_status.queue) > 5) {
187 struct sk_buff *old_skb;
188
189 dev_dbg(&priv->udev->dev,
190 "transmit status queue full\n");
191
192 old_skb = skb_dequeue(&priv->b_tx_status.queue);
193 ieee80211_tx_status_irqsafe(hw, old_skb);
194 }
195 } else {
196 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !urb->status)
197 info->flags |= IEEE80211_TX_STAT_ACK;
198 ieee80211_tx_status_irqsafe(hw, skb);
199 }
175} 200}
176 201
177static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 202static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
178{ 203{
179 struct rtl8187_priv *priv = dev->priv; 204 struct rtl8187_priv *priv = dev->priv;
180 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 205 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
181 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
182 unsigned int ep; 206 unsigned int ep;
183 void *buf; 207 void *buf;
184 struct urb *urb; 208 struct urb *urb;
@@ -189,7 +213,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
189 urb = usb_alloc_urb(0, GFP_ATOMIC); 213 urb = usb_alloc_urb(0, GFP_ATOMIC);
190 if (!urb) { 214 if (!urb) {
191 kfree_skb(skb); 215 kfree_skb(skb);
192 return 0; 216 return -ENOMEM;
193 } 217 }
194 218
195 flags = skb->len; 219 flags = skb->len;
@@ -198,12 +222,12 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
198 flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24; 222 flags |= ieee80211_get_tx_rate(dev, info)->hw_value << 24;
199 if (ieee80211_has_morefrags(((struct ieee80211_hdr *)skb->data)->frame_control)) 223 if (ieee80211_has_morefrags(((struct ieee80211_hdr *)skb->data)->frame_control))
200 flags |= RTL818X_TX_DESC_FLAG_MOREFRAG; 224 flags |= RTL818X_TX_DESC_FLAG_MOREFRAG;
201 if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) { 225 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
202 flags |= RTL818X_TX_DESC_FLAG_RTS; 226 flags |= RTL818X_TX_DESC_FLAG_RTS;
203 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 227 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
204 rts_dur = ieee80211_rts_duration(dev, priv->vif, 228 rts_dur = ieee80211_rts_duration(dev, priv->vif,
205 skb->len, info); 229 skb->len, info);
206 } else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) { 230 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
207 flags |= RTL818X_TX_DESC_FLAG_CTS; 231 flags |= RTL818X_TX_DESC_FLAG_CTS;
208 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19; 232 flags |= ieee80211_get_rts_cts_rate(dev, info)->hw_value << 19;
209 } 233 }
@@ -214,7 +238,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
214 hdr->flags = cpu_to_le32(flags); 238 hdr->flags = cpu_to_le32(flags);
215 hdr->len = 0; 239 hdr->len = 0;
216 hdr->rts_duration = rts_dur; 240 hdr->rts_duration = rts_dur;
217 hdr->retry = cpu_to_le32(info->control.retry_limit << 8); 241 hdr->retry = cpu_to_le32((info->control.rates[0].count - 1) << 8);
218 buf = hdr; 242 buf = hdr;
219 243
220 ep = 2; 244 ep = 2;
@@ -232,7 +256,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
232 memset(hdr, 0, sizeof(*hdr)); 256 memset(hdr, 0, sizeof(*hdr));
233 hdr->flags = cpu_to_le32(flags); 257 hdr->flags = cpu_to_le32(flags);
234 hdr->rts_duration = rts_dur; 258 hdr->rts_duration = rts_dur;
235 hdr->retry = cpu_to_le32(info->control.retry_limit << 8); 259 hdr->retry = cpu_to_le32((info->control.rates[0].count - 1) << 8);
236 hdr->tx_duration = 260 hdr->tx_duration =
237 ieee80211_generic_frame_duration(dev, priv->vif, 261 ieee80211_generic_frame_duration(dev, priv->vif,
238 skb->len, txrate); 262 skb->len, txrate);
@@ -244,32 +268,20 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
244 ep = epmap[skb_get_queue_mapping(skb)]; 268 ep = epmap[skb_get_queue_mapping(skb)];
245 } 269 }
246 270
247 /* FIXME: The sequence that follows is needed for this driver to 271 info->rate_driver_data[0] = dev;
248 * work with mac80211 since "mac80211: fix TX sequence numbers". 272 info->rate_driver_data[1] = urb;
249 * As with the temporary code in rt2x00, changes will be needed
250 * to get proper sequence numbers on beacons. In addition, this
251 * patch places the sequence number in the hardware state, which
252 * limits us to a single virtual state.
253 */
254 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
255 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
256 priv->seqno += 0x10;
257 ieee80211hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
258 ieee80211hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
259 }
260
261 info->driver_data[0] = dev;
262 info->driver_data[1] = urb;
263 273
264 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, ep), 274 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, ep),
265 buf, skb->len, rtl8187_tx_cb, skb); 275 buf, skb->len, rtl8187_tx_cb, skb);
276 usb_anchor_urb(urb, &priv->anchored);
266 rc = usb_submit_urb(urb, GFP_ATOMIC); 277 rc = usb_submit_urb(urb, GFP_ATOMIC);
267 if (rc < 0) { 278 if (rc < 0) {
268 usb_free_urb(urb); 279 usb_unanchor_urb(urb);
269 kfree_skb(skb); 280 kfree_skb(skb);
270 } 281 }
282 usb_free_urb(urb);
271 283
272 return 0; 284 return rc;
273} 285}
274 286
275static void rtl8187_rx_cb(struct urb *urb) 287static void rtl8187_rx_cb(struct urb *urb)
@@ -282,50 +294,35 @@ static void rtl8187_rx_cb(struct urb *urb)
282 int rate, signal; 294 int rate, signal;
283 u32 flags; 295 u32 flags;
284 u32 quality; 296 u32 quality;
297 unsigned long f;
285 298
286 spin_lock(&priv->rx_queue.lock); 299 spin_lock_irqsave(&priv->rx_queue.lock, f);
287 if (skb->next) 300 if (skb->next)
288 __skb_unlink(skb, &priv->rx_queue); 301 __skb_unlink(skb, &priv->rx_queue);
289 else { 302 else {
290 spin_unlock(&priv->rx_queue.lock); 303 spin_unlock_irqrestore(&priv->rx_queue.lock, f);
291 return; 304 return;
292 } 305 }
293 spin_unlock(&priv->rx_queue.lock); 306 spin_unlock_irqrestore(&priv->rx_queue.lock, f);
307 skb_put(skb, urb->actual_length);
294 308
295 if (unlikely(urb->status)) { 309 if (unlikely(urb->status)) {
296 usb_free_urb(urb);
297 dev_kfree_skb_irq(skb); 310 dev_kfree_skb_irq(skb);
298 return; 311 return;
299 } 312 }
300 313
301 skb_put(skb, urb->actual_length);
302 if (!priv->is_rtl8187b) { 314 if (!priv->is_rtl8187b) {
303 struct rtl8187_rx_hdr *hdr = 315 struct rtl8187_rx_hdr *hdr =
304 (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr)); 316 (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr));
305 flags = le32_to_cpu(hdr->flags); 317 flags = le32_to_cpu(hdr->flags);
306 signal = hdr->signal & 0x7f; 318 /* As with the RTL8187B below, the AGC is used to calculate
319 * signal strength and quality. In this case, the scaling
320 * constants are derived from the output of p54usb.
321 */
322 quality = 130 - ((41 * hdr->agc) >> 6);
323 signal = -4 - ((27 * hdr->agc) >> 6);
307 rx_status.antenna = (hdr->signal >> 7) & 1; 324 rx_status.antenna = (hdr->signal >> 7) & 1;
308 rx_status.noise = hdr->noise;
309 rx_status.mactime = le64_to_cpu(hdr->mac_time); 325 rx_status.mactime = le64_to_cpu(hdr->mac_time);
310 priv->quality = signal;
311 rx_status.qual = priv->quality;
312 priv->noise = hdr->noise;
313 rate = (flags >> 20) & 0xF;
314 if (rate > 3) { /* OFDM rate */
315 if (signal > 90)
316 signal = 90;
317 else if (signal < 25)
318 signal = 25;
319 signal = 90 - signal;
320 } else { /* CCK rate */
321 if (signal > 95)
322 signal = 95;
323 else if (signal < 30)
324 signal = 30;
325 signal = 95 - signal;
326 }
327 rx_status.signal = signal;
328 priv->signal = signal;
329 } else { 326 } else {
330 struct rtl8187b_rx_hdr *hdr = 327 struct rtl8187b_rx_hdr *hdr =
331 (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr)); 328 (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr));
@@ -343,18 +340,18 @@ static void rtl8187_rx_cb(struct urb *urb)
343 */ 340 */
344 flags = le32_to_cpu(hdr->flags); 341 flags = le32_to_cpu(hdr->flags);
345 quality = 170 - hdr->agc; 342 quality = 170 - hdr->agc;
346 if (quality > 100)
347 quality = 100;
348 signal = 14 - hdr->agc / 2; 343 signal = 14 - hdr->agc / 2;
349 rx_status.qual = quality;
350 priv->quality = quality;
351 rx_status.signal = signal;
352 priv->signal = signal;
353 rx_status.antenna = (hdr->rssi >> 7) & 1; 344 rx_status.antenna = (hdr->rssi >> 7) & 1;
354 rx_status.mactime = le64_to_cpu(hdr->mac_time); 345 rx_status.mactime = le64_to_cpu(hdr->mac_time);
355 rate = (flags >> 20) & 0xF;
356 } 346 }
357 347
348 if (quality > 100)
349 quality = 100;
350 rx_status.qual = quality;
351 priv->quality = quality;
352 rx_status.signal = signal;
353 priv->signal = signal;
354 rate = (flags >> 20) & 0xF;
358 skb_trim(skb, flags & 0x0FFF); 355 skb_trim(skb, flags & 0x0FFF);
359 rx_status.rate_idx = rate; 356 rx_status.rate_idx = rate;
360 rx_status.freq = dev->conf.channel->center_freq; 357 rx_status.freq = dev->conf.channel->center_freq;
@@ -366,7 +363,6 @@ static void rtl8187_rx_cb(struct urb *urb)
366 363
367 skb = dev_alloc_skb(RTL8187_MAX_RX); 364 skb = dev_alloc_skb(RTL8187_MAX_RX);
368 if (unlikely(!skb)) { 365 if (unlikely(!skb)) {
369 usb_free_urb(urb);
370 /* TODO check rx queue length and refill *somewhere* */ 366 /* TODO check rx queue length and refill *somewhere* */
371 return; 367 return;
372 } 368 }
@@ -378,24 +374,32 @@ static void rtl8187_rx_cb(struct urb *urb)
378 urb->context = skb; 374 urb->context = skb;
379 skb_queue_tail(&priv->rx_queue, skb); 375 skb_queue_tail(&priv->rx_queue, skb);
380 376
381 usb_submit_urb(urb, GFP_ATOMIC); 377 usb_anchor_urb(urb, &priv->anchored);
378 if (usb_submit_urb(urb, GFP_ATOMIC)) {
379 usb_unanchor_urb(urb);
380 skb_unlink(skb, &priv->rx_queue);
381 dev_kfree_skb_irq(skb);
382 }
382} 383}
383 384
384static int rtl8187_init_urbs(struct ieee80211_hw *dev) 385static int rtl8187_init_urbs(struct ieee80211_hw *dev)
385{ 386{
386 struct rtl8187_priv *priv = dev->priv; 387 struct rtl8187_priv *priv = dev->priv;
387 struct urb *entry; 388 struct urb *entry = NULL;
388 struct sk_buff *skb; 389 struct sk_buff *skb;
389 struct rtl8187_rx_info *info; 390 struct rtl8187_rx_info *info;
391 int ret = 0;
390 392
391 while (skb_queue_len(&priv->rx_queue) < 8) { 393 while (skb_queue_len(&priv->rx_queue) < 8) {
392 skb = __dev_alloc_skb(RTL8187_MAX_RX, GFP_KERNEL); 394 skb = __dev_alloc_skb(RTL8187_MAX_RX, GFP_KERNEL);
393 if (!skb) 395 if (!skb) {
394 break; 396 ret = -ENOMEM;
397 goto err;
398 }
395 entry = usb_alloc_urb(0, GFP_KERNEL); 399 entry = usb_alloc_urb(0, GFP_KERNEL);
396 if (!entry) { 400 if (!entry) {
397 kfree_skb(skb); 401 ret = -ENOMEM;
398 break; 402 goto err;
399 } 403 }
400 usb_fill_bulk_urb(entry, priv->udev, 404 usb_fill_bulk_urb(entry, priv->udev,
401 usb_rcvbulkpipe(priv->udev, 405 usb_rcvbulkpipe(priv->udev,
@@ -406,10 +410,129 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev)
406 info->urb = entry; 410 info->urb = entry;
407 info->dev = dev; 411 info->dev = dev;
408 skb_queue_tail(&priv->rx_queue, skb); 412 skb_queue_tail(&priv->rx_queue, skb);
409 usb_submit_urb(entry, GFP_KERNEL); 413 usb_anchor_urb(entry, &priv->anchored);
414 ret = usb_submit_urb(entry, GFP_KERNEL);
415 if (ret) {
416 skb_unlink(skb, &priv->rx_queue);
417 usb_unanchor_urb(entry);
418 goto err;
419 }
420 usb_free_urb(entry);
410 } 421 }
422 return ret;
411 423
412 return 0; 424err:
425 usb_free_urb(entry);
426 kfree_skb(skb);
427 usb_kill_anchored_urbs(&priv->anchored);
428 return ret;
429}
430
431static void rtl8187b_status_cb(struct urb *urb)
432{
433 struct ieee80211_hw *hw = (struct ieee80211_hw *)urb->context;
434 struct rtl8187_priv *priv = hw->priv;
435 u64 val;
436 unsigned int cmd_type;
437
438 if (unlikely(urb->status))
439 return;
440
441 /*
442 * Read from status buffer:
443 *
444 * bits [30:31] = cmd type:
445 * - 0 indicates tx beacon interrupt
446 * - 1 indicates tx close descriptor
447 *
448 * In the case of tx beacon interrupt:
449 * [0:9] = Last Beacon CW
450 * [10:29] = reserved
451 * [30:31] = 00b
452 * [32:63] = Last Beacon TSF
453 *
454 * If it's tx close descriptor:
455 * [0:7] = Packet Retry Count
456 * [8:14] = RTS Retry Count
457 * [15] = TOK
458 * [16:27] = Sequence No
459 * [28] = LS
460 * [29] = FS
461 * [30:31] = 01b
462 * [32:47] = unused (reserved?)
463 * [48:63] = MAC Used Time
464 */
465 val = le64_to_cpu(priv->b_tx_status.buf);
466
467 cmd_type = (val >> 30) & 0x3;
468 if (cmd_type == 1) {
469 unsigned int pkt_rc, seq_no;
470 bool tok;
471 struct sk_buff *skb;
472 struct ieee80211_hdr *ieee80211hdr;
473 unsigned long flags;
474
475 pkt_rc = val & 0xFF;
476 tok = val & (1 << 15);
477 seq_no = (val >> 16) & 0xFFF;
478
479 spin_lock_irqsave(&priv->b_tx_status.queue.lock, flags);
480 skb_queue_reverse_walk(&priv->b_tx_status.queue, skb) {
481 ieee80211hdr = (struct ieee80211_hdr *)skb->data;
482
483 /*
484 * While testing, it was discovered that the seq_no
485 * doesn't actually contains the sequence number.
486 * Instead of returning just the 12 bits of sequence
487 * number, hardware is returning entire sequence control
488 * (fragment number plus sequence number) in a 12 bit
489 * only field overflowing after some time. As a
490 * workaround, just consider the lower bits, and expect
491 * it's unlikely we wrongly ack some sent data
492 */
493 if ((le16_to_cpu(ieee80211hdr->seq_ctrl)
494 & 0xFFF) == seq_no)
495 break;
496 }
497 if (skb != (struct sk_buff *) &priv->b_tx_status.queue) {
498 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
499
500 __skb_unlink(skb, &priv->b_tx_status.queue);
501 if (tok)
502 info->flags |= IEEE80211_TX_STAT_ACK;
503 info->status.rates[0].count = pkt_rc + 1;
504
505 ieee80211_tx_status_irqsafe(hw, skb);
506 }
507 spin_unlock_irqrestore(&priv->b_tx_status.queue.lock, flags);
508 }
509
510 usb_anchor_urb(urb, &priv->anchored);
511 if (usb_submit_urb(urb, GFP_ATOMIC))
512 usb_unanchor_urb(urb);
513}
514
515static int rtl8187b_init_status_urb(struct ieee80211_hw *dev)
516{
517 struct rtl8187_priv *priv = dev->priv;
518 struct urb *entry;
519 int ret = 0;
520
521 entry = usb_alloc_urb(0, GFP_KERNEL);
522 if (!entry)
523 return -ENOMEM;
524
525 usb_fill_bulk_urb(entry, priv->udev, usb_rcvbulkpipe(priv->udev, 9),
526 &priv->b_tx_status.buf, sizeof(priv->b_tx_status.buf),
527 rtl8187b_status_cb, dev);
528
529 usb_anchor_urb(entry, &priv->anchored);
530 ret = usb_submit_urb(entry, GFP_KERNEL);
531 if (ret)
532 usb_unanchor_urb(entry);
533 usb_free_urb(entry);
534
535 return ret;
413} 536}
414 537
415static int rtl8187_cmd_reset(struct ieee80211_hw *dev) 538static int rtl8187_cmd_reset(struct ieee80211_hw *dev)
@@ -687,7 +810,7 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
687 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480); 810 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
688 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488); 811 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488);
689 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); 812 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
690 msleep(1100); 813 msleep(100);
691 814
692 priv->rf->init(dev); 815 priv->rf->init(dev);
693 816
@@ -721,6 +844,13 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
721 844
722 rtl818x_iowrite16_idx(priv, (__le16 *)0xFFEC, 0x0800, 1); 845 rtl818x_iowrite16_idx(priv, (__le16 *)0xFFEC, 0x0800, 1);
723 846
847 priv->slot_time = 0x9;
848 priv->aifsn[0] = 2; /* AIFSN[AC_VO] */
849 priv->aifsn[1] = 2; /* AIFSN[AC_VI] */
850 priv->aifsn[2] = 7; /* AIFSN[AC_BK] */
851 priv->aifsn[3] = 3; /* AIFSN[AC_BE] */
852 rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0);
853
724 return 0; 854 return 0;
725} 855}
726 856
@@ -736,6 +866,9 @@ static int rtl8187_start(struct ieee80211_hw *dev)
736 return ret; 866 return ret;
737 867
738 mutex_lock(&priv->conf_mutex); 868 mutex_lock(&priv->conf_mutex);
869
870 init_usb_anchor(&priv->anchored);
871
739 if (priv->is_rtl8187b) { 872 if (priv->is_rtl8187b) {
740 reg = RTL818X_RX_CONF_MGMT | 873 reg = RTL818X_RX_CONF_MGMT |
741 RTL818X_RX_CONF_DATA | 874 RTL818X_RX_CONF_DATA |
@@ -757,6 +890,7 @@ static int rtl8187_start(struct ieee80211_hw *dev)
757 (7 << 0 /* long retry limit */) | 890 (7 << 0 /* long retry limit */) |
758 (7 << 21 /* MAX TX DMA */)); 891 (7 << 21 /* MAX TX DMA */));
759 rtl8187_init_urbs(dev); 892 rtl8187_init_urbs(dev);
893 rtl8187b_init_status_urb(dev);
760 mutex_unlock(&priv->conf_mutex); 894 mutex_unlock(&priv->conf_mutex);
761 return 0; 895 return 0;
762 } 896 }
@@ -809,7 +943,6 @@ static int rtl8187_start(struct ieee80211_hw *dev)
809static void rtl8187_stop(struct ieee80211_hw *dev) 943static void rtl8187_stop(struct ieee80211_hw *dev)
810{ 944{
811 struct rtl8187_priv *priv = dev->priv; 945 struct rtl8187_priv *priv = dev->priv;
812 struct rtl8187_rx_info *info;
813 struct sk_buff *skb; 946 struct sk_buff *skb;
814 u32 reg; 947 u32 reg;
815 948
@@ -828,11 +961,10 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
828 rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF); 961 rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF);
829 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 962 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
830 963
831 while ((skb = skb_dequeue(&priv->rx_queue))) { 964 while ((skb = skb_dequeue(&priv->b_tx_status.queue)))
832 info = (struct rtl8187_rx_info *)skb->cb; 965 dev_kfree_skb_any(skb);
833 usb_kill_urb(info->urb); 966
834 kfree_skb(skb); 967 usb_kill_anchored_urbs(&priv->anchored);
835 }
836 mutex_unlock(&priv->conf_mutex); 968 mutex_unlock(&priv->conf_mutex);
837} 969}
838 970
@@ -876,9 +1008,10 @@ static void rtl8187_remove_interface(struct ieee80211_hw *dev,
876 mutex_unlock(&priv->conf_mutex); 1008 mutex_unlock(&priv->conf_mutex);
877} 1009}
878 1010
879static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) 1011static int rtl8187_config(struct ieee80211_hw *dev, u32 changed)
880{ 1012{
881 struct rtl8187_priv *priv = dev->priv; 1013 struct rtl8187_priv *priv = dev->priv;
1014 struct ieee80211_conf *conf = &dev->conf;
882 u32 reg; 1015 u32 reg;
883 1016
884 mutex_lock(&priv->conf_mutex); 1017 mutex_lock(&priv->conf_mutex);
@@ -889,27 +1022,10 @@ static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
889 */ 1022 */
890 rtl818x_iowrite32(priv, &priv->map->TX_CONF, 1023 rtl818x_iowrite32(priv, &priv->map->TX_CONF,
891 reg | RTL818X_TX_CONF_LOOPBACK_MAC); 1024 reg | RTL818X_TX_CONF_LOOPBACK_MAC);
892 msleep(10);
893 priv->rf->set_chan(dev, conf); 1025 priv->rf->set_chan(dev, conf);
894 msleep(10); 1026 msleep(10);
895 rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg); 1027 rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg);
896 1028
897 if (!priv->is_rtl8187b) {
898 rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22);
899
900 if (conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME) {
901 rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9);
902 rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14);
903 rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x14);
904 rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0x73);
905 } else {
906 rtl818x_iowrite8(priv, &priv->map->SLOT, 0x14);
907 rtl818x_iowrite8(priv, &priv->map->DIFS, 0x24);
908 rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x24);
909 rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0xa5);
910 }
911 }
912
913 rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2); 1029 rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2);
914 rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100); 1030 rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100);
915 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100); 1031 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
@@ -944,6 +1060,89 @@ static int rtl8187_config_interface(struct ieee80211_hw *dev,
944 return 0; 1060 return 0;
945} 1061}
946 1062
1063/*
1064 * With 8187B, AC_*_PARAM clashes with FEMR definition in struct rtl818x_csr for
1065 * example. Thus we have to use raw values for AC_*_PARAM register addresses.
1066 */
1067static __le32 *rtl8187b_ac_addr[4] = {
1068 (__le32 *) 0xFFF0, /* AC_VO */
1069 (__le32 *) 0xFFF4, /* AC_VI */
1070 (__le32 *) 0xFFFC, /* AC_BK */
1071 (__le32 *) 0xFFF8, /* AC_BE */
1072};
1073
1074#define SIFS_TIME 0xa
1075
1076static void rtl8187_conf_erp(struct rtl8187_priv *priv, bool use_short_slot,
1077 bool use_short_preamble)
1078{
1079 if (priv->is_rtl8187b) {
1080 u8 difs, eifs;
1081 u16 ack_timeout;
1082 int queue;
1083
1084 if (use_short_slot) {
1085 priv->slot_time = 0x9;
1086 difs = 0x1c;
1087 eifs = 0x53;
1088 } else {
1089 priv->slot_time = 0x14;
1090 difs = 0x32;
1091 eifs = 0x5b;
1092 }
1093 rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22);
1094 rtl818x_iowrite8(priv, &priv->map->SLOT, priv->slot_time);
1095 rtl818x_iowrite8(priv, &priv->map->DIFS, difs);
1096
1097 /*
1098 * BRSR+1 on 8187B is in fact EIFS register
1099 * Value in units of 4 us
1100 */
1101 rtl818x_iowrite8(priv, (u8 *)&priv->map->BRSR + 1, eifs);
1102
1103 /*
1104 * For 8187B, CARRIER_SENSE_COUNTER is in fact ack timeout
1105 * register. In units of 4 us like eifs register
1106 * ack_timeout = ack duration + plcp + difs + preamble
1107 */
1108 ack_timeout = 112 + 48 + difs;
1109 if (use_short_preamble)
1110 ack_timeout += 72;
1111 else
1112 ack_timeout += 144;
1113 rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER,
1114 DIV_ROUND_UP(ack_timeout, 4));
1115
1116 for (queue = 0; queue < 4; queue++)
1117 rtl818x_iowrite8(priv, (u8 *) rtl8187b_ac_addr[queue],
1118 priv->aifsn[queue] * priv->slot_time +
1119 SIFS_TIME);
1120 } else {
1121 rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22);
1122 if (use_short_slot) {
1123 rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9);
1124 rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14);
1125 rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x14);
1126 } else {
1127 rtl818x_iowrite8(priv, &priv->map->SLOT, 0x14);
1128 rtl818x_iowrite8(priv, &priv->map->DIFS, 0x24);
1129 rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x24);
1130 }
1131 }
1132}
1133
1134static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
1135 struct ieee80211_vif *vif,
1136 struct ieee80211_bss_conf *info,
1137 u32 changed)
1138{
1139 struct rtl8187_priv *priv = dev->priv;
1140
1141 if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_ERP_PREAMBLE))
1142 rtl8187_conf_erp(priv, info->use_short_slot,
1143 info->use_short_preamble);
1144}
1145
947static void rtl8187_configure_filter(struct ieee80211_hw *dev, 1146static void rtl8187_configure_filter(struct ieee80211_hw *dev,
948 unsigned int changed_flags, 1147 unsigned int changed_flags,
949 unsigned int *total_flags, 1148 unsigned int *total_flags,
@@ -976,6 +1175,42 @@ static void rtl8187_configure_filter(struct ieee80211_hw *dev,
976 rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf); 1175 rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf);
977} 1176}
978 1177
1178static int rtl8187_conf_tx(struct ieee80211_hw *dev, u16 queue,
1179 const struct ieee80211_tx_queue_params *params)
1180{
1181 struct rtl8187_priv *priv = dev->priv;
1182 u8 cw_min, cw_max;
1183
1184 if (queue > 3)
1185 return -EINVAL;
1186
1187 cw_min = fls(params->cw_min);
1188 cw_max = fls(params->cw_max);
1189
1190 if (priv->is_rtl8187b) {
1191 priv->aifsn[queue] = params->aifs;
1192
1193 /*
1194 * This is the structure of AC_*_PARAM registers in 8187B:
1195 * - TXOP limit field, bit offset = 16
1196 * - ECWmax, bit offset = 12
1197 * - ECWmin, bit offset = 8
1198 * - AIFS, bit offset = 0
1199 */
1200 rtl818x_iowrite32(priv, rtl8187b_ac_addr[queue],
1201 (params->txop << 16) | (cw_max << 12) |
1202 (cw_min << 8) | (params->aifs *
1203 priv->slot_time + SIFS_TIME));
1204 } else {
1205 if (queue != 0)
1206 return -EINVAL;
1207
1208 rtl818x_iowrite8(priv, &priv->map->CW_VAL,
1209 cw_min | (cw_max << 4));
1210 }
1211 return 0;
1212}
1213
979static const struct ieee80211_ops rtl8187_ops = { 1214static const struct ieee80211_ops rtl8187_ops = {
980 .tx = rtl8187_tx, 1215 .tx = rtl8187_tx,
981 .start = rtl8187_start, 1216 .start = rtl8187_start,
@@ -984,7 +1219,9 @@ static const struct ieee80211_ops rtl8187_ops = {
984 .remove_interface = rtl8187_remove_interface, 1219 .remove_interface = rtl8187_remove_interface,
985 .config = rtl8187_config, 1220 .config = rtl8187_config,
986 .config_interface = rtl8187_config_interface, 1221 .config_interface = rtl8187_config_interface,
1222 .bss_info_changed = rtl8187_bss_info_changed,
987 .configure_filter = rtl8187_configure_filter, 1223 .configure_filter = rtl8187_configure_filter,
1224 .conf_tx = rtl8187_conf_tx
988}; 1225};
989 1226
990static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom) 1227static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom)
@@ -1029,7 +1266,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1029 const char *chip_name; 1266 const char *chip_name;
1030 u16 txpwr, reg; 1267 u16 txpwr, reg;
1031 int err, i; 1268 int err, i;
1032 DECLARE_MAC_BUF(mac);
1033 1269
1034 dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops); 1270 dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops);
1035 if (!dev) { 1271 if (!dev) {
@@ -1065,6 +1301,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1065 1301
1066 priv->mode = NL80211_IFTYPE_MONITOR; 1302 priv->mode = NL80211_IFTYPE_MONITOR;
1067 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1303 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1304 IEEE80211_HW_SIGNAL_DBM |
1068 IEEE80211_HW_RX_INCLUDES_FCS; 1305 IEEE80211_HW_RX_INCLUDES_FCS;
1069 1306
1070 eeprom.data = dev; 1307 eeprom.data = dev;
@@ -1180,16 +1417,13 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1180 (*channel++).hw_value = txpwr >> 8; 1417 (*channel++).hw_value = txpwr >> 8;
1181 } 1418 }
1182 1419
1183 if (priv->is_rtl8187b) { 1420 if (priv->is_rtl8187b)
1184 printk(KERN_WARNING "rtl8187: 8187B chip detected. Support " 1421 printk(KERN_WARNING "rtl8187: 8187B chip detected.\n");
1185 "is EXPERIMENTAL, and could damage your\n"
1186 " hardware, use at your own risk\n");
1187 dev->flags |= IEEE80211_HW_SIGNAL_DBM;
1188 } else {
1189 dev->flags |= IEEE80211_HW_SIGNAL_UNSPEC;
1190 dev->max_signal = 65;
1191 }
1192 1422
1423 /*
1424 * XXX: Once this driver supports anything that requires
1425 * beacons it must implement IEEE80211_TX_CTL_ASSIGN_SEQ.
1426 */
1193 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1427 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1194 1428
1195 if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b) 1429 if ((id->driver_info == DEVICE_RTL8187) && priv->is_rtl8187b)
@@ -1211,9 +1445,10 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1211 goto err_free_dev; 1445 goto err_free_dev;
1212 } 1446 }
1213 mutex_init(&priv->conf_mutex); 1447 mutex_init(&priv->conf_mutex);
1448 skb_queue_head_init(&priv->b_tx_status.queue);
1214 1449
1215 printk(KERN_INFO "%s: hwaddr %s, %s V%d + %s\n", 1450 printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s\n",
1216 wiphy_name(dev->wiphy), print_mac(mac, dev->wiphy->perm_addr), 1451 wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
1217 chip_name, priv->asic_rev, priv->rf->name); 1452 chip_name, priv->asic_rev, priv->rf->name);
1218 1453
1219 return 0; 1454 return 0;
diff --git a/drivers/net/wireless/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
index 1bae89903410..4e75e8e7fa90 100644
--- a/drivers/net/wireless/rtl8187_rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
@@ -64,7 +64,6 @@ static void rtl8225_write_bitbang(struct ieee80211_hw *dev, u8 addr, u16 data)
64 64
65 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); 65 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
66 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84); 66 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
67 msleep(2);
68} 67}
69 68
70static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data) 69static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data)
@@ -98,7 +97,6 @@ static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data)
98 97
99 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); 98 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
100 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84); 99 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
101 msleep(2);
102} 100}
103 101
104static void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data) 102static void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data)
@@ -333,21 +331,21 @@ static void rtl8225_rf_init(struct ieee80211_hw *dev)
333 struct rtl8187_priv *priv = dev->priv; 331 struct rtl8187_priv *priv = dev->priv;
334 int i; 332 int i;
335 333
336 rtl8225_write(dev, 0x0, 0x067); msleep(1); 334 rtl8225_write(dev, 0x0, 0x067);
337 rtl8225_write(dev, 0x1, 0xFE0); msleep(1); 335 rtl8225_write(dev, 0x1, 0xFE0);
338 rtl8225_write(dev, 0x2, 0x44D); msleep(1); 336 rtl8225_write(dev, 0x2, 0x44D);
339 rtl8225_write(dev, 0x3, 0x441); msleep(1); 337 rtl8225_write(dev, 0x3, 0x441);
340 rtl8225_write(dev, 0x4, 0x486); msleep(1); 338 rtl8225_write(dev, 0x4, 0x486);
341 rtl8225_write(dev, 0x5, 0xBC0); msleep(1); 339 rtl8225_write(dev, 0x5, 0xBC0);
342 rtl8225_write(dev, 0x6, 0xAE6); msleep(1); 340 rtl8225_write(dev, 0x6, 0xAE6);
343 rtl8225_write(dev, 0x7, 0x82A); msleep(1); 341 rtl8225_write(dev, 0x7, 0x82A);
344 rtl8225_write(dev, 0x8, 0x01F); msleep(1); 342 rtl8225_write(dev, 0x8, 0x01F);
345 rtl8225_write(dev, 0x9, 0x334); msleep(1); 343 rtl8225_write(dev, 0x9, 0x334);
346 rtl8225_write(dev, 0xA, 0xFD4); msleep(1); 344 rtl8225_write(dev, 0xA, 0xFD4);
347 rtl8225_write(dev, 0xB, 0x391); msleep(1); 345 rtl8225_write(dev, 0xB, 0x391);
348 rtl8225_write(dev, 0xC, 0x050); msleep(1); 346 rtl8225_write(dev, 0xC, 0x050);
349 rtl8225_write(dev, 0xD, 0x6DB); msleep(1); 347 rtl8225_write(dev, 0xD, 0x6DB);
350 rtl8225_write(dev, 0xE, 0x029); msleep(1); 348 rtl8225_write(dev, 0xE, 0x029);
351 rtl8225_write(dev, 0xF, 0x914); msleep(100); 349 rtl8225_write(dev, 0xF, 0x914); msleep(100);
352 350
353 rtl8225_write(dev, 0x2, 0xC4D); msleep(200); 351 rtl8225_write(dev, 0x2, 0xC4D); msleep(200);
@@ -375,91 +373,89 @@ static void rtl8225_rf_init(struct ieee80211_hw *dev)
375 373
376 for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) { 374 for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) {
377 rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]); 375 rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]);
378 msleep(1);
379 rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i); 376 rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i);
380 msleep(1);
381 } 377 }
382 378
383 msleep(1); 379 msleep(1);
384 380
385 rtl8225_write_phy_ofdm(dev, 0x00, 0x01); msleep(1); 381 rtl8225_write_phy_ofdm(dev, 0x00, 0x01);
386 rtl8225_write_phy_ofdm(dev, 0x01, 0x02); msleep(1); 382 rtl8225_write_phy_ofdm(dev, 0x01, 0x02);
387 rtl8225_write_phy_ofdm(dev, 0x02, 0x42); msleep(1); 383 rtl8225_write_phy_ofdm(dev, 0x02, 0x42);
388 rtl8225_write_phy_ofdm(dev, 0x03, 0x00); msleep(1); 384 rtl8225_write_phy_ofdm(dev, 0x03, 0x00);
389 rtl8225_write_phy_ofdm(dev, 0x04, 0x00); msleep(1); 385 rtl8225_write_phy_ofdm(dev, 0x04, 0x00);
390 rtl8225_write_phy_ofdm(dev, 0x05, 0x00); msleep(1); 386 rtl8225_write_phy_ofdm(dev, 0x05, 0x00);
391 rtl8225_write_phy_ofdm(dev, 0x06, 0x40); msleep(1); 387 rtl8225_write_phy_ofdm(dev, 0x06, 0x40);
392 rtl8225_write_phy_ofdm(dev, 0x07, 0x00); msleep(1); 388 rtl8225_write_phy_ofdm(dev, 0x07, 0x00);
393 rtl8225_write_phy_ofdm(dev, 0x08, 0x40); msleep(1); 389 rtl8225_write_phy_ofdm(dev, 0x08, 0x40);
394 rtl8225_write_phy_ofdm(dev, 0x09, 0xfe); msleep(1); 390 rtl8225_write_phy_ofdm(dev, 0x09, 0xfe);
395 rtl8225_write_phy_ofdm(dev, 0x0a, 0x09); msleep(1); 391 rtl8225_write_phy_ofdm(dev, 0x0a, 0x09);
396 rtl8225_write_phy_ofdm(dev, 0x0b, 0x80); msleep(1); 392 rtl8225_write_phy_ofdm(dev, 0x0b, 0x80);
397 rtl8225_write_phy_ofdm(dev, 0x0c, 0x01); msleep(1); 393 rtl8225_write_phy_ofdm(dev, 0x0c, 0x01);
398 rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3); msleep(1); 394 rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3);
399 rtl8225_write_phy_ofdm(dev, 0x0f, 0x38); msleep(1); 395 rtl8225_write_phy_ofdm(dev, 0x0f, 0x38);
400 rtl8225_write_phy_ofdm(dev, 0x10, 0x84); msleep(1); 396 rtl8225_write_phy_ofdm(dev, 0x10, 0x84);
401 rtl8225_write_phy_ofdm(dev, 0x11, 0x06); msleep(1); 397 rtl8225_write_phy_ofdm(dev, 0x11, 0x06);
402 rtl8225_write_phy_ofdm(dev, 0x12, 0x20); msleep(1); 398 rtl8225_write_phy_ofdm(dev, 0x12, 0x20);
403 rtl8225_write_phy_ofdm(dev, 0x13, 0x20); msleep(1); 399 rtl8225_write_phy_ofdm(dev, 0x13, 0x20);
404 rtl8225_write_phy_ofdm(dev, 0x14, 0x00); msleep(1); 400 rtl8225_write_phy_ofdm(dev, 0x14, 0x00);
405 rtl8225_write_phy_ofdm(dev, 0x15, 0x40); msleep(1); 401 rtl8225_write_phy_ofdm(dev, 0x15, 0x40);
406 rtl8225_write_phy_ofdm(dev, 0x16, 0x00); msleep(1); 402 rtl8225_write_phy_ofdm(dev, 0x16, 0x00);
407 rtl8225_write_phy_ofdm(dev, 0x17, 0x40); msleep(1); 403 rtl8225_write_phy_ofdm(dev, 0x17, 0x40);
408 rtl8225_write_phy_ofdm(dev, 0x18, 0xef); msleep(1); 404 rtl8225_write_phy_ofdm(dev, 0x18, 0xef);
409 rtl8225_write_phy_ofdm(dev, 0x19, 0x19); msleep(1); 405 rtl8225_write_phy_ofdm(dev, 0x19, 0x19);
410 rtl8225_write_phy_ofdm(dev, 0x1a, 0x20); msleep(1); 406 rtl8225_write_phy_ofdm(dev, 0x1a, 0x20);
411 rtl8225_write_phy_ofdm(dev, 0x1b, 0x76); msleep(1); 407 rtl8225_write_phy_ofdm(dev, 0x1b, 0x76);
412 rtl8225_write_phy_ofdm(dev, 0x1c, 0x04); msleep(1); 408 rtl8225_write_phy_ofdm(dev, 0x1c, 0x04);
413 rtl8225_write_phy_ofdm(dev, 0x1e, 0x95); msleep(1); 409 rtl8225_write_phy_ofdm(dev, 0x1e, 0x95);
414 rtl8225_write_phy_ofdm(dev, 0x1f, 0x75); msleep(1); 410 rtl8225_write_phy_ofdm(dev, 0x1f, 0x75);
415 rtl8225_write_phy_ofdm(dev, 0x20, 0x1f); msleep(1); 411 rtl8225_write_phy_ofdm(dev, 0x20, 0x1f);
416 rtl8225_write_phy_ofdm(dev, 0x21, 0x27); msleep(1); 412 rtl8225_write_phy_ofdm(dev, 0x21, 0x27);
417 rtl8225_write_phy_ofdm(dev, 0x22, 0x16); msleep(1); 413 rtl8225_write_phy_ofdm(dev, 0x22, 0x16);
418 rtl8225_write_phy_ofdm(dev, 0x24, 0x46); msleep(1); 414 rtl8225_write_phy_ofdm(dev, 0x24, 0x46);
419 rtl8225_write_phy_ofdm(dev, 0x25, 0x20); msleep(1); 415 rtl8225_write_phy_ofdm(dev, 0x25, 0x20);
420 rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1); 416 rtl8225_write_phy_ofdm(dev, 0x26, 0x90);
421 rtl8225_write_phy_ofdm(dev, 0x27, 0x88); msleep(1); 417 rtl8225_write_phy_ofdm(dev, 0x27, 0x88);
422 418
423 rtl8225_write_phy_ofdm(dev, 0x0d, rtl8225_gain[2 * 4]); 419 rtl8225_write_phy_ofdm(dev, 0x0d, rtl8225_gain[2 * 4]);
424 rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225_gain[2 * 4 + 2]); 420 rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225_gain[2 * 4 + 2]);
425 rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225_gain[2 * 4 + 3]); 421 rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225_gain[2 * 4 + 3]);
426 rtl8225_write_phy_ofdm(dev, 0x23, rtl8225_gain[2 * 4 + 1]); 422 rtl8225_write_phy_ofdm(dev, 0x23, rtl8225_gain[2 * 4 + 1]);
427 423
428 rtl8225_write_phy_cck(dev, 0x00, 0x98); msleep(1); 424 rtl8225_write_phy_cck(dev, 0x00, 0x98);
429 rtl8225_write_phy_cck(dev, 0x03, 0x20); msleep(1); 425 rtl8225_write_phy_cck(dev, 0x03, 0x20);
430 rtl8225_write_phy_cck(dev, 0x04, 0x7e); msleep(1); 426 rtl8225_write_phy_cck(dev, 0x04, 0x7e);
431 rtl8225_write_phy_cck(dev, 0x05, 0x12); msleep(1); 427 rtl8225_write_phy_cck(dev, 0x05, 0x12);
432 rtl8225_write_phy_cck(dev, 0x06, 0xfc); msleep(1); 428 rtl8225_write_phy_cck(dev, 0x06, 0xfc);
433 rtl8225_write_phy_cck(dev, 0x07, 0x78); msleep(1); 429 rtl8225_write_phy_cck(dev, 0x07, 0x78);
434 rtl8225_write_phy_cck(dev, 0x08, 0x2e); msleep(1); 430 rtl8225_write_phy_cck(dev, 0x08, 0x2e);
435 rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1); 431 rtl8225_write_phy_cck(dev, 0x10, 0x9b);
436 rtl8225_write_phy_cck(dev, 0x11, 0x88); msleep(1); 432 rtl8225_write_phy_cck(dev, 0x11, 0x88);
437 rtl8225_write_phy_cck(dev, 0x12, 0x47); msleep(1); 433 rtl8225_write_phy_cck(dev, 0x12, 0x47);
438 rtl8225_write_phy_cck(dev, 0x13, 0xd0); 434 rtl8225_write_phy_cck(dev, 0x13, 0xd0);
439 rtl8225_write_phy_cck(dev, 0x19, 0x00); 435 rtl8225_write_phy_cck(dev, 0x19, 0x00);
440 rtl8225_write_phy_cck(dev, 0x1a, 0xa0); 436 rtl8225_write_phy_cck(dev, 0x1a, 0xa0);
441 rtl8225_write_phy_cck(dev, 0x1b, 0x08); 437 rtl8225_write_phy_cck(dev, 0x1b, 0x08);
442 rtl8225_write_phy_cck(dev, 0x40, 0x86); 438 rtl8225_write_phy_cck(dev, 0x40, 0x86);
443 rtl8225_write_phy_cck(dev, 0x41, 0x8d); msleep(1); 439 rtl8225_write_phy_cck(dev, 0x41, 0x8d);
444 rtl8225_write_phy_cck(dev, 0x42, 0x15); msleep(1); 440 rtl8225_write_phy_cck(dev, 0x42, 0x15);
445 rtl8225_write_phy_cck(dev, 0x43, 0x18); msleep(1); 441 rtl8225_write_phy_cck(dev, 0x43, 0x18);
446 rtl8225_write_phy_cck(dev, 0x44, 0x1f); msleep(1); 442 rtl8225_write_phy_cck(dev, 0x44, 0x1f);
447 rtl8225_write_phy_cck(dev, 0x45, 0x1e); msleep(1); 443 rtl8225_write_phy_cck(dev, 0x45, 0x1e);
448 rtl8225_write_phy_cck(dev, 0x46, 0x1a); msleep(1); 444 rtl8225_write_phy_cck(dev, 0x46, 0x1a);
449 rtl8225_write_phy_cck(dev, 0x47, 0x15); msleep(1); 445 rtl8225_write_phy_cck(dev, 0x47, 0x15);
450 rtl8225_write_phy_cck(dev, 0x48, 0x10); msleep(1); 446 rtl8225_write_phy_cck(dev, 0x48, 0x10);
451 rtl8225_write_phy_cck(dev, 0x49, 0x0a); msleep(1); 447 rtl8225_write_phy_cck(dev, 0x49, 0x0a);
452 rtl8225_write_phy_cck(dev, 0x4a, 0x05); msleep(1); 448 rtl8225_write_phy_cck(dev, 0x4a, 0x05);
453 rtl8225_write_phy_cck(dev, 0x4b, 0x02); msleep(1); 449 rtl8225_write_phy_cck(dev, 0x4b, 0x02);
454 rtl8225_write_phy_cck(dev, 0x4c, 0x05); msleep(1); 450 rtl8225_write_phy_cck(dev, 0x4c, 0x05);
455 451
456 rtl818x_iowrite8(priv, &priv->map->TESTR, 0x0D); 452 rtl818x_iowrite8(priv, &priv->map->TESTR, 0x0D);
457 453
458 rtl8225_rf_set_tx_power(dev, 1); 454 rtl8225_rf_set_tx_power(dev, 1);
459 455
460 /* RX antenna default to A */ 456 /* RX antenna default to A */
461 rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1); /* B: 0xDB */ 457 rtl8225_write_phy_cck(dev, 0x10, 0x9b); /* B: 0xDB */
462 rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1); /* B: 0x10 */ 458 rtl8225_write_phy_ofdm(dev, 0x26, 0x90); /* B: 0x10 */
463 459
464 rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */ 460 rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */
465 msleep(1); 461 msleep(1);
@@ -629,7 +625,7 @@ static void rtl8225z2_b_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
629 rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++); 625 rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++);
630 626
631 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 627 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
632 rtl8225z2_tx_gain_cck_ofdm[cck_power]); 628 rtl8225z2_tx_gain_cck_ofdm[cck_power] << 1);
633 msleep(1); 629 msleep(1);
634 630
635 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 631 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
@@ -687,22 +683,23 @@ static void rtl8225z2_rf_init(struct ieee80211_hw *dev)
687 struct rtl8187_priv *priv = dev->priv; 683 struct rtl8187_priv *priv = dev->priv;
688 int i; 684 int i;
689 685
690 rtl8225_write(dev, 0x0, 0x2BF); msleep(1); 686 rtl8225_write(dev, 0x0, 0x2BF);
691 rtl8225_write(dev, 0x1, 0xEE0); msleep(1); 687 rtl8225_write(dev, 0x1, 0xEE0);
692 rtl8225_write(dev, 0x2, 0x44D); msleep(1); 688 rtl8225_write(dev, 0x2, 0x44D);
693 rtl8225_write(dev, 0x3, 0x441); msleep(1); 689 rtl8225_write(dev, 0x3, 0x441);
694 rtl8225_write(dev, 0x4, 0x8C3); msleep(1); 690 rtl8225_write(dev, 0x4, 0x8C3);
695 rtl8225_write(dev, 0x5, 0xC72); msleep(1); 691 rtl8225_write(dev, 0x5, 0xC72);
696 rtl8225_write(dev, 0x6, 0x0E6); msleep(1); 692 rtl8225_write(dev, 0x6, 0x0E6);
697 rtl8225_write(dev, 0x7, 0x82A); msleep(1); 693 rtl8225_write(dev, 0x7, 0x82A);
698 rtl8225_write(dev, 0x8, 0x03F); msleep(1); 694 rtl8225_write(dev, 0x8, 0x03F);
699 rtl8225_write(dev, 0x9, 0x335); msleep(1); 695 rtl8225_write(dev, 0x9, 0x335);
700 rtl8225_write(dev, 0xa, 0x9D4); msleep(1); 696 rtl8225_write(dev, 0xa, 0x9D4);
701 rtl8225_write(dev, 0xb, 0x7BB); msleep(1); 697 rtl8225_write(dev, 0xb, 0x7BB);
702 rtl8225_write(dev, 0xc, 0x850); msleep(1); 698 rtl8225_write(dev, 0xc, 0x850);
703 rtl8225_write(dev, 0xd, 0xCDF); msleep(1); 699 rtl8225_write(dev, 0xd, 0xCDF);
704 rtl8225_write(dev, 0xe, 0x02B); msleep(1); 700 rtl8225_write(dev, 0xe, 0x02B);
705 rtl8225_write(dev, 0xf, 0x114); msleep(100); 701 rtl8225_write(dev, 0xf, 0x114);
702 msleep(100);
706 703
707 rtl8225_write(dev, 0x0, 0x1B7); 704 rtl8225_write(dev, 0x0, 0x1B7);
708 705
@@ -736,94 +733,92 @@ static void rtl8225z2_rf_init(struct ieee80211_hw *dev)
736 733
737 for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) { 734 for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) {
738 rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]); 735 rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]);
739 msleep(1);
740 rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i); 736 rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i);
741 msleep(1);
742 } 737 }
743 738
744 msleep(1); 739 msleep(1);
745 740
746 rtl8225_write_phy_ofdm(dev, 0x00, 0x01); msleep(1); 741 rtl8225_write_phy_ofdm(dev, 0x00, 0x01);
747 rtl8225_write_phy_ofdm(dev, 0x01, 0x02); msleep(1); 742 rtl8225_write_phy_ofdm(dev, 0x01, 0x02);
748 rtl8225_write_phy_ofdm(dev, 0x02, 0x42); msleep(1); 743 rtl8225_write_phy_ofdm(dev, 0x02, 0x42);
749 rtl8225_write_phy_ofdm(dev, 0x03, 0x00); msleep(1); 744 rtl8225_write_phy_ofdm(dev, 0x03, 0x00);
750 rtl8225_write_phy_ofdm(dev, 0x04, 0x00); msleep(1); 745 rtl8225_write_phy_ofdm(dev, 0x04, 0x00);
751 rtl8225_write_phy_ofdm(dev, 0x05, 0x00); msleep(1); 746 rtl8225_write_phy_ofdm(dev, 0x05, 0x00);
752 rtl8225_write_phy_ofdm(dev, 0x06, 0x40); msleep(1); 747 rtl8225_write_phy_ofdm(dev, 0x06, 0x40);
753 rtl8225_write_phy_ofdm(dev, 0x07, 0x00); msleep(1); 748 rtl8225_write_phy_ofdm(dev, 0x07, 0x00);
754 rtl8225_write_phy_ofdm(dev, 0x08, 0x40); msleep(1); 749 rtl8225_write_phy_ofdm(dev, 0x08, 0x40);
755 rtl8225_write_phy_ofdm(dev, 0x09, 0xfe); msleep(1); 750 rtl8225_write_phy_ofdm(dev, 0x09, 0xfe);
756 rtl8225_write_phy_ofdm(dev, 0x0a, 0x08); msleep(1); 751 rtl8225_write_phy_ofdm(dev, 0x0a, 0x08);
757 rtl8225_write_phy_ofdm(dev, 0x0b, 0x80); msleep(1); 752 rtl8225_write_phy_ofdm(dev, 0x0b, 0x80);
758 rtl8225_write_phy_ofdm(dev, 0x0c, 0x01); msleep(1); 753 rtl8225_write_phy_ofdm(dev, 0x0c, 0x01);
759 rtl8225_write_phy_ofdm(dev, 0x0d, 0x43); 754 rtl8225_write_phy_ofdm(dev, 0x0d, 0x43);
760 rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3); msleep(1); 755 rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3);
761 rtl8225_write_phy_ofdm(dev, 0x0f, 0x38); msleep(1); 756 rtl8225_write_phy_ofdm(dev, 0x0f, 0x38);
762 rtl8225_write_phy_ofdm(dev, 0x10, 0x84); msleep(1); 757 rtl8225_write_phy_ofdm(dev, 0x10, 0x84);
763 rtl8225_write_phy_ofdm(dev, 0x11, 0x07); msleep(1); 758 rtl8225_write_phy_ofdm(dev, 0x11, 0x07);
764 rtl8225_write_phy_ofdm(dev, 0x12, 0x20); msleep(1); 759 rtl8225_write_phy_ofdm(dev, 0x12, 0x20);
765 rtl8225_write_phy_ofdm(dev, 0x13, 0x20); msleep(1); 760 rtl8225_write_phy_ofdm(dev, 0x13, 0x20);
766 rtl8225_write_phy_ofdm(dev, 0x14, 0x00); msleep(1); 761 rtl8225_write_phy_ofdm(dev, 0x14, 0x00);
767 rtl8225_write_phy_ofdm(dev, 0x15, 0x40); msleep(1); 762 rtl8225_write_phy_ofdm(dev, 0x15, 0x40);
768 rtl8225_write_phy_ofdm(dev, 0x16, 0x00); msleep(1); 763 rtl8225_write_phy_ofdm(dev, 0x16, 0x00);
769 rtl8225_write_phy_ofdm(dev, 0x17, 0x40); msleep(1); 764 rtl8225_write_phy_ofdm(dev, 0x17, 0x40);
770 rtl8225_write_phy_ofdm(dev, 0x18, 0xef); msleep(1); 765 rtl8225_write_phy_ofdm(dev, 0x18, 0xef);
771 rtl8225_write_phy_ofdm(dev, 0x19, 0x19); msleep(1); 766 rtl8225_write_phy_ofdm(dev, 0x19, 0x19);
772 rtl8225_write_phy_ofdm(dev, 0x1a, 0x20); msleep(1); 767 rtl8225_write_phy_ofdm(dev, 0x1a, 0x20);
773 rtl8225_write_phy_ofdm(dev, 0x1b, 0x15); msleep(1); 768 rtl8225_write_phy_ofdm(dev, 0x1b, 0x15);
774 rtl8225_write_phy_ofdm(dev, 0x1c, 0x04); msleep(1); 769 rtl8225_write_phy_ofdm(dev, 0x1c, 0x04);
775 rtl8225_write_phy_ofdm(dev, 0x1d, 0xc5); msleep(1); 770 rtl8225_write_phy_ofdm(dev, 0x1d, 0xc5);
776 rtl8225_write_phy_ofdm(dev, 0x1e, 0x95); msleep(1); 771 rtl8225_write_phy_ofdm(dev, 0x1e, 0x95);
777 rtl8225_write_phy_ofdm(dev, 0x1f, 0x75); msleep(1); 772 rtl8225_write_phy_ofdm(dev, 0x1f, 0x75);
778 rtl8225_write_phy_ofdm(dev, 0x20, 0x1f); msleep(1); 773 rtl8225_write_phy_ofdm(dev, 0x20, 0x1f);
779 rtl8225_write_phy_ofdm(dev, 0x21, 0x17); msleep(1); 774 rtl8225_write_phy_ofdm(dev, 0x21, 0x17);
780 rtl8225_write_phy_ofdm(dev, 0x22, 0x16); msleep(1); 775 rtl8225_write_phy_ofdm(dev, 0x22, 0x16);
781 rtl8225_write_phy_ofdm(dev, 0x23, 0x80); msleep(1); //FIXME: not needed? 776 rtl8225_write_phy_ofdm(dev, 0x23, 0x80);
782 rtl8225_write_phy_ofdm(dev, 0x24, 0x46); msleep(1); 777 rtl8225_write_phy_ofdm(dev, 0x24, 0x46);
783 rtl8225_write_phy_ofdm(dev, 0x25, 0x00); msleep(1); 778 rtl8225_write_phy_ofdm(dev, 0x25, 0x00);
784 rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1); 779 rtl8225_write_phy_ofdm(dev, 0x26, 0x90);
785 rtl8225_write_phy_ofdm(dev, 0x27, 0x88); msleep(1); 780 rtl8225_write_phy_ofdm(dev, 0x27, 0x88);
786 781
787 rtl8225_write_phy_ofdm(dev, 0x0b, rtl8225z2_gain_bg[4 * 3]); 782 rtl8225_write_phy_ofdm(dev, 0x0b, rtl8225z2_gain_bg[4 * 3]);
788 rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225z2_gain_bg[4 * 3 + 1]); 783 rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225z2_gain_bg[4 * 3 + 1]);
789 rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225z2_gain_bg[4 * 3 + 2]); 784 rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225z2_gain_bg[4 * 3 + 2]);
790 rtl8225_write_phy_ofdm(dev, 0x21, 0x37); 785 rtl8225_write_phy_ofdm(dev, 0x21, 0x37);
791 786
792 rtl8225_write_phy_cck(dev, 0x00, 0x98); msleep(1); 787 rtl8225_write_phy_cck(dev, 0x00, 0x98);
793 rtl8225_write_phy_cck(dev, 0x03, 0x20); msleep(1); 788 rtl8225_write_phy_cck(dev, 0x03, 0x20);
794 rtl8225_write_phy_cck(dev, 0x04, 0x7e); msleep(1); 789 rtl8225_write_phy_cck(dev, 0x04, 0x7e);
795 rtl8225_write_phy_cck(dev, 0x05, 0x12); msleep(1); 790 rtl8225_write_phy_cck(dev, 0x05, 0x12);
796 rtl8225_write_phy_cck(dev, 0x06, 0xfc); msleep(1); 791 rtl8225_write_phy_cck(dev, 0x06, 0xfc);
797 rtl8225_write_phy_cck(dev, 0x07, 0x78); msleep(1); 792 rtl8225_write_phy_cck(dev, 0x07, 0x78);
798 rtl8225_write_phy_cck(dev, 0x08, 0x2e); msleep(1); 793 rtl8225_write_phy_cck(dev, 0x08, 0x2e);
799 rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1); 794 rtl8225_write_phy_cck(dev, 0x10, 0x9b);
800 rtl8225_write_phy_cck(dev, 0x11, 0x88); msleep(1); 795 rtl8225_write_phy_cck(dev, 0x11, 0x88);
801 rtl8225_write_phy_cck(dev, 0x12, 0x47); msleep(1); 796 rtl8225_write_phy_cck(dev, 0x12, 0x47);
802 rtl8225_write_phy_cck(dev, 0x13, 0xd0); 797 rtl8225_write_phy_cck(dev, 0x13, 0xd0);
803 rtl8225_write_phy_cck(dev, 0x19, 0x00); 798 rtl8225_write_phy_cck(dev, 0x19, 0x00);
804 rtl8225_write_phy_cck(dev, 0x1a, 0xa0); 799 rtl8225_write_phy_cck(dev, 0x1a, 0xa0);
805 rtl8225_write_phy_cck(dev, 0x1b, 0x08); 800 rtl8225_write_phy_cck(dev, 0x1b, 0x08);
806 rtl8225_write_phy_cck(dev, 0x40, 0x86); 801 rtl8225_write_phy_cck(dev, 0x40, 0x86);
807 rtl8225_write_phy_cck(dev, 0x41, 0x8d); msleep(1); 802 rtl8225_write_phy_cck(dev, 0x41, 0x8d);
808 rtl8225_write_phy_cck(dev, 0x42, 0x15); msleep(1); 803 rtl8225_write_phy_cck(dev, 0x42, 0x15);
809 rtl8225_write_phy_cck(dev, 0x43, 0x18); msleep(1); 804 rtl8225_write_phy_cck(dev, 0x43, 0x18);
810 rtl8225_write_phy_cck(dev, 0x44, 0x36); msleep(1); 805 rtl8225_write_phy_cck(dev, 0x44, 0x36);
811 rtl8225_write_phy_cck(dev, 0x45, 0x35); msleep(1); 806 rtl8225_write_phy_cck(dev, 0x45, 0x35);
812 rtl8225_write_phy_cck(dev, 0x46, 0x2e); msleep(1); 807 rtl8225_write_phy_cck(dev, 0x46, 0x2e);
813 rtl8225_write_phy_cck(dev, 0x47, 0x25); msleep(1); 808 rtl8225_write_phy_cck(dev, 0x47, 0x25);
814 rtl8225_write_phy_cck(dev, 0x48, 0x1c); msleep(1); 809 rtl8225_write_phy_cck(dev, 0x48, 0x1c);
815 rtl8225_write_phy_cck(dev, 0x49, 0x12); msleep(1); 810 rtl8225_write_phy_cck(dev, 0x49, 0x12);
816 rtl8225_write_phy_cck(dev, 0x4a, 0x09); msleep(1); 811 rtl8225_write_phy_cck(dev, 0x4a, 0x09);
817 rtl8225_write_phy_cck(dev, 0x4b, 0x04); msleep(1); 812 rtl8225_write_phy_cck(dev, 0x4b, 0x04);
818 rtl8225_write_phy_cck(dev, 0x4c, 0x05); msleep(1); 813 rtl8225_write_phy_cck(dev, 0x4c, 0x05);
819 814
820 rtl818x_iowrite8(priv, (u8 *)0xFF5B, 0x0D); msleep(1); 815 rtl818x_iowrite8(priv, (u8 *)0xFF5B, 0x0D); msleep(1);
821 816
822 rtl8225z2_rf_set_tx_power(dev, 1); 817 rtl8225z2_rf_set_tx_power(dev, 1);
823 818
824 /* RX antenna default to A */ 819 /* RX antenna default to A */
825 rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1); /* B: 0xDB */ 820 rtl8225_write_phy_cck(dev, 0x10, 0x9b); /* B: 0xDB */
826 rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1); /* B: 0x10 */ 821 rtl8225_write_phy_ofdm(dev, 0x26, 0x90); /* B: 0x10 */
827 822
828 rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */ 823 rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */
829 msleep(1); 824 msleep(1);
@@ -835,40 +830,38 @@ static void rtl8225z2_b_rf_init(struct ieee80211_hw *dev)
835 struct rtl8187_priv *priv = dev->priv; 830 struct rtl8187_priv *priv = dev->priv;
836 int i; 831 int i;
837 832
838 rtl8225_write(dev, 0x0, 0x0B7); msleep(1); 833 rtl8225_write(dev, 0x0, 0x0B7);
839 rtl8225_write(dev, 0x1, 0xEE0); msleep(1); 834 rtl8225_write(dev, 0x1, 0xEE0);
840 rtl8225_write(dev, 0x2, 0x44D); msleep(1); 835 rtl8225_write(dev, 0x2, 0x44D);
841 rtl8225_write(dev, 0x3, 0x441); msleep(1); 836 rtl8225_write(dev, 0x3, 0x441);
842 rtl8225_write(dev, 0x4, 0x8C3); msleep(1); 837 rtl8225_write(dev, 0x4, 0x8C3);
843 rtl8225_write(dev, 0x5, 0xC72); msleep(1); 838 rtl8225_write(dev, 0x5, 0xC72);
844 rtl8225_write(dev, 0x6, 0x0E6); msleep(1); 839 rtl8225_write(dev, 0x6, 0x0E6);
845 rtl8225_write(dev, 0x7, 0x82A); msleep(1); 840 rtl8225_write(dev, 0x7, 0x82A);
846 rtl8225_write(dev, 0x8, 0x03F); msleep(1); 841 rtl8225_write(dev, 0x8, 0x03F);
847 rtl8225_write(dev, 0x9, 0x335); msleep(1); 842 rtl8225_write(dev, 0x9, 0x335);
848 rtl8225_write(dev, 0xa, 0x9D4); msleep(1); 843 rtl8225_write(dev, 0xa, 0x9D4);
849 rtl8225_write(dev, 0xb, 0x7BB); msleep(1); 844 rtl8225_write(dev, 0xb, 0x7BB);
850 rtl8225_write(dev, 0xc, 0x850); msleep(1); 845 rtl8225_write(dev, 0xc, 0x850);
851 rtl8225_write(dev, 0xd, 0xCDF); msleep(1); 846 rtl8225_write(dev, 0xd, 0xCDF);
852 rtl8225_write(dev, 0xe, 0x02B); msleep(1); 847 rtl8225_write(dev, 0xe, 0x02B);
853 rtl8225_write(dev, 0xf, 0x114); msleep(1); 848 rtl8225_write(dev, 0xf, 0x114);
854 849
855 rtl8225_write(dev, 0x0, 0x1B7); msleep(1); 850 rtl8225_write(dev, 0x0, 0x1B7);
856 851
857 for (i = 0; i < ARRAY_SIZE(rtl8225z2_rxgain); i++) { 852 for (i = 0; i < ARRAY_SIZE(rtl8225z2_rxgain); i++) {
858 rtl8225_write(dev, 0x1, i + 1); msleep(1); 853 rtl8225_write(dev, 0x1, i + 1);
859 rtl8225_write(dev, 0x2, rtl8225z2_rxgain[i]); msleep(1); 854 rtl8225_write(dev, 0x2, rtl8225z2_rxgain[i]);
860 } 855 }
861 856
862 rtl8225_write(dev, 0x3, 0x080); msleep(1); 857 rtl8225_write(dev, 0x3, 0x080);
863 rtl8225_write(dev, 0x5, 0x004); msleep(1); 858 rtl8225_write(dev, 0x5, 0x004);
864 rtl8225_write(dev, 0x0, 0x0B7); msleep(1); 859 rtl8225_write(dev, 0x0, 0x0B7);
865 msleep(3000);
866 860
867 rtl8225_write(dev, 0x2, 0xC4D); msleep(1); 861 rtl8225_write(dev, 0x2, 0xC4D);
868 msleep(2000);
869 862
870 rtl8225_write(dev, 0x2, 0x44D); msleep(1); 863 rtl8225_write(dev, 0x2, 0x44D);
871 rtl8225_write(dev, 0x0, 0x2BF); msleep(1); 864 rtl8225_write(dev, 0x0, 0x2BF);
872 865
873 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 0x03); 866 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 0x03);
874 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 0x07); 867 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 0x07);
@@ -885,24 +878,10 @@ static void rtl8225z2_b_rf_init(struct ieee80211_hw *dev)
885 for (i = 0; i < ARRAY_SIZE(rtl8225z2_ofdm); i++) 878 for (i = 0; i < ARRAY_SIZE(rtl8225z2_ofdm); i++)
886 rtl8225_write_phy_ofdm(dev, i, rtl8225z2_ofdm[i]); 879 rtl8225_write_phy_ofdm(dev, i, rtl8225z2_ofdm[i]);
887 880
888 rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22); 881 rtl8225_write_phy_ofdm(dev, 0x97, 0x46);
889 rtl818x_iowrite8(priv, &priv->map->SLOT, 9); 882 rtl8225_write_phy_ofdm(dev, 0xa4, 0xb6);
890 rtl818x_iowrite8(priv, (u8 *)0xFFF0, 28); 883 rtl8225_write_phy_ofdm(dev, 0x85, 0xfc);
891 rtl818x_iowrite8(priv, (u8 *)0xFFF4, 28); 884 rtl8225_write_phy_cck(dev, 0xc1, 0x88);
892 rtl818x_iowrite8(priv, (u8 *)0xFFF8, 28);
893 rtl818x_iowrite8(priv, (u8 *)0xFFFC, 28);
894 rtl818x_iowrite8(priv, (u8 *)0xFF2D, 0x5B);
895 rtl818x_iowrite8(priv, (u8 *)0xFF79, 0x5B);
896 rtl818x_iowrite32(priv, (__le32 *)0xFFF0, (7 << 12) | (3 << 8) | 28);
897 rtl818x_iowrite32(priv, (__le32 *)0xFFF4, (7 << 12) | (3 << 8) | 28);
898 rtl818x_iowrite32(priv, (__le32 *)0xFFF8, (7 << 12) | (3 << 8) | 28);
899 rtl818x_iowrite32(priv, (__le32 *)0xFFFC, (7 << 12) | (3 << 8) | 28);
900 rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0);
901
902 rtl8225_write_phy_ofdm(dev, 0x97, 0x46); msleep(1);
903 rtl8225_write_phy_ofdm(dev, 0xa4, 0xb6); msleep(1);
904 rtl8225_write_phy_ofdm(dev, 0x85, 0xfc); msleep(1);
905 rtl8225_write_phy_cck(dev, 0xc1, 0x88); msleep(1);
906} 885}
907 886
908static void rtl8225_rf_stop(struct ieee80211_hw *dev) 887static void rtl8225_rf_stop(struct ieee80211_hw *dev)
@@ -910,7 +889,7 @@ static void rtl8225_rf_stop(struct ieee80211_hw *dev)
910 u8 reg; 889 u8 reg;
911 struct rtl8187_priv *priv = dev->priv; 890 struct rtl8187_priv *priv = dev->priv;
912 891
913 rtl8225_write(dev, 0x4, 0x1f); msleep(1); 892 rtl8225_write(dev, 0x4, 0x1f);
914 893
915 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 894 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
916 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); 895 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
diff --git a/drivers/net/wireless/rtl8187_rtl8225.h b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.h
index 20c5b6ead0f6..20c5b6ead0f6 100644
--- a/drivers/net/wireless/rtl8187_rtl8225.h
+++ b/drivers/net/wireless/rtl818x/rtl8187_rtl8225.h
diff --git a/drivers/net/wireless/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 3538b15211b1..34a5555cc19c 100644
--- a/drivers/net/wireless/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -191,6 +191,7 @@ struct rtl818x_rf_ops {
191 void (*init)(struct ieee80211_hw *); 191 void (*init)(struct ieee80211_hw *);
192 void (*stop)(struct ieee80211_hw *); 192 void (*stop)(struct ieee80211_hw *);
193 void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *); 193 void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *);
194 void (*conf_erp)(struct ieee80211_hw *, struct ieee80211_bss_conf *);
194}; 195};
195 196
196/* Tx/Rx flags are common between RTL818X chips */ 197/* Tx/Rx flags are common between RTL818X chips */
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 417e9e675fac..dd0de3a9ed4e 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -1234,7 +1234,7 @@ static void ResetRadio(struct strip *strip_info)
1234 1234
1235static void strip_write_some_more(struct tty_struct *tty) 1235static void strip_write_some_more(struct tty_struct *tty)
1236{ 1236{
1237 struct strip *strip_info = (struct strip *) tty->disc_data; 1237 struct strip *strip_info = tty->disc_data;
1238 1238
1239 /* First make sure we're connected. */ 1239 /* First make sure we're connected. */
1240 if (!strip_info || strip_info->magic != STRIP_MAGIC || 1240 if (!strip_info || strip_info->magic != STRIP_MAGIC ||
@@ -1252,7 +1252,7 @@ static void strip_write_some_more(struct tty_struct *tty)
1252#endif 1252#endif
1253 } else { /* Else start transmission of another packet */ 1253 } else { /* Else start transmission of another packet */
1254 1254
1255 tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 1255 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
1256 strip_unlock(strip_info); 1256 strip_unlock(strip_info);
1257 } 1257 }
1258} 1258}
@@ -1455,8 +1455,7 @@ static void strip_send(struct strip *strip_info, struct sk_buff *skb)
1455 */ 1455 */
1456 strip_info->tx_head = strip_info->tx_buff; 1456 strip_info->tx_head = strip_info->tx_buff;
1457 strip_info->tx_left = ptr - strip_info->tx_buff; 1457 strip_info->tx_left = ptr - strip_info->tx_buff;
1458 strip_info->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); 1458 set_bit(TTY_DO_WRITE_WAKEUP, &strip_info->tty->flags);
1459
1460 /* 1459 /*
1461 * 4. Debugging check to make sure we're not overflowing the buffer. 1460 * 4. Debugging check to make sure we're not overflowing the buffer.
1462 */ 1461 */
@@ -1997,7 +1996,6 @@ static void deliver_packet(struct strip *strip_info, STRIP_Header * header,
1997#ifdef EXT_COUNTERS 1996#ifdef EXT_COUNTERS
1998 strip_info->rx_bytes += packetlen; 1997 strip_info->rx_bytes += packetlen;
1999#endif 1998#endif
2000 skb->dev->last_rx = jiffies;
2001 netif_rx(skb); 1999 netif_rx(skb);
2002 } 2000 }
2003} 2001}
@@ -2261,7 +2259,7 @@ static void process_message(struct strip *strip_info)
2261static void strip_receive_buf(struct tty_struct *tty, const unsigned char *cp, 2259static void strip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
2262 char *fp, int count) 2260 char *fp, int count)
2263{ 2261{
2264 struct strip *strip_info = (struct strip *) tty->disc_data; 2262 struct strip *strip_info = tty->disc_data;
2265 const unsigned char *end = cp + count; 2263 const unsigned char *end = cp + count;
2266 2264
2267 if (!strip_info || strip_info->magic != STRIP_MAGIC 2265 if (!strip_info || strip_info->magic != STRIP_MAGIC
@@ -2455,8 +2453,7 @@ static int strip_close_low(struct net_device *dev)
2455 2453
2456 if (strip_info->tty == NULL) 2454 if (strip_info->tty == NULL)
2457 return -EBUSY; 2455 return -EBUSY;
2458 strip_info->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); 2456 clear_bit(TTY_DO_WRITE_WAKEUP, &strip_info->tty->flags);
2459
2460 netif_stop_queue(dev); 2457 netif_stop_queue(dev);
2461 2458
2462 /* 2459 /*
@@ -2490,7 +2487,6 @@ static void strip_dev_setup(struct net_device *dev)
2490 */ 2487 */
2491 2488
2492 dev->trans_start = 0; 2489 dev->trans_start = 0;
2493 dev->last_rx = 0;
2494 dev->tx_queue_len = 30; /* Drop after 30 frames queued */ 2490 dev->tx_queue_len = 30; /* Drop after 30 frames queued */
2495 2491
2496 dev->flags = 0; 2492 dev->flags = 0;
@@ -2498,7 +2494,7 @@ static void strip_dev_setup(struct net_device *dev)
2498 dev->type = ARPHRD_METRICOM; /* dtang */ 2494 dev->type = ARPHRD_METRICOM; /* dtang */
2499 dev->hard_header_len = sizeof(STRIP_Header); 2495 dev->hard_header_len = sizeof(STRIP_Header);
2500 /* 2496 /*
2501 * dev->priv Already holds a pointer to our struct strip 2497 * netdev_priv(dev) Already holds a pointer to our struct strip
2502 */ 2498 */
2503 2499
2504 *(MetricomAddress *) & dev->broadcast = broadcast_address; 2500 *(MetricomAddress *) & dev->broadcast = broadcast_address;
@@ -2598,7 +2594,7 @@ static struct strip *strip_alloc(void)
2598 2594
2599static int strip_open(struct tty_struct *tty) 2595static int strip_open(struct tty_struct *tty)
2600{ 2596{
2601 struct strip *strip_info = (struct strip *) tty->disc_data; 2597 struct strip *strip_info = tty->disc_data;
2602 2598
2603 /* 2599 /*
2604 * First make sure we're not already connected. 2600 * First make sure we're not already connected.
@@ -2669,7 +2665,7 @@ static int strip_open(struct tty_struct *tty)
2669 2665
2670static void strip_close(struct tty_struct *tty) 2666static void strip_close(struct tty_struct *tty)
2671{ 2667{
2672 struct strip *strip_info = (struct strip *) tty->disc_data; 2668 struct strip *strip_info = tty->disc_data;
2673 2669
2674 /* 2670 /*
2675 * First make sure we're connected. 2671 * First make sure we're connected.
@@ -2695,7 +2691,7 @@ static void strip_close(struct tty_struct *tty)
2695static int strip_ioctl(struct tty_struct *tty, struct file *file, 2691static int strip_ioctl(struct tty_struct *tty, struct file *file,
2696 unsigned int cmd, unsigned long arg) 2692 unsigned int cmd, unsigned long arg)
2697{ 2693{
2698 struct strip *strip_info = (struct strip *) tty->disc_data; 2694 struct strip *strip_info = tty->disc_data;
2699 2695
2700 /* 2696 /*
2701 * First make sure we're connected. 2697 * First make sure we're connected.
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index e939a73ff794..832679396b6c 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -134,7 +134,7 @@ static inline void wv_16_on(unsigned long ioaddr, u16 hacr)
134 */ 134 */
135static inline void wv_ints_off(struct net_device * dev) 135static inline void wv_ints_off(struct net_device * dev)
136{ 136{
137 net_local *lp = (net_local *) dev->priv; 137 net_local *lp = netdev_priv(dev);
138 unsigned long ioaddr = dev->base_addr; 138 unsigned long ioaddr = dev->base_addr;
139 139
140 lp->hacr &= ~HACR_INTRON; 140 lp->hacr &= ~HACR_INTRON;
@@ -148,7 +148,7 @@ static inline void wv_ints_off(struct net_device * dev)
148 */ 148 */
149static inline void wv_ints_on(struct net_device * dev) 149static inline void wv_ints_on(struct net_device * dev)
150{ 150{
151 net_local *lp = (net_local *) dev->priv; 151 net_local *lp = netdev_priv(dev);
152 unsigned long ioaddr = dev->base_addr; 152 unsigned long ioaddr = dev->base_addr;
153 153
154 lp->hacr |= HACR_INTRON; 154 lp->hacr |= HACR_INTRON;
@@ -526,7 +526,7 @@ static inline void obram_write(unsigned long ioaddr, u16 o, u8 * b, int n)
526 */ 526 */
527static void wv_ack(struct net_device * dev) 527static void wv_ack(struct net_device * dev)
528{ 528{
529 net_local *lp = (net_local *) dev->priv; 529 net_local *lp = netdev_priv(dev);
530 unsigned long ioaddr = dev->base_addr; 530 unsigned long ioaddr = dev->base_addr;
531 u16 scb_cs; 531 u16 scb_cs;
532 int i; 532 int i;
@@ -568,7 +568,7 @@ static void wv_ack(struct net_device * dev)
568 */ 568 */
569static int wv_synchronous_cmd(struct net_device * dev, const char *str) 569static int wv_synchronous_cmd(struct net_device * dev, const char *str)
570{ 570{
571 net_local *lp = (net_local *) dev->priv; 571 net_local *lp = netdev_priv(dev);
572 unsigned long ioaddr = dev->base_addr; 572 unsigned long ioaddr = dev->base_addr;
573 u16 scb_cmd; 573 u16 scb_cmd;
574 ach_t cb; 574 ach_t cb;
@@ -824,7 +824,7 @@ if (lp->tx_n_in_use > 0)
824 */ 824 */
825static void wv_82586_reconfig(struct net_device * dev) 825static void wv_82586_reconfig(struct net_device * dev)
826{ 826{
827 net_local *lp = (net_local *) dev->priv; 827 net_local *lp = netdev_priv(dev);
828 unsigned long flags; 828 unsigned long flags;
829 829
830 /* Arm the flag, will be cleard in wv_82586_config() */ 830 /* Arm the flag, will be cleard in wv_82586_config() */
@@ -859,8 +859,6 @@ static void wv_82586_reconfig(struct net_device * dev)
859 */ 859 */
860static void wv_psa_show(psa_t * p) 860static void wv_psa_show(psa_t * p)
861{ 861{
862 DECLARE_MAC_BUF(mac);
863
864 printk(KERN_DEBUG "##### WaveLAN PSA contents: #####\n"); 862 printk(KERN_DEBUG "##### WaveLAN PSA contents: #####\n");
865 printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n", 863 printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n",
866 p->psa_io_base_addr_1, 864 p->psa_io_base_addr_1,
@@ -872,13 +870,10 @@ static void wv_psa_show(psa_t * p)
872 printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params); 870 printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params);
873 printk("psa_int_req_no: %d\n", p->psa_int_req_no); 871 printk("psa_int_req_no: %d\n", p->psa_int_req_no);
874#ifdef DEBUG_SHOW_UNUSED 872#ifdef DEBUG_SHOW_UNUSED
875 printk(KERN_DEBUG "psa_unused0[]: %s\n", 873 printk(KERN_DEBUG "psa_unused0[]: %pM\n", p->psa_unused0);
876 print_mac(mac, p->psa_unused0));
877#endif /* DEBUG_SHOW_UNUSED */ 874#endif /* DEBUG_SHOW_UNUSED */
878 printk(KERN_DEBUG "psa_univ_mac_addr[]: %s\n", 875 printk(KERN_DEBUG "psa_univ_mac_addr[]: %pM\n", p->psa_univ_mac_addr);
879 print_mac(mac, p->psa_univ_mac_addr)); 876 printk(KERN_DEBUG "psa_local_mac_addr[]: %pM\n", p->psa_local_mac_addr);
880 printk(KERN_DEBUG "psa_local_mac_addr[]: %s\n",
881 print_mac(mac, p->psa_local_mac_addr));
882 printk(KERN_DEBUG "psa_univ_local_sel: %d, ", 877 printk(KERN_DEBUG "psa_univ_local_sel: %d, ",
883 p->psa_univ_local_sel); 878 p->psa_univ_local_sel);
884 printk("psa_comp_number: %d, ", p->psa_comp_number); 879 printk("psa_comp_number: %d, ", p->psa_comp_number);
@@ -927,7 +922,7 @@ static void wv_psa_show(psa_t * p)
927static void wv_mmc_show(struct net_device * dev) 922static void wv_mmc_show(struct net_device * dev)
928{ 923{
929 unsigned long ioaddr = dev->base_addr; 924 unsigned long ioaddr = dev->base_addr;
930 net_local *lp = (net_local *) dev->priv; 925 net_local *lp = netdev_priv(dev);
931 mmr_t m; 926 mmr_t m;
932 927
933 /* Basic check */ 928 /* Basic check */
@@ -1107,8 +1102,6 @@ static void wv_scb_show(unsigned long ioaddr)
1107 */ 1102 */
1108static void wv_ru_show(struct net_device * dev) 1103static void wv_ru_show(struct net_device * dev)
1109{ 1104{
1110 /* net_local *lp = (net_local *) dev->priv; */
1111
1112 printk(KERN_DEBUG 1105 printk(KERN_DEBUG
1113 "##### WaveLAN i82586 receiver unit status: #####\n"); 1106 "##### WaveLAN i82586 receiver unit status: #####\n");
1114 printk(KERN_DEBUG "ru:"); 1107 printk(KERN_DEBUG "ru:");
@@ -1153,7 +1146,7 @@ static void wv_cu_show_one(struct net_device * dev, net_local * lp, int i, u16 p
1153 */ 1146 */
1154static void wv_cu_show(struct net_device * dev) 1147static void wv_cu_show(struct net_device * dev)
1155{ 1148{
1156 net_local *lp = (net_local *) dev->priv; 1149 net_local *lp = netdev_priv(dev);
1157 unsigned int i; 1150 unsigned int i;
1158 u16 p; 1151 u16 p;
1159 1152
@@ -1195,7 +1188,7 @@ static void wv_local_show(struct net_device * dev)
1195{ 1188{
1196 net_local *lp; 1189 net_local *lp;
1197 1190
1198 lp = (net_local *) dev->priv; 1191 lp = netdev_priv(dev);
1199 1192
1200 printk(KERN_DEBUG "local:"); 1193 printk(KERN_DEBUG "local:");
1201 printk(" tx_n_in_use=%d,", lp->tx_n_in_use); 1194 printk(" tx_n_in_use=%d,", lp->tx_n_in_use);
@@ -1220,14 +1213,13 @@ static inline void wv_packet_info(u8 * p, /* Packet to dump */
1220{ /* Name of the function */ 1213{ /* Name of the function */
1221 int i; 1214 int i;
1222 int maxi; 1215 int maxi;
1223 DECLARE_MAC_BUF(mac);
1224 1216
1225 printk(KERN_DEBUG 1217 printk(KERN_DEBUG
1226 "%s: %s(): dest %s, length %d\n", 1218 "%s: %s(): dest %pM, length %d\n",
1227 msg1, msg2, print_mac(mac, p), length); 1219 msg1, msg2, p, length);
1228 printk(KERN_DEBUG 1220 printk(KERN_DEBUG
1229 "%s: %s(): src %s, type 0x%02X%02X\n", 1221 "%s: %s(): src %pM, type 0x%02X%02X\n",
1230 msg1, msg2, print_mac(mac, &p[6]), p[12], p[13]); 1222 msg1, msg2, &p[6], p[12], p[13]);
1231 1223
1232#ifdef DEBUG_PACKET_DUMP 1224#ifdef DEBUG_PACKET_DUMP
1233 1225
@@ -1256,11 +1248,8 @@ static inline void wv_packet_info(u8 * p, /* Packet to dump */
1256static void wv_init_info(struct net_device * dev) 1248static void wv_init_info(struct net_device * dev)
1257{ 1249{
1258 short ioaddr = dev->base_addr; 1250 short ioaddr = dev->base_addr;
1259 net_local *lp = (net_local *) dev->priv; 1251 net_local *lp = netdev_priv(dev);
1260 psa_t psa; 1252 psa_t psa;
1261#ifdef DEBUG_BASIC_SHOW
1262 DECLARE_MAC_BUF(mac);
1263#endif
1264 1253
1265 /* Read the parameter storage area */ 1254 /* Read the parameter storage area */
1266 psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa)); 1255 psa_read(ioaddr, lp->hacr, 0, (unsigned char *) &psa, sizeof(psa));
@@ -1277,8 +1266,8 @@ static void wv_init_info(struct net_device * dev)
1277 1266
1278#ifdef DEBUG_BASIC_SHOW 1267#ifdef DEBUG_BASIC_SHOW
1279 /* Now, let's go for the basic stuff. */ 1268 /* Now, let's go for the basic stuff. */
1280 printk(KERN_NOTICE "%s: WaveLAN at %#x, %s, IRQ %d", 1269 printk(KERN_NOTICE "%s: WaveLAN at %#x, %pM, IRQ %d",
1281 dev->name, ioaddr, print_mac(mac, dev->dev_addr), dev->irq); 1270 dev->name, ioaddr, dev->dev_addr, dev->irq);
1282 1271
1283 /* Print current network ID. */ 1272 /* Print current network ID. */
1284 if (psa.psa_nwid_select) 1273 if (psa.psa_nwid_select)
@@ -1369,7 +1358,7 @@ static en_stats *wavelan_get_stats(struct net_device * dev)
1369 printk(KERN_DEBUG "%s: <>wavelan_get_stats()\n", dev->name); 1358 printk(KERN_DEBUG "%s: <>wavelan_get_stats()\n", dev->name);
1370#endif 1359#endif
1371 1360
1372 return (&((net_local *) dev->priv)->stats); 1361 return &((net_local *)netdev_priv(dev))->stats;
1373} 1362}
1374 1363
1375/*------------------------------------------------------------------*/ 1364/*------------------------------------------------------------------*/
@@ -1382,7 +1371,7 @@ static en_stats *wavelan_get_stats(struct net_device * dev)
1382 */ 1371 */
1383static void wavelan_set_multicast_list(struct net_device * dev) 1372static void wavelan_set_multicast_list(struct net_device * dev)
1384{ 1373{
1385 net_local *lp = (net_local *) dev->priv; 1374 net_local *lp = netdev_priv(dev);
1386 1375
1387#ifdef DEBUG_IOCTL_TRACE 1376#ifdef DEBUG_IOCTL_TRACE
1388 printk(KERN_DEBUG "%s: ->wavelan_set_multicast_list()\n", 1377 printk(KERN_DEBUG "%s: ->wavelan_set_multicast_list()\n",
@@ -1716,7 +1705,7 @@ static inline void wl_spy_gather(struct net_device * dev,
1716 */ 1705 */
1717static inline void wl_his_gather(struct net_device * dev, u8 * stats) 1706static inline void wl_his_gather(struct net_device * dev, u8 * stats)
1718{ /* Statistics to gather */ 1707{ /* Statistics to gather */
1719 net_local *lp = (net_local *) dev->priv; 1708 net_local *lp = netdev_priv(dev);
1720 u8 level = stats[0] & MMR_SIGNAL_LVL; 1709 u8 level = stats[0] & MMR_SIGNAL_LVL;
1721 int i; 1710 int i;
1722 1711
@@ -1753,7 +1742,7 @@ static int wavelan_set_nwid(struct net_device *dev,
1753 char *extra) 1742 char *extra)
1754{ 1743{
1755 unsigned long ioaddr = dev->base_addr; 1744 unsigned long ioaddr = dev->base_addr;
1756 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 1745 net_local *lp = netdev_priv(dev); /* lp is not unused */
1757 psa_t psa; 1746 psa_t psa;
1758 mm_t m; 1747 mm_t m;
1759 unsigned long flags; 1748 unsigned long flags;
@@ -1812,7 +1801,7 @@ static int wavelan_get_nwid(struct net_device *dev,
1812 char *extra) 1801 char *extra)
1813{ 1802{
1814 unsigned long ioaddr = dev->base_addr; 1803 unsigned long ioaddr = dev->base_addr;
1815 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 1804 net_local *lp = netdev_priv(dev); /* lp is not unused */
1816 psa_t psa; 1805 psa_t psa;
1817 unsigned long flags; 1806 unsigned long flags;
1818 int ret = 0; 1807 int ret = 0;
@@ -1844,7 +1833,7 @@ static int wavelan_set_freq(struct net_device *dev,
1844 char *extra) 1833 char *extra)
1845{ 1834{
1846 unsigned long ioaddr = dev->base_addr; 1835 unsigned long ioaddr = dev->base_addr;
1847 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 1836 net_local *lp = netdev_priv(dev); /* lp is not unused */
1848 unsigned long flags; 1837 unsigned long flags;
1849 int ret; 1838 int ret;
1850 1839
@@ -1874,7 +1863,7 @@ static int wavelan_get_freq(struct net_device *dev,
1874 char *extra) 1863 char *extra)
1875{ 1864{
1876 unsigned long ioaddr = dev->base_addr; 1865 unsigned long ioaddr = dev->base_addr;
1877 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 1866 net_local *lp = netdev_priv(dev); /* lp is not unused */
1878 psa_t psa; 1867 psa_t psa;
1879 unsigned long flags; 1868 unsigned long flags;
1880 int ret = 0; 1869 int ret = 0;
@@ -1920,7 +1909,7 @@ static int wavelan_set_sens(struct net_device *dev,
1920 char *extra) 1909 char *extra)
1921{ 1910{
1922 unsigned long ioaddr = dev->base_addr; 1911 unsigned long ioaddr = dev->base_addr;
1923 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 1912 net_local *lp = netdev_priv(dev); /* lp is not unused */
1924 psa_t psa; 1913 psa_t psa;
1925 unsigned long flags; 1914 unsigned long flags;
1926 int ret = 0; 1915 int ret = 0;
@@ -1956,7 +1945,7 @@ static int wavelan_get_sens(struct net_device *dev,
1956 char *extra) 1945 char *extra)
1957{ 1946{
1958 unsigned long ioaddr = dev->base_addr; 1947 unsigned long ioaddr = dev->base_addr;
1959 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 1948 net_local *lp = netdev_priv(dev); /* lp is not unused */
1960 psa_t psa; 1949 psa_t psa;
1961 unsigned long flags; 1950 unsigned long flags;
1962 int ret = 0; 1951 int ret = 0;
@@ -1987,7 +1976,7 @@ static int wavelan_set_encode(struct net_device *dev,
1987 char *extra) 1976 char *extra)
1988{ 1977{
1989 unsigned long ioaddr = dev->base_addr; 1978 unsigned long ioaddr = dev->base_addr;
1990 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 1979 net_local *lp = netdev_priv(dev); /* lp is not unused */
1991 unsigned long flags; 1980 unsigned long flags;
1992 psa_t psa; 1981 psa_t psa;
1993 int ret = 0; 1982 int ret = 0;
@@ -2057,7 +2046,7 @@ static int wavelan_get_encode(struct net_device *dev,
2057 char *extra) 2046 char *extra)
2058{ 2047{
2059 unsigned long ioaddr = dev->base_addr; 2048 unsigned long ioaddr = dev->base_addr;
2060 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 2049 net_local *lp = netdev_priv(dev); /* lp is not unused */
2061 psa_t psa; 2050 psa_t psa;
2062 unsigned long flags; 2051 unsigned long flags;
2063 int ret = 0; 2052 int ret = 0;
@@ -2104,7 +2093,7 @@ static int wavelan_get_range(struct net_device *dev,
2104 char *extra) 2093 char *extra)
2105{ 2094{
2106 unsigned long ioaddr = dev->base_addr; 2095 unsigned long ioaddr = dev->base_addr;
2107 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 2096 net_local *lp = netdev_priv(dev); /* lp is not unused */
2108 struct iw_range *range = (struct iw_range *) extra; 2097 struct iw_range *range = (struct iw_range *) extra;
2109 unsigned long flags; 2098 unsigned long flags;
2110 int ret = 0; 2099 int ret = 0;
@@ -2179,7 +2168,7 @@ static int wavelan_set_qthr(struct net_device *dev,
2179 char *extra) 2168 char *extra)
2180{ 2169{
2181 unsigned long ioaddr = dev->base_addr; 2170 unsigned long ioaddr = dev->base_addr;
2182 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 2171 net_local *lp = netdev_priv(dev); /* lp is not unused */
2183 psa_t psa; 2172 psa_t psa;
2184 unsigned long flags; 2173 unsigned long flags;
2185 2174
@@ -2211,7 +2200,7 @@ static int wavelan_get_qthr(struct net_device *dev,
2211 char *extra) 2200 char *extra)
2212{ 2201{
2213 unsigned long ioaddr = dev->base_addr; 2202 unsigned long ioaddr = dev->base_addr;
2214 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 2203 net_local *lp = netdev_priv(dev); /* lp is not unused */
2215 psa_t psa; 2204 psa_t psa;
2216 unsigned long flags; 2205 unsigned long flags;
2217 2206
@@ -2239,7 +2228,7 @@ static int wavelan_set_histo(struct net_device *dev,
2239 union iwreq_data *wrqu, 2228 union iwreq_data *wrqu,
2240 char *extra) 2229 char *extra)
2241{ 2230{
2242 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 2231 net_local *lp = netdev_priv(dev); /* lp is not unused */
2243 2232
2244 /* Check the number of intervals. */ 2233 /* Check the number of intervals. */
2245 if (wrqu->data.length > 16) { 2234 if (wrqu->data.length > 16) {
@@ -2282,7 +2271,7 @@ static int wavelan_get_histo(struct net_device *dev,
2282 union iwreq_data *wrqu, 2271 union iwreq_data *wrqu,
2283 char *extra) 2272 char *extra)
2284{ 2273{
2285 net_local *lp = (net_local *) dev->priv; /* lp is not unused */ 2274 net_local *lp = netdev_priv(dev); /* lp is not unused */
2286 2275
2287 /* Set the number of intervals. */ 2276 /* Set the number of intervals. */
2288 wrqu->data.length = lp->his_number; 2277 wrqu->data.length = lp->his_number;
@@ -2386,7 +2375,7 @@ static const struct iw_handler_def wavelan_handler_def =
2386static iw_stats *wavelan_get_wireless_stats(struct net_device * dev) 2375static iw_stats *wavelan_get_wireless_stats(struct net_device * dev)
2387{ 2376{
2388 unsigned long ioaddr = dev->base_addr; 2377 unsigned long ioaddr = dev->base_addr;
2389 net_local *lp = (net_local *) dev->priv; 2378 net_local *lp = netdev_priv(dev);
2390 mmr_t m; 2379 mmr_t m;
2391 iw_stats *wstats; 2380 iw_stats *wstats;
2392 unsigned long flags; 2381 unsigned long flags;
@@ -2461,7 +2450,7 @@ static iw_stats *wavelan_get_wireless_stats(struct net_device * dev)
2461static void 2450static void
2462wv_packet_read(struct net_device * dev, u16 buf_off, int sksize) 2451wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
2463{ 2452{
2464 net_local *lp = (net_local *) dev->priv; 2453 net_local *lp = netdev_priv(dev);
2465 unsigned long ioaddr = dev->base_addr; 2454 unsigned long ioaddr = dev->base_addr;
2466 struct sk_buff *skb; 2455 struct sk_buff *skb;
2467 2456
@@ -2537,7 +2526,6 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
2537 netif_rx(skb); 2526 netif_rx(skb);
2538 2527
2539 /* Keep statistics up to date */ 2528 /* Keep statistics up to date */
2540 dev->last_rx = jiffies;
2541 lp->stats.rx_packets++; 2529 lp->stats.rx_packets++;
2542 lp->stats.rx_bytes += sksize; 2530 lp->stats.rx_bytes += sksize;
2543 2531
@@ -2556,7 +2544,7 @@ wv_packet_read(struct net_device * dev, u16 buf_off, int sksize)
2556static void wv_receive(struct net_device * dev) 2544static void wv_receive(struct net_device * dev)
2557{ 2545{
2558 unsigned long ioaddr = dev->base_addr; 2546 unsigned long ioaddr = dev->base_addr;
2559 net_local *lp = (net_local *) dev->priv; 2547 net_local *lp = netdev_priv(dev);
2560 fd_t fd; 2548 fd_t fd;
2561 rbd_t rbd; 2549 rbd_t rbd;
2562 int nreaped = 0; 2550 int nreaped = 0;
@@ -2738,7 +2726,7 @@ static void wv_receive(struct net_device * dev)
2738 */ 2726 */
2739static int wv_packet_write(struct net_device * dev, void *buf, short length) 2727static int wv_packet_write(struct net_device * dev, void *buf, short length)
2740{ 2728{
2741 net_local *lp = (net_local *) dev->priv; 2729 net_local *lp = netdev_priv(dev);
2742 unsigned long ioaddr = dev->base_addr; 2730 unsigned long ioaddr = dev->base_addr;
2743 unsigned short txblock; 2731 unsigned short txblock;
2744 unsigned short txpred; 2732 unsigned short txpred;
@@ -2869,7 +2857,7 @@ static int wv_packet_write(struct net_device * dev, void *buf, short length)
2869 */ 2857 */
2870static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev) 2858static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
2871{ 2859{
2872 net_local *lp = (net_local *) dev->priv; 2860 net_local *lp = netdev_priv(dev);
2873 unsigned long flags; 2861 unsigned long flags;
2874 char data[ETH_ZLEN]; 2862 char data[ETH_ZLEN];
2875 2863
@@ -2937,7 +2925,7 @@ static int wavelan_packet_xmit(struct sk_buff *skb, struct net_device * dev)
2937static int wv_mmc_init(struct net_device * dev) 2925static int wv_mmc_init(struct net_device * dev)
2938{ 2926{
2939 unsigned long ioaddr = dev->base_addr; 2927 unsigned long ioaddr = dev->base_addr;
2940 net_local *lp = (net_local *) dev->priv; 2928 net_local *lp = netdev_priv(dev);
2941 psa_t psa; 2929 psa_t psa;
2942 mmw_t m; 2930 mmw_t m;
2943 int configured; 2931 int configured;
@@ -3108,7 +3096,7 @@ static int wv_mmc_init(struct net_device * dev)
3108 */ 3096 */
3109static int wv_ru_start(struct net_device * dev) 3097static int wv_ru_start(struct net_device * dev)
3110{ 3098{
3111 net_local *lp = (net_local *) dev->priv; 3099 net_local *lp = netdev_priv(dev);
3112 unsigned long ioaddr = dev->base_addr; 3100 unsigned long ioaddr = dev->base_addr;
3113 u16 scb_cs; 3101 u16 scb_cs;
3114 fd_t fd; 3102 fd_t fd;
@@ -3200,7 +3188,7 @@ static int wv_ru_start(struct net_device * dev)
3200 */ 3188 */
3201static int wv_cu_start(struct net_device * dev) 3189static int wv_cu_start(struct net_device * dev)
3202{ 3190{
3203 net_local *lp = (net_local *) dev->priv; 3191 net_local *lp = netdev_priv(dev);
3204 unsigned long ioaddr = dev->base_addr; 3192 unsigned long ioaddr = dev->base_addr;
3205 int i; 3193 int i;
3206 u16 txblock; 3194 u16 txblock;
@@ -3301,7 +3289,7 @@ static int wv_cu_start(struct net_device * dev)
3301 */ 3289 */
3302static int wv_82586_start(struct net_device * dev) 3290static int wv_82586_start(struct net_device * dev)
3303{ 3291{
3304 net_local *lp = (net_local *) dev->priv; 3292 net_local *lp = netdev_priv(dev);
3305 unsigned long ioaddr = dev->base_addr; 3293 unsigned long ioaddr = dev->base_addr;
3306 scp_t scp; /* system configuration pointer */ 3294 scp_t scp; /* system configuration pointer */
3307 iscp_t iscp; /* intermediate scp */ 3295 iscp_t iscp; /* intermediate scp */
@@ -3433,7 +3421,7 @@ static int wv_82586_start(struct net_device * dev)
3433 */ 3421 */
3434static void wv_82586_config(struct net_device * dev) 3422static void wv_82586_config(struct net_device * dev)
3435{ 3423{
3436 net_local *lp = (net_local *) dev->priv; 3424 net_local *lp = netdev_priv(dev);
3437 unsigned long ioaddr = dev->base_addr; 3425 unsigned long ioaddr = dev->base_addr;
3438 unsigned short txblock; 3426 unsigned short txblock;
3439 unsigned short txpred; 3427 unsigned short txpred;
@@ -3565,15 +3553,11 @@ static void wv_82586_config(struct net_device * dev)
3565 WAVELAN_ADDR_SIZE >> 1); 3553 WAVELAN_ADDR_SIZE >> 1);
3566 3554
3567#ifdef DEBUG_CONFIG_INFO 3555#ifdef DEBUG_CONFIG_INFO
3568 {
3569 DECLARE_MAC_BUF(mac);
3570 printk(KERN_DEBUG 3556 printk(KERN_DEBUG
3571 "%s: wv_82586_config(): set %d multicast addresses:\n", 3557 "%s: wv_82586_config(): set %d multicast addresses:\n",
3572 dev->name, lp->mc_count); 3558 dev->name, lp->mc_count);
3573 for (dmi = dev->mc_list; dmi; dmi = dmi->next) 3559 for (dmi = dev->mc_list; dmi; dmi = dmi->next)
3574 printk(KERN_DEBUG " %s\n", 3560 printk(KERN_DEBUG " %pM\n", dmi->dmi_addr);
3575 print_mac(mac, dmi->dmi_addr));
3576 }
3577#endif 3561#endif
3578 } 3562 }
3579 3563
@@ -3613,7 +3597,7 @@ static void wv_82586_config(struct net_device * dev)
3613 */ 3597 */
3614static void wv_82586_stop(struct net_device * dev) 3598static void wv_82586_stop(struct net_device * dev)
3615{ 3599{
3616 net_local *lp = (net_local *) dev->priv; 3600 net_local *lp = netdev_priv(dev);
3617 unsigned long ioaddr = dev->base_addr; 3601 unsigned long ioaddr = dev->base_addr;
3618 u16 scb_cmd; 3602 u16 scb_cmd;
3619 3603
@@ -3650,7 +3634,7 @@ static void wv_82586_stop(struct net_device * dev)
3650 */ 3634 */
3651static int wv_hw_reset(struct net_device * dev) 3635static int wv_hw_reset(struct net_device * dev)
3652{ 3636{
3653 net_local *lp = (net_local *) dev->priv; 3637 net_local *lp = netdev_priv(dev);
3654 unsigned long ioaddr = dev->base_addr; 3638 unsigned long ioaddr = dev->base_addr;
3655 3639
3656#ifdef DEBUG_CONFIG_TRACE 3640#ifdef DEBUG_CONFIG_TRACE
@@ -3751,7 +3735,7 @@ static irqreturn_t wavelan_interrupt(int irq, void *dev_id)
3751 printk(KERN_DEBUG "%s: ->wavelan_interrupt()\n", dev->name); 3735 printk(KERN_DEBUG "%s: ->wavelan_interrupt()\n", dev->name);
3752#endif 3736#endif
3753 3737
3754 lp = (net_local *) dev->priv; 3738 lp = netdev_priv(dev);
3755 ioaddr = dev->base_addr; 3739 ioaddr = dev->base_addr;
3756 3740
3757#ifdef DEBUG_INTERRUPT_INFO 3741#ifdef DEBUG_INTERRUPT_INFO
@@ -3894,7 +3878,7 @@ static irqreturn_t wavelan_interrupt(int irq, void *dev_id)
3894 */ 3878 */
3895static void wavelan_watchdog(struct net_device * dev) 3879static void wavelan_watchdog(struct net_device * dev)
3896{ 3880{
3897 net_local * lp = (net_local *)dev->priv; 3881 net_local *lp = netdev_priv(dev);
3898 u_long ioaddr = dev->base_addr; 3882 u_long ioaddr = dev->base_addr;
3899 unsigned long flags; 3883 unsigned long flags;
3900 unsigned int nreaped; 3884 unsigned int nreaped;
@@ -3974,7 +3958,7 @@ static void wavelan_watchdog(struct net_device * dev)
3974 */ 3958 */
3975static int wavelan_open(struct net_device * dev) 3959static int wavelan_open(struct net_device * dev)
3976{ 3960{
3977 net_local * lp = (net_local *)dev->priv; 3961 net_local *lp = netdev_priv(dev);
3978 unsigned long flags; 3962 unsigned long flags;
3979 3963
3980#ifdef DEBUG_CALLBACK_TRACE 3964#ifdef DEBUG_CALLBACK_TRACE
@@ -4029,7 +4013,7 @@ static int wavelan_open(struct net_device * dev)
4029 */ 4013 */
4030static int wavelan_close(struct net_device * dev) 4014static int wavelan_close(struct net_device * dev)
4031{ 4015{
4032 net_local *lp = (net_local *) dev->priv; 4016 net_local *lp = netdev_priv(dev);
4033 unsigned long flags; 4017 unsigned long flags;
4034 4018
4035#ifdef DEBUG_CALLBACK_TRACE 4019#ifdef DEBUG_CALLBACK_TRACE
@@ -4128,8 +4112,8 @@ static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr)
4128 dev->if_port = 0; 4112 dev->if_port = 0;
4129 4113
4130 /* Initialize device structures */ 4114 /* Initialize device structures */
4131 memset(dev->priv, 0, sizeof(net_local)); 4115 memset(netdev_priv(dev), 0, sizeof(net_local));
4132 lp = (net_local *) dev->priv; 4116 lp = netdev_priv(dev);
4133 4117
4134 /* Back link to the device structure. */ 4118 /* Back link to the device structure. */
4135 lp->dev = dev; 4119 lp->dev = dev;
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index e124b1d6267a..de717f8ffd61 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -1020,7 +1020,6 @@ wv_82593_reconfig(struct net_device * dev)
1020static void 1020static void
1021wv_psa_show(psa_t * p) 1021wv_psa_show(psa_t * p)
1022{ 1022{
1023 DECLARE_MAC_BUF(mac);
1024 printk(KERN_DEBUG "##### wavelan psa contents: #####\n"); 1023 printk(KERN_DEBUG "##### wavelan psa contents: #####\n");
1025 printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n", 1024 printk(KERN_DEBUG "psa_io_base_addr_1: 0x%02X %02X %02X %02X\n",
1026 p->psa_io_base_addr_1, 1025 p->psa_io_base_addr_1,
@@ -1034,13 +1033,10 @@ wv_psa_show(psa_t * p)
1034 printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params); 1033 printk(KERN_DEBUG "psa_holi_params: 0x%02x, ", p->psa_holi_params);
1035 printk("psa_int_req_no: %d\n", p->psa_int_req_no); 1034 printk("psa_int_req_no: %d\n", p->psa_int_req_no);
1036#ifdef DEBUG_SHOW_UNUSED 1035#ifdef DEBUG_SHOW_UNUSED
1037 printk(KERN_DEBUG "psa_unused0[]: %s\n", 1036 printk(KERN_DEBUG "psa_unused0[]: %pM\n", p->psa_unused0);
1038 print_mac(mac, p->psa_unused0));
1039#endif /* DEBUG_SHOW_UNUSED */ 1037#endif /* DEBUG_SHOW_UNUSED */
1040 printk(KERN_DEBUG "psa_univ_mac_addr[]: %s\n", 1038 printk(KERN_DEBUG "psa_univ_mac_addr[]: %pM\n", p->psa_univ_mac_addr);
1041 print_mac(mac, p->psa_univ_mac_addr)); 1039 printk(KERN_DEBUG "psa_local_mac_addr[]: %pM\n", p->psa_local_mac_addr);
1042 printk(KERN_DEBUG "psa_local_mac_addr[]: %s\n",
1043 print_mac(mac, p->psa_local_mac_addr));
1044 printk(KERN_DEBUG "psa_univ_local_sel: %d, ", p->psa_univ_local_sel); 1040 printk(KERN_DEBUG "psa_univ_local_sel: %d, ", p->psa_univ_local_sel);
1045 printk("psa_comp_number: %d, ", p->psa_comp_number); 1041 printk("psa_comp_number: %d, ", p->psa_comp_number);
1046 printk("psa_thr_pre_set: 0x%02x\n", p->psa_thr_pre_set); 1042 printk("psa_thr_pre_set: 0x%02x\n", p->psa_thr_pre_set);
@@ -1238,12 +1234,11 @@ wv_packet_info(u_char * p, /* Packet to dump */
1238{ 1234{
1239 int i; 1235 int i;
1240 int maxi; 1236 int maxi;
1241 DECLARE_MAC_BUF(mac);
1242 1237
1243 printk(KERN_DEBUG "%s: %s(): dest %s, length %d\n", 1238 printk(KERN_DEBUG "%s: %s(): dest %pM, length %d\n",
1244 msg1, msg2, print_mac(mac, p), length); 1239 msg1, msg2, p, length);
1245 printk(KERN_DEBUG "%s: %s(): src %s, type 0x%02X%02X\n", 1240 printk(KERN_DEBUG "%s: %s(): src %pM, type 0x%02X%02X\n",
1246 msg1, msg2, print_mac(mac, &p[6]), p[12], p[13]); 1241 msg1, msg2, &p[6], p[12], p[13]);
1247 1242
1248#ifdef DEBUG_PACKET_DUMP 1243#ifdef DEBUG_PACKET_DUMP
1249 1244
@@ -1274,7 +1269,6 @@ wv_init_info(struct net_device * dev)
1274{ 1269{
1275 unsigned int base = dev->base_addr; 1270 unsigned int base = dev->base_addr;
1276 psa_t psa; 1271 psa_t psa;
1277 DECLARE_MAC_BUF(mac);
1278 1272
1279 /* Read the parameter storage area */ 1273 /* Read the parameter storage area */
1280 psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa)); 1274 psa_read(dev, 0, (unsigned char *) &psa, sizeof(psa));
@@ -1291,10 +1285,8 @@ wv_init_info(struct net_device * dev)
1291 1285
1292#ifdef DEBUG_BASIC_SHOW 1286#ifdef DEBUG_BASIC_SHOW
1293 /* Now, let's go for the basic stuff */ 1287 /* Now, let's go for the basic stuff */
1294 printk(KERN_NOTICE "%s: WaveLAN: port %#x, irq %d, " 1288 printk(KERN_NOTICE "%s: WaveLAN: port %#x, irq %d, hw_addr %pM",
1295 "hw_addr %s", 1289 dev->name, base, dev->irq, dev->dev_addr);
1296 dev->name, base, dev->irq,
1297 print_mac(mac, dev->dev_addr));
1298 1290
1299 /* Print current network id */ 1291 /* Print current network id */
1300 if(psa.psa_nwid_select) 1292 if(psa.psa_nwid_select)
@@ -2243,13 +2235,7 @@ static int wavelan_set_wap(struct net_device *dev,
2243 char *extra) 2235 char *extra)
2244{ 2236{
2245#ifdef DEBUG_IOCTL_INFO 2237#ifdef DEBUG_IOCTL_INFO
2246 printk(KERN_DEBUG "Set AP to : %02X:%02X:%02X:%02X:%02X:%02X\n", 2238 printk(KERN_DEBUG "Set AP to : %pM\n", wrqu->ap_addr.sa_data);
2247 wrqu->ap_addr.sa_data[0],
2248 wrqu->ap_addr.sa_data[1],
2249 wrqu->ap_addr.sa_data[2],
2250 wrqu->ap_addr.sa_data[3],
2251 wrqu->ap_addr.sa_data[4],
2252 wrqu->ap_addr.sa_data[5]);
2253#endif /* DEBUG_IOCTL_INFO */ 2239#endif /* DEBUG_IOCTL_INFO */
2254 2240
2255 return -EOPNOTSUPP; 2241 return -EOPNOTSUPP;
@@ -2892,7 +2878,6 @@ wv_packet_read(struct net_device * dev,
2892 netif_rx(skb); 2878 netif_rx(skb);
2893 2879
2894 /* Keep stats up to date */ 2880 /* Keep stats up to date */
2895 dev->last_rx = jiffies;
2896 lp->stats.rx_packets++; 2881 lp->stats.rx_packets++;
2897 lp->stats.rx_bytes += sksize; 2882 lp->stats.rx_bytes += sksize;
2898 2883
@@ -3647,12 +3632,10 @@ wv_82593_config(struct net_device * dev)
3647 int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count; 3632 int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count;
3648 3633
3649#ifdef DEBUG_CONFIG_INFO 3634#ifdef DEBUG_CONFIG_INFO
3650 DECLARE_MAC_BUF(mac);
3651 printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n", 3635 printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n",
3652 dev->name, lp->mc_count); 3636 dev->name, lp->mc_count);
3653 for(dmi=dev->mc_list; dmi; dmi=dmi->next) 3637 for(dmi=dev->mc_list; dmi; dmi=dmi->next)
3654 printk(KERN_DEBUG " %s\n", 3638 printk(KERN_DEBUG " %pM\n", dmi->dmi_addr);
3655 print_mac(mac, dmi->dmi_addr));
3656#endif 3639#endif
3657 3640
3658 /* Initialize adapter's ethernet multicast addresses */ 3641 /* Initialize adapter's ethernet multicast addresses */
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 65ceb088f700..59bb3a55ab48 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -2,7 +2,7 @@
2#define __WL3501_H__ 2#define __WL3501_H__
3 3
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include <net/ieee80211.h> 5#include <linux/ieee80211.h>
6 6
7/* define for WLA 2.0 */ 7/* define for WLA 2.0 */
8#define WL3501_BLKSZ 256 8#define WL3501_BLKSZ 256
@@ -548,7 +548,7 @@ struct wl3501_80211_tx_plcp_hdr {
548 548
549struct wl3501_80211_tx_hdr { 549struct wl3501_80211_tx_hdr {
550 struct wl3501_80211_tx_plcp_hdr pclp_hdr; 550 struct wl3501_80211_tx_plcp_hdr pclp_hdr;
551 struct ieee80211_hdr_4addr mac_hdr; 551 struct ieee80211_hdr mac_hdr;
552} __attribute__ ((packed)); 552} __attribute__ ((packed));
553 553
554/* 554/*
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 68789c6e1ce9..c99a1b6b948f 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -860,10 +860,9 @@ static int wl3501_esbq_confirm(struct wl3501_card *this)
860static void wl3501_online(struct net_device *dev) 860static void wl3501_online(struct net_device *dev)
861{ 861{
862 struct wl3501_card *this = netdev_priv(dev); 862 struct wl3501_card *this = netdev_priv(dev);
863 DECLARE_MAC_BUF(mac);
864 863
865 printk(KERN_INFO "%s: Wireless LAN online. BSSID: %s\n", 864 printk(KERN_INFO "%s: Wireless LAN online. BSSID: %pM\n",
866 dev->name, print_mac(mac, this->bssid)); 865 dev->name, this->bssid);
867 netif_wake_queue(dev); 866 netif_wake_queue(dev);
868} 867}
869 868
@@ -1014,7 +1013,6 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
1014 wl3501_receive(this, skb->data, pkt_len); 1013 wl3501_receive(this, skb->data, pkt_len);
1015 skb_put(skb, pkt_len); 1014 skb_put(skb, pkt_len);
1016 skb->protocol = eth_type_trans(skb, dev); 1015 skb->protocol = eth_type_trans(skb, dev);
1017 dev->last_rx = jiffies;
1018 this->stats.rx_packets++; 1016 this->stats.rx_packets++;
1019 this->stats.rx_bytes += skb->len; 1017 this->stats.rx_bytes += skb->len;
1020 netif_rx(skb); 1018 netif_rx(skb);
@@ -1965,7 +1963,6 @@ static int wl3501_config(struct pcmcia_device *link)
1965 struct net_device *dev = link->priv; 1963 struct net_device *dev = link->priv;
1966 int i = 0, j, last_fn, last_ret; 1964 int i = 0, j, last_fn, last_ret;
1967 struct wl3501_card *this; 1965 struct wl3501_card *this;
1968 DECLARE_MAC_BUF(mac);
1969 1966
1970 /* Try allocating IO ports. This tries a few fixed addresses. If you 1967 /* Try allocating IO ports. This tries a few fixed addresses. If you
1971 * want, you can also read the card's config table to pick addresses -- 1968 * want, you can also read the card's config table to pick addresses --
@@ -2024,9 +2021,9 @@ static int wl3501_config(struct pcmcia_device *link)
2024 2021
2025 /* print probe information */ 2022 /* print probe information */
2026 printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, " 2023 printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, "
2027 "MAC addr in flash ROM:%s\n", 2024 "MAC addr in flash ROM:%pM\n",
2028 dev->name, this->base_addr, (int)dev->irq, 2025 dev->name, this->base_addr, (int)dev->irq,
2029 print_mac(mac, dev->dev_addr)); 2026 dev->dev_addr);
2030 /* 2027 /*
2031 * Initialize card parameters - added by jss 2028 * Initialize card parameters - added by jss
2032 */ 2029 */
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index b16ec6e5f0e3..b45c27d42fd8 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -17,11 +17,11 @@
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/wireless.h> 19#include <linux/wireless.h>
20#include <linux/ieee80211.h>
20#include <net/iw_handler.h> 21#include <net/iw_handler.h>
21#include <linux/string.h> 22#include <linux/string.h>
22#include <linux/if_arp.h> 23#include <linux/if_arp.h>
23#include <linux/firmware.h> 24#include <linux/firmware.h>
24#include <net/ieee80211.h>
25#include "zd1201.h" 25#include "zd1201.h"
26 26
27static struct usb_device_id zd1201_table[] = { 27static struct usb_device_id zd1201_table[] = {
@@ -328,7 +328,6 @@ static void zd1201_usbrx(struct urb *urb)
328 memcpy(skb_put(skb, 2), &data[datalen-24], 2); 328 memcpy(skb_put(skb, 2), &data[datalen-24], 2);
329 memcpy(skb_put(skb, len), data, len); 329 memcpy(skb_put(skb, len), data, len);
330 skb->protocol = eth_type_trans(skb, zd->dev); 330 skb->protocol = eth_type_trans(skb, zd->dev);
331 skb->dev->last_rx = jiffies;
332 zd->stats.rx_packets++; 331 zd->stats.rx_packets++;
333 zd->stats.rx_bytes += skb->len; 332 zd->stats.rx_bytes += skb->len;
334 netif_rx(skb); 333 netif_rx(skb);
@@ -346,7 +345,7 @@ static void zd1201_usbrx(struct urb *urb)
346 frag = kmalloc(sizeof(*frag), GFP_ATOMIC); 345 frag = kmalloc(sizeof(*frag), GFP_ATOMIC);
347 if (!frag) 346 if (!frag)
348 goto resubmit; 347 goto resubmit;
349 skb = dev_alloc_skb(IEEE80211_DATA_LEN +14+2); 348 skb = dev_alloc_skb(IEEE80211_MAX_DATA_LEN +14+2);
350 if (!skb) { 349 if (!skb) {
351 kfree(frag); 350 kfree(frag);
352 goto resubmit; 351 goto resubmit;
@@ -385,7 +384,6 @@ static void zd1201_usbrx(struct urb *urb)
385 memcpy(skb_put(skb, len), data+8, len); 384 memcpy(skb_put(skb, len), data+8, len);
386 } 385 }
387 skb->protocol = eth_type_trans(skb, zd->dev); 386 skb->protocol = eth_type_trans(skb, zd->dev);
388 skb->dev->last_rx = jiffies;
389 zd->stats.rx_packets++; 387 zd->stats.rx_packets++;
390 zd->stats.rx_bytes += skb->len; 388 zd->stats.rx_bytes += skb->len;
391 netif_rx(skb); 389 netif_rx(skb);
@@ -745,7 +743,7 @@ static int zd1201_join(struct zd1201 *zd, char *essid, int essidlen)
745 743
746static int zd1201_net_open(struct net_device *dev) 744static int zd1201_net_open(struct net_device *dev)
747{ 745{
748 struct zd1201 *zd = (struct zd1201 *)dev->priv; 746 struct zd1201 *zd = netdev_priv(dev);
749 747
750 /* Start MAC with wildcard if no essid set */ 748 /* Start MAC with wildcard if no essid set */
751 if (!zd->mac_enabled) 749 if (!zd->mac_enabled)
@@ -783,7 +781,7 @@ static int zd1201_net_stop(struct net_device *dev)
783 */ 781 */
784static int zd1201_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 782static int zd1201_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
785{ 783{
786 struct zd1201 *zd = (struct zd1201 *)dev->priv; 784 struct zd1201 *zd = netdev_priv(dev);
787 unsigned char *txbuf = zd->txdata; 785 unsigned char *txbuf = zd->txdata;
788 int txbuflen, pad = 0, err; 786 int txbuflen, pad = 0, err;
789 struct urb *urb = zd->tx_urb; 787 struct urb *urb = zd->tx_urb;
@@ -833,7 +831,7 @@ static int zd1201_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
833 831
834static void zd1201_tx_timeout(struct net_device *dev) 832static void zd1201_tx_timeout(struct net_device *dev)
835{ 833{
836 struct zd1201 *zd = (struct zd1201 *)dev->priv; 834 struct zd1201 *zd = netdev_priv(dev);
837 835
838 if (!zd) 836 if (!zd)
839 return; 837 return;
@@ -848,7 +846,7 @@ static void zd1201_tx_timeout(struct net_device *dev)
848static int zd1201_set_mac_address(struct net_device *dev, void *p) 846static int zd1201_set_mac_address(struct net_device *dev, void *p)
849{ 847{
850 struct sockaddr *addr = p; 848 struct sockaddr *addr = p;
851 struct zd1201 *zd = (struct zd1201 *)dev->priv; 849 struct zd1201 *zd = netdev_priv(dev);
852 int err; 850 int err;
853 851
854 if (!zd) 852 if (!zd)
@@ -865,21 +863,21 @@ static int zd1201_set_mac_address(struct net_device *dev, void *p)
865 863
866static struct net_device_stats *zd1201_get_stats(struct net_device *dev) 864static struct net_device_stats *zd1201_get_stats(struct net_device *dev)
867{ 865{
868 struct zd1201 *zd = (struct zd1201 *)dev->priv; 866 struct zd1201 *zd = netdev_priv(dev);
869 867
870 return &zd->stats; 868 return &zd->stats;
871} 869}
872 870
873static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev) 871static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev)
874{ 872{
875 struct zd1201 *zd = (struct zd1201 *)dev->priv; 873 struct zd1201 *zd = netdev_priv(dev);
876 874
877 return &zd->iwstats; 875 return &zd->iwstats;
878} 876}
879 877
880static void zd1201_set_multicast(struct net_device *dev) 878static void zd1201_set_multicast(struct net_device *dev)
881{ 879{
882 struct zd1201 *zd = (struct zd1201 *)dev->priv; 880 struct zd1201 *zd = netdev_priv(dev);
883 struct dev_mc_list *mc = dev->mc_list; 881 struct dev_mc_list *mc = dev->mc_list;
884 unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI]; 882 unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
885 int i; 883 int i;
@@ -899,7 +897,7 @@ static void zd1201_set_multicast(struct net_device *dev)
899static int zd1201_config_commit(struct net_device *dev, 897static int zd1201_config_commit(struct net_device *dev,
900 struct iw_request_info *info, struct iw_point *data, char *essid) 898 struct iw_request_info *info, struct iw_point *data, char *essid)
901{ 899{
902 struct zd1201 *zd = (struct zd1201 *)dev->priv; 900 struct zd1201 *zd = netdev_priv(dev);
903 901
904 return zd1201_mac_reset(zd); 902 return zd1201_mac_reset(zd);
905} 903}
@@ -914,7 +912,7 @@ static int zd1201_get_name(struct net_device *dev,
914static int zd1201_set_freq(struct net_device *dev, 912static int zd1201_set_freq(struct net_device *dev,
915 struct iw_request_info *info, struct iw_freq *freq, char *extra) 913 struct iw_request_info *info, struct iw_freq *freq, char *extra)
916{ 914{
917 struct zd1201 *zd = (struct zd1201 *)dev->priv; 915 struct zd1201 *zd = netdev_priv(dev);
918 short channel = 0; 916 short channel = 0;
919 int err; 917 int err;
920 918
@@ -939,7 +937,7 @@ static int zd1201_set_freq(struct net_device *dev,
939static int zd1201_get_freq(struct net_device *dev, 937static int zd1201_get_freq(struct net_device *dev,
940 struct iw_request_info *info, struct iw_freq *freq, char *extra) 938 struct iw_request_info *info, struct iw_freq *freq, char *extra)
941{ 939{
942 struct zd1201 *zd = (struct zd1201 *)dev->priv; 940 struct zd1201 *zd = netdev_priv(dev);
943 short channel; 941 short channel;
944 int err; 942 int err;
945 943
@@ -955,7 +953,7 @@ static int zd1201_get_freq(struct net_device *dev,
955static int zd1201_set_mode(struct net_device *dev, 953static int zd1201_set_mode(struct net_device *dev,
956 struct iw_request_info *info, __u32 *mode, char *extra) 954 struct iw_request_info *info, __u32 *mode, char *extra)
957{ 955{
958 struct zd1201 *zd = (struct zd1201 *)dev->priv; 956 struct zd1201 *zd = netdev_priv(dev);
959 short porttype, monitor = 0; 957 short porttype, monitor = 0;
960 unsigned char buffer[IW_ESSID_MAX_SIZE+2]; 958 unsigned char buffer[IW_ESSID_MAX_SIZE+2];
961 int err; 959 int err;
@@ -1017,7 +1015,7 @@ static int zd1201_set_mode(struct net_device *dev,
1017static int zd1201_get_mode(struct net_device *dev, 1015static int zd1201_get_mode(struct net_device *dev,
1018 struct iw_request_info *info, __u32 *mode, char *extra) 1016 struct iw_request_info *info, __u32 *mode, char *extra)
1019{ 1017{
1020 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1018 struct zd1201 *zd = netdev_priv(dev);
1021 short porttype; 1019 short porttype;
1022 int err; 1020 int err;
1023 1021
@@ -1093,7 +1091,7 @@ static int zd1201_get_range(struct net_device *dev,
1093static int zd1201_get_wap(struct net_device *dev, 1091static int zd1201_get_wap(struct net_device *dev,
1094 struct iw_request_info *info, struct sockaddr *ap_addr, char *extra) 1092 struct iw_request_info *info, struct sockaddr *ap_addr, char *extra)
1095{ 1093{
1096 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1094 struct zd1201 *zd = netdev_priv(dev);
1097 unsigned char buffer[6]; 1095 unsigned char buffer[6];
1098 1096
1099 if (!zd1201_getconfig(zd, ZD1201_RID_COMMSQUALITY, buffer, 6)) { 1097 if (!zd1201_getconfig(zd, ZD1201_RID_COMMSQUALITY, buffer, 6)) {
@@ -1121,7 +1119,7 @@ static int zd1201_set_scan(struct net_device *dev,
1121static int zd1201_get_scan(struct net_device *dev, 1119static int zd1201_get_scan(struct net_device *dev,
1122 struct iw_request_info *info, struct iw_point *srq, char *extra) 1120 struct iw_request_info *info, struct iw_point *srq, char *extra)
1123{ 1121{
1124 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1122 struct zd1201 *zd = netdev_priv(dev);
1125 int err, i, j, enabled_save; 1123 int err, i, j, enabled_save;
1126 struct iw_event iwe; 1124 struct iw_event iwe;
1127 char *cev = extra; 1125 char *cev = extra;
@@ -1213,7 +1211,7 @@ static int zd1201_get_scan(struct net_device *dev,
1213static int zd1201_set_essid(struct net_device *dev, 1211static int zd1201_set_essid(struct net_device *dev,
1214 struct iw_request_info *info, struct iw_point *data, char *essid) 1212 struct iw_request_info *info, struct iw_point *data, char *essid)
1215{ 1213{
1216 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1214 struct zd1201 *zd = netdev_priv(dev);
1217 1215
1218 if (data->length > IW_ESSID_MAX_SIZE) 1216 if (data->length > IW_ESSID_MAX_SIZE)
1219 return -EINVAL; 1217 return -EINVAL;
@@ -1228,7 +1226,7 @@ static int zd1201_set_essid(struct net_device *dev,
1228static int zd1201_get_essid(struct net_device *dev, 1226static int zd1201_get_essid(struct net_device *dev,
1229 struct iw_request_info *info, struct iw_point *data, char *essid) 1227 struct iw_request_info *info, struct iw_point *data, char *essid)
1230{ 1228{
1231 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1229 struct zd1201 *zd = netdev_priv(dev);
1232 1230
1233 memcpy(essid, zd->essid, zd->essidlen); 1231 memcpy(essid, zd->essid, zd->essidlen);
1234 data->flags = 1; 1232 data->flags = 1;
@@ -1249,7 +1247,7 @@ static int zd1201_get_nick(struct net_device *dev, struct iw_request_info *info,
1249static int zd1201_set_rate(struct net_device *dev, 1247static int zd1201_set_rate(struct net_device *dev,
1250 struct iw_request_info *info, struct iw_param *rrq, char *extra) 1248 struct iw_request_info *info, struct iw_param *rrq, char *extra)
1251{ 1249{
1252 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1250 struct zd1201 *zd = netdev_priv(dev);
1253 short rate; 1251 short rate;
1254 int err; 1252 int err;
1255 1253
@@ -1282,7 +1280,7 @@ static int zd1201_set_rate(struct net_device *dev,
1282static int zd1201_get_rate(struct net_device *dev, 1280static int zd1201_get_rate(struct net_device *dev,
1283 struct iw_request_info *info, struct iw_param *rrq, char *extra) 1281 struct iw_request_info *info, struct iw_param *rrq, char *extra)
1284{ 1282{
1285 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1283 struct zd1201 *zd = netdev_priv(dev);
1286 short rate; 1284 short rate;
1287 int err; 1285 int err;
1288 1286
@@ -1315,7 +1313,7 @@ static int zd1201_get_rate(struct net_device *dev,
1315static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info, 1313static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info,
1316 struct iw_param *rts, char *extra) 1314 struct iw_param *rts, char *extra)
1317{ 1315{
1318 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1316 struct zd1201 *zd = netdev_priv(dev);
1319 int err; 1317 int err;
1320 short val = rts->value; 1318 short val = rts->value;
1321 1319
@@ -1335,7 +1333,7 @@ static int zd1201_set_rts(struct net_device *dev, struct iw_request_info *info,
1335static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info, 1333static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info,
1336 struct iw_param *rts, char *extra) 1334 struct iw_param *rts, char *extra)
1337{ 1335{
1338 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1336 struct zd1201 *zd = netdev_priv(dev);
1339 short rtst; 1337 short rtst;
1340 int err; 1338 int err;
1341 1339
@@ -1352,7 +1350,7 @@ static int zd1201_get_rts(struct net_device *dev, struct iw_request_info *info,
1352static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info, 1350static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info,
1353 struct iw_param *frag, char *extra) 1351 struct iw_param *frag, char *extra)
1354{ 1352{
1355 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1353 struct zd1201 *zd = netdev_priv(dev);
1356 int err; 1354 int err;
1357 short val = frag->value; 1355 short val = frag->value;
1358 1356
@@ -1373,7 +1371,7 @@ static int zd1201_set_frag(struct net_device *dev, struct iw_request_info *info,
1373static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info, 1371static int zd1201_get_frag(struct net_device *dev, struct iw_request_info *info,
1374 struct iw_param *frag, char *extra) 1372 struct iw_param *frag, char *extra)
1375{ 1373{
1376 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1374 struct zd1201 *zd = netdev_priv(dev);
1377 short fragt; 1375 short fragt;
1378 int err; 1376 int err;
1379 1377
@@ -1402,7 +1400,7 @@ static int zd1201_get_retry(struct net_device *dev,
1402static int zd1201_set_encode(struct net_device *dev, 1400static int zd1201_set_encode(struct net_device *dev,
1403 struct iw_request_info *info, struct iw_point *erq, char *key) 1401 struct iw_request_info *info, struct iw_point *erq, char *key)
1404{ 1402{
1405 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1403 struct zd1201 *zd = netdev_priv(dev);
1406 short i; 1404 short i;
1407 int err, rid; 1405 int err, rid;
1408 1406
@@ -1459,7 +1457,7 @@ static int zd1201_set_encode(struct net_device *dev,
1459static int zd1201_get_encode(struct net_device *dev, 1457static int zd1201_get_encode(struct net_device *dev,
1460 struct iw_request_info *info, struct iw_point *erq, char *key) 1458 struct iw_request_info *info, struct iw_point *erq, char *key)
1461{ 1459{
1462 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1460 struct zd1201 *zd = netdev_priv(dev);
1463 short i; 1461 short i;
1464 int err; 1462 int err;
1465 1463
@@ -1492,7 +1490,7 @@ static int zd1201_get_encode(struct net_device *dev,
1492static int zd1201_set_power(struct net_device *dev, 1490static int zd1201_set_power(struct net_device *dev,
1493 struct iw_request_info *info, struct iw_param *vwrq, char *extra) 1491 struct iw_request_info *info, struct iw_param *vwrq, char *extra)
1494{ 1492{
1495 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1493 struct zd1201 *zd = netdev_priv(dev);
1496 short enabled, duration, level; 1494 short enabled, duration, level;
1497 int err; 1495 int err;
1498 1496
@@ -1531,7 +1529,7 @@ out:
1531static int zd1201_get_power(struct net_device *dev, 1529static int zd1201_get_power(struct net_device *dev,
1532 struct iw_request_info *info, struct iw_param *vwrq, char *extra) 1530 struct iw_request_info *info, struct iw_param *vwrq, char *extra)
1533{ 1531{
1534 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1532 struct zd1201 *zd = netdev_priv(dev);
1535 short enabled, level, duration; 1533 short enabled, level, duration;
1536 int err; 1534 int err;
1537 1535
@@ -1618,7 +1616,7 @@ static const iw_handler zd1201_iw_handler[] =
1618static int zd1201_set_hostauth(struct net_device *dev, 1616static int zd1201_set_hostauth(struct net_device *dev,
1619 struct iw_request_info *info, struct iw_param *rrq, char *extra) 1617 struct iw_request_info *info, struct iw_param *rrq, char *extra)
1620{ 1618{
1621 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1619 struct zd1201 *zd = netdev_priv(dev);
1622 1620
1623 if (!zd->ap) 1621 if (!zd->ap)
1624 return -EOPNOTSUPP; 1622 return -EOPNOTSUPP;
@@ -1629,7 +1627,7 @@ static int zd1201_set_hostauth(struct net_device *dev,
1629static int zd1201_get_hostauth(struct net_device *dev, 1627static int zd1201_get_hostauth(struct net_device *dev,
1630 struct iw_request_info *info, struct iw_param *rrq, char *extra) 1628 struct iw_request_info *info, struct iw_param *rrq, char *extra)
1631{ 1629{
1632 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1630 struct zd1201 *zd = netdev_priv(dev);
1633 short hostauth; 1631 short hostauth;
1634 int err; 1632 int err;
1635 1633
@@ -1648,7 +1646,7 @@ static int zd1201_get_hostauth(struct net_device *dev,
1648static int zd1201_auth_sta(struct net_device *dev, 1646static int zd1201_auth_sta(struct net_device *dev,
1649 struct iw_request_info *info, struct sockaddr *sta, char *extra) 1647 struct iw_request_info *info, struct sockaddr *sta, char *extra)
1650{ 1648{
1651 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1649 struct zd1201 *zd = netdev_priv(dev);
1652 unsigned char buffer[10]; 1650 unsigned char buffer[10];
1653 1651
1654 if (!zd->ap) 1652 if (!zd->ap)
@@ -1664,7 +1662,7 @@ static int zd1201_auth_sta(struct net_device *dev,
1664static int zd1201_set_maxassoc(struct net_device *dev, 1662static int zd1201_set_maxassoc(struct net_device *dev,
1665 struct iw_request_info *info, struct iw_param *rrq, char *extra) 1663 struct iw_request_info *info, struct iw_param *rrq, char *extra)
1666{ 1664{
1667 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1665 struct zd1201 *zd = netdev_priv(dev);
1668 int err; 1666 int err;
1669 1667
1670 if (!zd->ap) 1668 if (!zd->ap)
@@ -1679,7 +1677,7 @@ static int zd1201_set_maxassoc(struct net_device *dev,
1679static int zd1201_get_maxassoc(struct net_device *dev, 1677static int zd1201_get_maxassoc(struct net_device *dev,
1680 struct iw_request_info *info, struct iw_param *rrq, char *extra) 1678 struct iw_request_info *info, struct iw_param *rrq, char *extra)
1681{ 1679{
1682 struct zd1201 *zd = (struct zd1201 *)dev->priv; 1680 struct zd1201 *zd = netdev_priv(dev);
1683 short maxassoc; 1681 short maxassoc;
1684 int err; 1682 int err;
1685 1683
@@ -1731,6 +1729,7 @@ static int zd1201_probe(struct usb_interface *interface,
1731 const struct usb_device_id *id) 1729 const struct usb_device_id *id)
1732{ 1730{
1733 struct zd1201 *zd; 1731 struct zd1201 *zd;
1732 struct net_device *dev;
1734 struct usb_device *usb; 1733 struct usb_device *usb;
1735 int err; 1734 int err;
1736 short porttype; 1735 short porttype;
@@ -1738,9 +1737,12 @@ static int zd1201_probe(struct usb_interface *interface,
1738 1737
1739 usb = interface_to_usbdev(interface); 1738 usb = interface_to_usbdev(interface);
1740 1739
1741 zd = kzalloc(sizeof(struct zd1201), GFP_KERNEL); 1740 dev = alloc_etherdev(sizeof(*zd));
1742 if (!zd) 1741 if (!dev)
1743 return -ENOMEM; 1742 return -ENOMEM;
1743 zd = netdev_priv(dev);
1744 zd->dev = dev;
1745
1744 zd->ap = ap; 1746 zd->ap = ap;
1745 zd->usb = usb; 1747 zd->usb = usb;
1746 zd->removed = 0; 1748 zd->removed = 0;
@@ -1775,34 +1777,29 @@ static int zd1201_probe(struct usb_interface *interface,
1775 if (err) 1777 if (err)
1776 goto err_start; 1778 goto err_start;
1777 1779
1778 zd->dev = alloc_etherdev(0); 1780 dev->open = zd1201_net_open;
1779 if (!zd->dev) 1781 dev->stop = zd1201_net_stop;
1780 goto err_start; 1782 dev->get_stats = zd1201_get_stats;
1781 1783 dev->wireless_handlers =
1782 zd->dev->priv = zd;
1783 zd->dev->open = zd1201_net_open;
1784 zd->dev->stop = zd1201_net_stop;
1785 zd->dev->get_stats = zd1201_get_stats;
1786 zd->dev->wireless_handlers =
1787 (struct iw_handler_def *)&zd1201_iw_handlers; 1784 (struct iw_handler_def *)&zd1201_iw_handlers;
1788 zd->dev->hard_start_xmit = zd1201_hard_start_xmit; 1785 dev->hard_start_xmit = zd1201_hard_start_xmit;
1789 zd->dev->watchdog_timeo = ZD1201_TX_TIMEOUT; 1786 dev->watchdog_timeo = ZD1201_TX_TIMEOUT;
1790 zd->dev->tx_timeout = zd1201_tx_timeout; 1787 dev->tx_timeout = zd1201_tx_timeout;
1791 zd->dev->set_multicast_list = zd1201_set_multicast; 1788 dev->set_multicast_list = zd1201_set_multicast;
1792 zd->dev->set_mac_address = zd1201_set_mac_address; 1789 dev->set_mac_address = zd1201_set_mac_address;
1793 strcpy(zd->dev->name, "wlan%d"); 1790 strcpy(dev->name, "wlan%d");
1794 1791
1795 err = zd1201_getconfig(zd, ZD1201_RID_CNFOWNMACADDR, 1792 err = zd1201_getconfig(zd, ZD1201_RID_CNFOWNMACADDR,
1796 zd->dev->dev_addr, zd->dev->addr_len); 1793 dev->dev_addr, dev->addr_len);
1797 if (err) 1794 if (err)
1798 goto err_net; 1795 goto err_start;
1799 1796
1800 /* Set wildcard essid to match zd->essid */ 1797 /* Set wildcard essid to match zd->essid */
1801 *(__le16 *)buf = cpu_to_le16(0); 1798 *(__le16 *)buf = cpu_to_le16(0);
1802 err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buf, 1799 err = zd1201_setconfig(zd, ZD1201_RID_CNFDESIREDSSID, buf,
1803 IW_ESSID_MAX_SIZE+2, 1); 1800 IW_ESSID_MAX_SIZE+2, 1);
1804 if (err) 1801 if (err)
1805 goto err_net; 1802 goto err_start;
1806 1803
1807 if (zd->ap) 1804 if (zd->ap)
1808 porttype = ZD1201_PORTTYPE_AP; 1805 porttype = ZD1201_PORTTYPE_AP;
@@ -1810,30 +1807,28 @@ static int zd1201_probe(struct usb_interface *interface,
1810 porttype = ZD1201_PORTTYPE_BSS; 1807 porttype = ZD1201_PORTTYPE_BSS;
1811 err = zd1201_setconfig16(zd, ZD1201_RID_CNFPORTTYPE, porttype); 1808 err = zd1201_setconfig16(zd, ZD1201_RID_CNFPORTTYPE, porttype);
1812 if (err) 1809 if (err)
1813 goto err_net; 1810 goto err_start;
1814 1811
1815 SET_NETDEV_DEV(zd->dev, &usb->dev); 1812 SET_NETDEV_DEV(dev, &usb->dev);
1816 1813
1817 err = register_netdev(zd->dev); 1814 err = register_netdev(dev);
1818 if (err) 1815 if (err)
1819 goto err_net; 1816 goto err_start;
1820 dev_info(&usb->dev, "%s: ZD1201 USB Wireless interface\n", 1817 dev_info(&usb->dev, "%s: ZD1201 USB Wireless interface\n",
1821 zd->dev->name); 1818 dev->name);
1822 1819
1823 usb_set_intfdata(interface, zd); 1820 usb_set_intfdata(interface, zd);
1824 zd1201_enable(zd); /* zd1201 likes to startup enabled, */ 1821 zd1201_enable(zd); /* zd1201 likes to startup enabled, */
1825 zd1201_disable(zd); /* interfering with all the wifis in range */ 1822 zd1201_disable(zd); /* interfering with all the wifis in range */
1826 return 0; 1823 return 0;
1827 1824
1828err_net:
1829 free_netdev(zd->dev);
1830err_start: 1825err_start:
1831 /* Leave the device in reset state */ 1826 /* Leave the device in reset state */
1832 zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0); 1827 zd1201_docmd(zd, ZD1201_CMDCODE_INIT, 0, 0, 0);
1833err_zd: 1828err_zd:
1834 usb_free_urb(zd->tx_urb); 1829 usb_free_urb(zd->tx_urb);
1835 usb_free_urb(zd->rx_urb); 1830 usb_free_urb(zd->rx_urb);
1836 kfree(zd); 1831 free_netdev(dev);
1837 return err; 1832 return err;
1838} 1833}
1839 1834
@@ -1846,10 +1841,6 @@ static void zd1201_disconnect(struct usb_interface *interface)
1846 if (!zd) 1841 if (!zd)
1847 return; 1842 return;
1848 usb_set_intfdata(interface, NULL); 1843 usb_set_intfdata(interface, NULL);
1849 if (zd->dev) {
1850 unregister_netdev(zd->dev);
1851 free_netdev(zd->dev);
1852 }
1853 1844
1854 hlist_for_each_entry_safe(frag, node, node2, &zd->fraglist, fnode) { 1845 hlist_for_each_entry_safe(frag, node, node2, &zd->fraglist, fnode) {
1855 hlist_del_init(&frag->fnode); 1846 hlist_del_init(&frag->fnode);
@@ -1865,7 +1856,11 @@ static void zd1201_disconnect(struct usb_interface *interface)
1865 usb_kill_urb(zd->rx_urb); 1856 usb_kill_urb(zd->rx_urb);
1866 usb_free_urb(zd->rx_urb); 1857 usb_free_urb(zd->rx_urb);
1867 } 1858 }
1868 kfree(zd); 1859
1860 if (zd->dev) {
1861 unregister_netdev(zd->dev);
1862 free_netdev(zd->dev);
1863 }
1869} 1864}
1870 1865
1871#ifdef CONFIG_PM 1866#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index e0ac58b8ff1f..f1519143f8a6 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -378,7 +378,6 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
378 [0] = { .addr = CR_MAC_ADDR_P1 }, 378 [0] = { .addr = CR_MAC_ADDR_P1 },
379 [1] = { .addr = CR_MAC_ADDR_P2 }, 379 [1] = { .addr = CR_MAC_ADDR_P2 },
380 }; 380 };
381 DECLARE_MAC_BUF(mac);
382 381
383 if (mac_addr) { 382 if (mac_addr) {
384 reqs[0].value = (mac_addr[3] << 24) 383 reqs[0].value = (mac_addr[3] << 24)
@@ -387,8 +386,7 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr)
387 | mac_addr[0]; 386 | mac_addr[0];
388 reqs[1].value = (mac_addr[5] << 8) 387 reqs[1].value = (mac_addr[5] << 8)
389 | mac_addr[4]; 388 | mac_addr[4];
390 dev_dbg_f(zd_chip_dev(chip), 389 dev_dbg_f(zd_chip_dev(chip), "mac addr %pM\n", mac_addr);
391 "mac addr %s\n", print_mac(mac, mac_addr));
392 } else { 390 } else {
393 dev_dbg_f(zd_chip_dev(chip), "set NULL mac\n"); 391 dev_dbg_f(zd_chip_dev(chip), "set NULL mac\n");
394 } 392 }
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index cac732f4047f..9caa96a13586 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -171,7 +171,7 @@ int zd_mac_init_hw(struct ieee80211_hw *hw)
171 171
172 r = zd_reg2alpha2(mac->regdomain, alpha2); 172 r = zd_reg2alpha2(mac->regdomain, alpha2);
173 if (!r) 173 if (!r)
174 regulatory_hint(hw->wiphy, alpha2, NULL); 174 regulatory_hint(hw->wiphy, alpha2);
175 175
176 r = 0; 176 r = 0;
177disable_int: 177disable_int:
@@ -296,15 +296,14 @@ static void zd_op_stop(struct ieee80211_hw *hw)
296 * If no status information has been requested, the skb is freed. 296 * If no status information has been requested, the skb is freed.
297 */ 297 */
298static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, 298static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
299 u32 flags, int ackssi, bool success) 299 int ackssi, bool success)
300{ 300{
301 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 301 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
302 302
303 memset(&info->status, 0, sizeof(info->status)); 303 ieee80211_tx_info_clear_status(info);
304 304
305 if (!success) 305 if (success)
306 info->status.excessive_retries = 1; 306 info->flags |= IEEE80211_TX_STAT_ACK;
307 info->flags |= flags;
308 info->status.ack_signal = ackssi; 307 info->status.ack_signal = ackssi;
309 ieee80211_tx_status_irqsafe(hw, skb); 308 ieee80211_tx_status_irqsafe(hw, skb);
310} 309}
@@ -326,7 +325,7 @@ void zd_mac_tx_failed(struct ieee80211_hw *hw)
326 if (skb == NULL) 325 if (skb == NULL)
327 return; 326 return;
328 327
329 tx_status(hw, skb, 0, 0, 0); 328 tx_status(hw, skb, 0, 0);
330} 329}
331 330
332/** 331/**
@@ -342,12 +341,12 @@ void zd_mac_tx_failed(struct ieee80211_hw *hw)
342void zd_mac_tx_to_dev(struct sk_buff *skb, int error) 341void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
343{ 342{
344 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 343 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
345 struct ieee80211_hw *hw = info->driver_data[0]; 344 struct ieee80211_hw *hw = info->rate_driver_data[0];
346 345
347 skb_pull(skb, sizeof(struct zd_ctrlset)); 346 skb_pull(skb, sizeof(struct zd_ctrlset));
348 if (unlikely(error || 347 if (unlikely(error ||
349 (info->flags & IEEE80211_TX_CTL_NO_ACK))) { 348 (info->flags & IEEE80211_TX_CTL_NO_ACK))) {
350 tx_status(hw, skb, 0, 0, !error); 349 tx_status(hw, skb, 0, !error);
351 } else { 350 } else {
352 struct sk_buff_head *q = 351 struct sk_buff_head *q =
353 &zd_hw_mac(hw)->ack_wait_queue; 352 &zd_hw_mac(hw)->ack_wait_queue;
@@ -406,7 +405,8 @@ static int zd_calc_tx_length_us(u8 *service, u8 zd_rate, u16 tx_length)
406} 405}
407 406
408static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, 407static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
409 struct ieee80211_hdr *header, u32 flags) 408 struct ieee80211_hdr *header,
409 struct ieee80211_tx_info *info)
410{ 410{
411 /* 411 /*
412 * CONTROL TODO: 412 * CONTROL TODO:
@@ -417,7 +417,7 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
417 cs->control = 0; 417 cs->control = 0;
418 418
419 /* First fragment */ 419 /* First fragment */
420 if (flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) 420 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
421 cs->control |= ZD_CS_NEED_RANDOM_BACKOFF; 421 cs->control |= ZD_CS_NEED_RANDOM_BACKOFF;
422 422
423 /* Multicast */ 423 /* Multicast */
@@ -428,10 +428,10 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
428 if (ieee80211_is_pspoll(header->frame_control)) 428 if (ieee80211_is_pspoll(header->frame_control))
429 cs->control |= ZD_CS_PS_POLL_FRAME; 429 cs->control |= ZD_CS_PS_POLL_FRAME;
430 430
431 if (flags & IEEE80211_TX_CTL_USE_RTS_CTS) 431 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
432 cs->control |= ZD_CS_RTS; 432 cs->control |= ZD_CS_RTS;
433 433
434 if (flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) 434 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
435 cs->control |= ZD_CS_SELF_CTS; 435 cs->control |= ZD_CS_SELF_CTS;
436 436
437 /* FIXME: Management frame? */ 437 /* FIXME: Management frame? */
@@ -517,12 +517,12 @@ static int fill_ctrlset(struct zd_mac *mac,
517 txrate = ieee80211_get_tx_rate(mac->hw, info); 517 txrate = ieee80211_get_tx_rate(mac->hw, info);
518 518
519 cs->modulation = txrate->hw_value; 519 cs->modulation = txrate->hw_value;
520 if (info->flags & IEEE80211_TX_CTL_SHORT_PREAMBLE) 520 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
521 cs->modulation = txrate->hw_value_short; 521 cs->modulation = txrate->hw_value_short;
522 522
523 cs->tx_length = cpu_to_le16(frag_len); 523 cs->tx_length = cpu_to_le16(frag_len);
524 524
525 cs_set_control(mac, cs, hdr, info->flags); 525 cs_set_control(mac, cs, hdr, info);
526 526
527 packet_length = frag_len + sizeof(struct zd_ctrlset) + 10; 527 packet_length = frag_len + sizeof(struct zd_ctrlset) + 10;
528 ZD_ASSERT(packet_length <= 0xffff); 528 ZD_ASSERT(packet_length <= 0xffff);
@@ -577,7 +577,7 @@ static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
577 if (r) 577 if (r)
578 return r; 578 return r;
579 579
580 info->driver_data[0] = hw; 580 info->rate_driver_data[0] = hw;
581 581
582 r = zd_usb_tx(&mac->chip.usb, skb); 582 r = zd_usb_tx(&mac->chip.usb, skb);
583 if (r) 583 if (r)
@@ -618,7 +618,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
618 if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN))) 618 if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
619 { 619 {
620 __skb_unlink(skb, q); 620 __skb_unlink(skb, q);
621 tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1); 621 tx_status(hw, skb, stats->signal, 1);
622 goto out; 622 goto out;
623 } 623 }
624 } 624 }
@@ -743,9 +743,11 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
743 zd_write_mac_addr(&mac->chip, NULL); 743 zd_write_mac_addr(&mac->chip, NULL);
744} 744}
745 745
746static int zd_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 746static int zd_op_config(struct ieee80211_hw *hw, u32 changed)
747{ 747{
748 struct zd_mac *mac = zd_hw_mac(hw); 748 struct zd_mac *mac = zd_hw_mac(hw);
749 struct ieee80211_conf *conf = &hw->conf;
750
749 return zd_chip_set_channel(&mac->chip, conf->channel->hw_value); 751 return zd_chip_set_channel(&mac->chip, conf->channel->hw_value);
750} 752}
751 753
@@ -852,14 +854,12 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
852 if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)) { 854 if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI)) {
853 zd_mc_add_all(&hash); 855 zd_mc_add_all(&hash);
854 } else { 856 } else {
855 DECLARE_MAC_BUF(macbuf);
856
857 zd_mc_clear(&hash); 857 zd_mc_clear(&hash);
858 for (i = 0; i < mc_count; i++) { 858 for (i = 0; i < mc_count; i++) {
859 if (!mclist) 859 if (!mclist)
860 break; 860 break;
861 dev_dbg_f(zd_mac_dev(mac), "mc addr %s\n", 861 dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n",
862 print_mac(macbuf, mclist->dmi_addr)); 862 mclist->dmi_addr);
863 zd_mc_add_addr(&hash, mclist->dmi_addr); 863 zd_mc_add_addr(&hash, mclist->dmi_addr);
864 mclist = mclist->next; 864 mclist = mclist->next;
865 } 865 }
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index a3ccd8c1c716..04c139666965 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -909,7 +909,7 @@ free_urb:
909 * it might be freed by zd_mac_tx_to_dev or mac80211) 909 * it might be freed by zd_mac_tx_to_dev or mac80211)
910 */ 910 */
911 info = IEEE80211_SKB_CB(skb); 911 info = IEEE80211_SKB_CB(skb);
912 usb = &zd_hw_mac(info->driver_data[0])->chip.usb; 912 usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb;
913 zd_mac_tx_to_dev(skb, urb->status); 913 zd_mac_tx_to_dev(skb, urb->status);
914 free_tx_urb(usb, urb); 914 free_tx_urb(usb, urb);
915 tx_dec_submitted_urbs(usb); 915 tx_dec_submitted_urbs(usb);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 6d017adc914a..761635be9104 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -196,7 +196,7 @@ static void rx_refill_timeout(unsigned long data)
196{ 196{
197 struct net_device *dev = (struct net_device *)data; 197 struct net_device *dev = (struct net_device *)data;
198 struct netfront_info *np = netdev_priv(dev); 198 struct netfront_info *np = netdev_priv(dev);
199 netif_rx_schedule(dev, &np->napi); 199 netif_rx_schedule(&np->napi);
200} 200}
201 201
202static int netfront_tx_slot_available(struct netfront_info *np) 202static int netfront_tx_slot_available(struct netfront_info *np)
@@ -328,7 +328,7 @@ static int xennet_open(struct net_device *dev)
328 xennet_alloc_rx_buffers(dev); 328 xennet_alloc_rx_buffers(dev);
329 np->rx.sring->rsp_event = np->rx.rsp_cons + 1; 329 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
330 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) 330 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
331 netif_rx_schedule(dev, &np->napi); 331 netif_rx_schedule(&np->napi);
332 } 332 }
333 spin_unlock_bh(&np->rx_lock); 333 spin_unlock_bh(&np->rx_lock);
334 334
@@ -841,7 +841,6 @@ static int handle_incoming_queue(struct net_device *dev,
841 841
842 /* Pass it up. */ 842 /* Pass it up. */
843 netif_receive_skb(skb); 843 netif_receive_skb(skb);
844 dev->last_rx = jiffies;
845 } 844 }
846 845
847 return packets_dropped; 846 return packets_dropped;
@@ -980,7 +979,7 @@ err:
980 979
981 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do); 980 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
982 if (!more_to_do) 981 if (!more_to_do)
983 __netif_rx_complete(dev, napi); 982 __netif_rx_complete(napi);
984 983
985 local_irq_restore(flags); 984 local_irq_restore(flags);
986 } 985 }
@@ -1311,7 +1310,7 @@ static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1311 xennet_tx_buf_gc(dev); 1310 xennet_tx_buf_gc(dev);
1312 /* Under tx_lock: protects access to rx shared-ring indexes. */ 1311 /* Under tx_lock: protects access to rx shared-ring indexes. */
1313 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) 1312 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
1314 netif_rx_schedule(dev, &np->napi); 1313 netif_rx_schedule(&np->napi);
1315 } 1314 }
1316 1315
1317 spin_unlock_irqrestore(&np->tx_lock, flags); 1316 spin_unlock_irqrestore(&np->tx_lock, flags);
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
index da42aa06a3ba..03a3f34e9039 100644
--- a/drivers/net/xtsonic.c
+++ b/drivers/net/xtsonic.c
@@ -239,8 +239,6 @@ int __init xtsonic_probe(struct platform_device *pdev)
239 struct resource *resmem, *resirq; 239 struct resource *resmem, *resirq;
240 int err = 0; 240 int err = 0;
241 241
242 DECLARE_MAC_BUF(mac);
243
244 if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL) 242 if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL)
245 return -ENODEV; 243 return -ENODEV;
246 244
@@ -263,8 +261,8 @@ int __init xtsonic_probe(struct platform_device *pdev)
263 if ((err = register_netdev(dev))) 261 if ((err = register_netdev(dev)))
264 goto out1; 262 goto out1;
265 263
266 printk("%s: SONIC ethernet @%08lx, MAC %s, IRQ %d\n", dev->name, 264 printk("%s: SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", dev->name,
267 dev->base_addr, print_mac(mac, dev->dev_addr), dev->irq); 265 dev->base_addr, dev->dev_addr, dev->irq);
268 266
269 return 0; 267 return 0;
270 268
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 57e1f495b9fc..cf9712922778 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -355,6 +355,16 @@ static int yellowfin_close(struct net_device *dev);
355static void set_rx_mode(struct net_device *dev); 355static void set_rx_mode(struct net_device *dev);
356static const struct ethtool_ops ethtool_ops; 356static const struct ethtool_ops ethtool_ops;
357 357
358static const struct net_device_ops netdev_ops = {
359 .ndo_open = yellowfin_open,
360 .ndo_stop = yellowfin_close,
361 .ndo_start_xmit = yellowfin_start_xmit,
362 .ndo_set_multicast_list = set_rx_mode,
363 .ndo_change_mtu = eth_change_mtu,
364 .ndo_validate_addr = eth_validate_addr,
365 .ndo_do_ioctl = netdev_ioctl,
366 .ndo_tx_timeout = yellowfin_tx_timeout,
367};
358 368
359static int __devinit yellowfin_init_one(struct pci_dev *pdev, 369static int __devinit yellowfin_init_one(struct pci_dev *pdev,
360 const struct pci_device_id *ent) 370 const struct pci_device_id *ent)
@@ -374,7 +384,6 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
374#else 384#else
375 int bar = 1; 385 int bar = 1;
376#endif 386#endif
377 DECLARE_MAC_BUF(mac);
378 387
379/* when built into the kernel, we only print version if device is found */ 388/* when built into the kernel, we only print version if device is found */
380#ifndef MODULE 389#ifndef MODULE
@@ -465,13 +474,8 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
465 np->duplex_lock = 1; 474 np->duplex_lock = 1;
466 475
467 /* The Yellowfin-specific entries in the device structure. */ 476 /* The Yellowfin-specific entries in the device structure. */
468 dev->open = &yellowfin_open; 477 dev->netdev_ops = &netdev_ops;
469 dev->hard_start_xmit = &yellowfin_start_xmit;
470 dev->stop = &yellowfin_close;
471 dev->set_multicast_list = &set_rx_mode;
472 dev->do_ioctl = &netdev_ioctl;
473 SET_ETHTOOL_OPS(dev, &ethtool_ops); 478 SET_ETHTOOL_OPS(dev, &ethtool_ops);
474 dev->tx_timeout = yellowfin_tx_timeout;
475 dev->watchdog_timeo = TX_TIMEOUT; 479 dev->watchdog_timeo = TX_TIMEOUT;
476 480
477 if (mtu) 481 if (mtu)
@@ -481,10 +485,10 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
481 if (i) 485 if (i)
482 goto err_out_unmap_status; 486 goto err_out_unmap_status;
483 487
484 printk(KERN_INFO "%s: %s type %8x at %p, %s, IRQ %d.\n", 488 printk(KERN_INFO "%s: %s type %8x at %p, %pM, IRQ %d.\n",
485 dev->name, pci_id_tbl[chip_idx].name, 489 dev->name, pci_id_tbl[chip_idx].name,
486 ioread32(ioaddr + ChipRev), ioaddr, 490 ioread32(ioaddr + ChipRev), ioaddr,
487 print_mac(mac, dev->dev_addr), irq); 491 dev->dev_addr, irq);
488 492
489 if (np->drv_flags & HasMII) { 493 if (np->drv_flags & HasMII) {
490 int phy, phy_idx = 0; 494 int phy, phy_idx = 0;
@@ -1100,11 +1104,9 @@ static int yellowfin_rx(struct net_device *dev)
1100 memcmp(le32_to_cpu(yp->rx_ring_dma + 1104 memcmp(le32_to_cpu(yp->rx_ring_dma +
1101 entry*sizeof(struct yellowfin_desc)), 1105 entry*sizeof(struct yellowfin_desc)),
1102 "\377\377\377\377\377\377", 6) != 0) { 1106 "\377\377\377\377\377\377", 6) != 0) {
1103 if (bogus_rx++ == 0) { 1107 if (bogus_rx++ == 0)
1104 DECLARE_MAC_BUF(mac); 1108 printk(KERN_WARNING "%s: Bad frame to %pM\n",
1105 printk(KERN_WARNING "%s: Bad frame to %s\n", 1109 dev->name, buf_addr);
1106 dev->name, print_mac(mac, buf_addr));
1107 }
1108#endif 1110#endif
1109 } else { 1111 } else {
1110 struct sk_buff *skb; 1112 struct sk_buff *skb;
@@ -1141,7 +1143,6 @@ static int yellowfin_rx(struct net_device *dev)
1141 } 1143 }
1142 skb->protocol = eth_type_trans(skb, dev); 1144 skb->protocol = eth_type_trans(skb, dev);
1143 netif_rx(skb); 1145 netif_rx(skb);
1144 dev->last_rx = jiffies;
1145 dev->stats.rx_packets++; 1146 dev->stats.rx_packets++;
1146 dev->stats.rx_bytes += pkt_len; 1147 dev->stats.rx_bytes += pkt_len;
1147 } 1148 }
@@ -1423,14 +1424,3 @@ static void __exit yellowfin_cleanup (void)
1423 1424
1424module_init(yellowfin_init); 1425module_init(yellowfin_init);
1425module_exit(yellowfin_cleanup); 1426module_exit(yellowfin_cleanup);
1426
1427/*
1428 * Local variables:
1429 * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c yellowfin.c"
1430 * compile-command-alphaLX: "gcc -DMODULE -Wall -Wstrict-prototypes -O2 -c yellowfin.c -fomit-frame-pointer -fno-strength-reduce -mno-fp-regs -Wa,-m21164a -DBWX_USABLE -DBWIO_ENABLED"
1431 * simple-compile-command: "gcc -DMODULE -O6 -c yellowfin.c"
1432 * c-indent-level: 4
1433 * c-basic-offset: 4
1434 * tab-width: 4
1435 * End:
1436 */
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index a86c022d6a94..f0b15c9347d0 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -167,7 +167,7 @@ static void znet_tx_timeout (struct net_device *dev);
167/* Request needed resources */ 167/* Request needed resources */
168static int znet_request_resources (struct net_device *dev) 168static int znet_request_resources (struct net_device *dev)
169{ 169{
170 struct znet_private *znet = dev->priv; 170 struct znet_private *znet = netdev_priv(dev);
171 unsigned long flags; 171 unsigned long flags;
172 172
173 if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev)) 173 if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev))
@@ -201,7 +201,7 @@ static int znet_request_resources (struct net_device *dev)
201 201
202static void znet_release_resources (struct net_device *dev) 202static void znet_release_resources (struct net_device *dev)
203{ 203{
204 struct znet_private *znet = dev->priv; 204 struct znet_private *znet = netdev_priv(dev);
205 unsigned long flags; 205 unsigned long flags;
206 206
207 release_region (znet->sia_base, znet->sia_size); 207 release_region (znet->sia_base, znet->sia_size);
@@ -216,7 +216,7 @@ static void znet_release_resources (struct net_device *dev)
216/* Keep the magical SIA stuff in a single function... */ 216/* Keep the magical SIA stuff in a single function... */
217static void znet_transceiver_power (struct net_device *dev, int on) 217static void znet_transceiver_power (struct net_device *dev, int on)
218{ 218{
219 struct znet_private *znet = dev->priv; 219 struct znet_private *znet = netdev_priv(dev);
220 unsigned char v; 220 unsigned char v;
221 221
222 /* Turn on/off the 82501 SIA, using zenith-specific magic. */ 222 /* Turn on/off the 82501 SIA, using zenith-specific magic. */
@@ -235,7 +235,7 @@ static void znet_transceiver_power (struct net_device *dev, int on)
235 Also used from hardware_init. */ 235 Also used from hardware_init. */
236static void znet_set_multicast_list (struct net_device *dev) 236static void znet_set_multicast_list (struct net_device *dev)
237{ 237{
238 struct znet_private *znet = dev->priv; 238 struct znet_private *znet = netdev_priv(dev);
239 short ioaddr = dev->base_addr; 239 short ioaddr = dev->base_addr;
240 struct i82593_conf_block *cfblk = &znet->i593_init; 240 struct i82593_conf_block *cfblk = &znet->i593_init;
241 241
@@ -370,7 +370,6 @@ static int __init znet_probe (void)
370 struct net_device *dev; 370 struct net_device *dev;
371 char *p; 371 char *p;
372 int err = -ENOMEM; 372 int err = -ENOMEM;
373 DECLARE_MAC_BUF(mac);
374 373
375 /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */ 374 /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */
376 for(p = (char *)phys_to_virt(0xf0000); p < (char *)phys_to_virt(0x100000); p++) 375 for(p = (char *)phys_to_virt(0xf0000); p < (char *)phys_to_virt(0x100000); p++)
@@ -387,7 +386,7 @@ static int __init znet_probe (void)
387 if (!dev) 386 if (!dev)
388 return -ENOMEM; 387 return -ENOMEM;
389 388
390 znet = dev->priv; 389 znet = netdev_priv(dev);
391 390
392 netinfo = (struct netidblk *)p; 391 netinfo = (struct netidblk *)p;
393 dev->base_addr = netinfo->iobase1; 392 dev->base_addr = netinfo->iobase1;
@@ -397,9 +396,9 @@ static int __init znet_probe (void)
397 for (i = 0; i < 6; i++) 396 for (i = 0; i < 6; i++)
398 dev->dev_addr[i] = netinfo->netid[i]; 397 dev->dev_addr[i] = netinfo->netid[i];
399 398
400 printk(KERN_INFO "%s: ZNET at %#3lx, %s" 399 printk(KERN_INFO "%s: ZNET at %#3lx, %pM"
401 ", using IRQ %d DMA %d and %d.\n", 400 ", using IRQ %d DMA %d and %d.\n",
402 dev->name, dev->base_addr, print_mac(mac, dev->dev_addr), 401 dev->name, dev->base_addr, dev->dev_addr,
403 dev->irq, netinfo->dma1, netinfo->dma2); 402 dev->irq, netinfo->dma1, netinfo->dma2);
404 403
405 if (znet_debug > 1) { 404 if (znet_debug > 1) {
@@ -531,7 +530,7 @@ static void znet_tx_timeout (struct net_device *dev)
531static int znet_send_packet(struct sk_buff *skb, struct net_device *dev) 530static int znet_send_packet(struct sk_buff *skb, struct net_device *dev)
532{ 531{
533 int ioaddr = dev->base_addr; 532 int ioaddr = dev->base_addr;
534 struct znet_private *znet = dev->priv; 533 struct znet_private *znet = netdev_priv(dev);
535 unsigned long flags; 534 unsigned long flags;
536 short length = skb->len; 535 short length = skb->len;
537 536
@@ -601,7 +600,7 @@ static int znet_send_packet(struct sk_buff *skb, struct net_device *dev)
601static irqreturn_t znet_interrupt(int irq, void *dev_id) 600static irqreturn_t znet_interrupt(int irq, void *dev_id)
602{ 601{
603 struct net_device *dev = dev_id; 602 struct net_device *dev = dev_id;
604 struct znet_private *znet = dev->priv; 603 struct znet_private *znet = netdev_priv(dev);
605 int ioaddr; 604 int ioaddr;
606 int boguscnt = 20; 605 int boguscnt = 20;
607 int handled = 0; 606 int handled = 0;
@@ -679,7 +678,7 @@ static irqreturn_t znet_interrupt(int irq, void *dev_id)
679 678
680static void znet_rx(struct net_device *dev) 679static void znet_rx(struct net_device *dev)
681{ 680{
682 struct znet_private *znet = dev->priv; 681 struct znet_private *znet = netdev_priv(dev);
683 int ioaddr = dev->base_addr; 682 int ioaddr = dev->base_addr;
684 int boguscount = 1; 683 int boguscount = 1;
685 short next_frame_end_offset = 0; /* Offset of next frame start. */ 684 short next_frame_end_offset = 0; /* Offset of next frame start. */
@@ -786,7 +785,6 @@ static void znet_rx(struct net_device *dev)
786 } 785 }
787 skb->protocol=eth_type_trans(skb,dev); 786 skb->protocol=eth_type_trans(skb,dev);
788 netif_rx(skb); 787 netif_rx(skb);
789 dev->last_rx = jiffies;
790 dev->stats.rx_packets++; 788 dev->stats.rx_packets++;
791 dev->stats.rx_bytes += pkt_len; 789 dev->stats.rx_bytes += pkt_len;
792 } 790 }
@@ -829,7 +827,7 @@ static void show_dma(struct net_device *dev)
829{ 827{
830 short ioaddr = dev->base_addr; 828 short ioaddr = dev->base_addr;
831 unsigned char stat = inb (ioaddr); 829 unsigned char stat = inb (ioaddr);
832 struct znet_private *znet = dev->priv; 830 struct znet_private *znet = netdev_priv(dev);
833 unsigned long flags; 831 unsigned long flags;
834 short dma_port = ((znet->tx_dma&3)<<2) + IO_DMA2_BASE; 832 short dma_port = ((znet->tx_dma&3)<<2) + IO_DMA2_BASE;
835 unsigned addr = inb(dma_port); 833 unsigned addr = inb(dma_port);
@@ -852,7 +850,7 @@ static void hardware_init(struct net_device *dev)
852{ 850{
853 unsigned long flags; 851 unsigned long flags;
854 short ioaddr = dev->base_addr; 852 short ioaddr = dev->base_addr;
855 struct znet_private *znet = dev->priv; 853 struct znet_private *znet = netdev_priv(dev);
856 854
857 znet->rx_cur = znet->rx_start; 855 znet->rx_cur = znet->rx_start;
858 znet->tx_cur = znet->tx_start; 856 znet->tx_cur = znet->tx_start;
@@ -914,7 +912,7 @@ static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset)
914static __exit void znet_cleanup (void) 912static __exit void znet_cleanup (void)
915{ 913{
916 if (znet_dev) { 914 if (znet_dev) {
917 struct znet_private *znet = znet_dev->priv; 915 struct znet_private *znet = netdev_priv(znet_dev);
918 916
919 unregister_netdev (znet_dev); 917 unregister_netdev (znet_dev);
920 kfree (znet->rx_start); 918 kfree (znet->rx_start);
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index 3926b2aa9cca..affd904deafc 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -122,7 +122,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
122 break; 122 break;
123 board = z->resource.start; 123 board = z->resource.start;
124 ioaddr = board+cards[i].offset; 124 ioaddr = board+cards[i].offset;
125 dev = ____alloc_ei_netdev(0); 125 dev = alloc_ei_netdev();
126 if (!dev) 126 if (!dev)
127 return -ENOMEM; 127 return -ENOMEM;
128 if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { 128 if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) {
@@ -139,6 +139,20 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
139 return 0; 139 return 0;
140} 140}
141 141
142static const struct net_device_ops zorro8390_netdev_ops = {
143 .ndo_open = zorro8390_open,
144 .ndo_stop = zorro8390_close,
145 .ndo_start_xmit = ei_start_xmit,
146 .ndo_tx_timeout = ei_tx_timeout,
147 .ndo_get_stats = ei_get_stats,
148 .ndo_set_multicast_list = ei_set_multicast_list,
149 .ndo_validate_addr = eth_validate_addr,
150 .ndo_change_mtu = eth_change_mtu,
151#ifdef CONFIG_NET_POLL_CONTROLLER
152 .ndo_poll_controller = ei_poll,
153#endif
154};
155
142static int __devinit zorro8390_init(struct net_device *dev, 156static int __devinit zorro8390_init(struct net_device *dev,
143 unsigned long board, const char *name, 157 unsigned long board, const char *name,
144 unsigned long ioaddr) 158 unsigned long ioaddr)
@@ -151,7 +165,6 @@ static int __devinit zorro8390_init(struct net_device *dev,
151 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 165 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
152 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, 166 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
153 }; 167 };
154 DECLARE_MAC_BUF(mac);
155 168
156 /* Reset card. Who knows what dain-bramaged state it was left in. */ 169 /* Reset card. Who knows what dain-bramaged state it was left in. */
157 { 170 {
@@ -216,7 +229,7 @@ static int __devinit zorro8390_init(struct net_device *dev,
216 dev->dev_addr[i] = SA_prom[i]; 229 dev->dev_addr[i] = SA_prom[i];
217 230
218#ifdef DEBUG 231#ifdef DEBUG
219 printk("%s", print_mac(mac, dev->dev_addr)); 232 printk("%pM", dev->dev_addr);
220#endif 233#endif
221 234
222 ei_status.name = name; 235 ei_status.name = name;
@@ -231,12 +244,8 @@ static int __devinit zorro8390_init(struct net_device *dev,
231 ei_status.block_output = &zorro8390_block_output; 244 ei_status.block_output = &zorro8390_block_output;
232 ei_status.get_8390_hdr = &zorro8390_get_8390_hdr; 245 ei_status.get_8390_hdr = &zorro8390_get_8390_hdr;
233 ei_status.reg_offset = zorro8390_offsets; 246 ei_status.reg_offset = zorro8390_offsets;
234 dev->open = &zorro8390_open;
235 dev->stop = &zorro8390_close;
236#ifdef CONFIG_NET_POLL_CONTROLLER
237 dev->poll_controller = __ei_poll;
238#endif
239 247
248 dev->netdev_ops = &zorro8390_netdev_ops;
240 __NS8390_init(dev, 0); 249 __NS8390_init(dev, 0);
241 err = register_netdev(dev); 250 err = register_netdev(dev);
242 if (err) { 251 if (err) {
@@ -244,8 +253,8 @@ static int __devinit zorro8390_init(struct net_device *dev,
244 return err; 253 return err;
245 } 254 }
246 255
247 printk(KERN_INFO "%s: %s at 0x%08lx, Ethernet Address %s\n", 256 printk(KERN_INFO "%s: %s at 0x%08lx, Ethernet Address %pM\n",
248 dev->name, name, board, print_mac(mac, dev->dev_addr)); 257 dev->name, name, board, dev->dev_addr);
249 258
250 return 0; 259 return 0;
251} 260}